aws-sdk-kinesis 1.0.0.rc1 → 1.0.0.rc2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
- metadata.gz: 8a78e60717988184c051db6f7783a95c3d389a66
- data.tar.gz: 6dfb75dc9cc5503f78cf09bc4acf725b5564edf6
+ metadata.gz: 3696a3abd27e38354e2c99aa5f62882ad3c66110
+ data.tar.gz: 7fcb6cf73c101785afe8a2f5eeef721af9740459
 SHA512:
- metadata.gz: 7513a4fd22c617ced84e6a98044a56817e2d1fe373bbee4b8fd27a24487fd1e15adfcb2b3b7b7f992c4a1ea1d6a25bf5af64836d327d282a58eddbb81ac57904
- data.tar.gz: 336e5f2eecd77035a3299f329cd7c57e128c1519d21ccafe6ac7428dcb0019928f5dcf8909e0fc52219d151f73f62f91f462d0e235fd42458fed5d03eb070608
+ metadata.gz: 8b76cf9a445cffa8e2afa860c5426b626888d74501f9b52a3f3046cecca76207fdacce0361a078627aa531e3728553de082dafb9c07b88804a6cfdc557c403e7
+ data.tar.gz: 9a9b2e9aacb639619035ed5d5ee218908bc59ad3116dc6c3762b8cae95298b862c9c58a0674113606c5958361bb5ba790a35fb8be51d76bb74e32a3e16710af9
@@ -1,6 +1,6 @@
 # WARNING ABOUT GENERATED CODE
 #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
 # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
 #
 # WARNING ABOUT GENERATED CODE
@@ -43,6 +43,6 @@ require_relative 'aws-sdk-kinesis/customizations'
 # @service
 module Aws::Kinesis

- GEM_VERSION = '1.0.0.rc1'
+ GEM_VERSION = '1.0.0.rc2'

 end
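
Since 1.0.0.rc2 is a prerelease version, RubyGems and Bundler will not select it by default; it has to be requested explicitly. A minimal Gemfile sketch (the gem name and version come from this changeset; everything else is illustrative):

    # Gemfile: opt in to the release candidate explicitly.
    source 'https://rubygems.org'

    gem 'aws-sdk-kinesis', '1.0.0.rc2'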
@@ -1,6 +1,6 @@
 # WARNING ABOUT GENERATED CODE
 #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
 # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
 #
 # WARNING ABOUT GENERATED CODE
@@ -18,1400 +18,1565 @@ require 'aws-sdk-core/plugins/regional_endpoint.rb'
 require 'aws-sdk-core/plugins/response_paging.rb'
 require 'aws-sdk-core/plugins/stub_responses.rb'
 require 'aws-sdk-core/plugins/idempotency_token.rb'
+ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
 require 'aws-sdk-core/plugins/signature_v4.rb'
 require 'aws-sdk-core/plugins/protocols/json_rpc.rb'

 Aws::Plugins::GlobalConfiguration.add_identifier(:kinesis)

- module Aws
- module Kinesis
- class Client < Seahorse::Client::Base
+ module Aws::Kinesis
+ class Client < Seahorse::Client::Base

- include Aws::ClientStubs
+ include Aws::ClientStubs

- @identifier = :kinesis
+ @identifier = :kinesis

- set_api(ClientApi::API)
+ set_api(ClientApi::API)

- add_plugin(Seahorse::Client::Plugins::ContentLength)
- add_plugin(Aws::Plugins::CredentialsConfiguration)
- add_plugin(Aws::Plugins::Logging)
- add_plugin(Aws::Plugins::ParamConverter)
- add_plugin(Aws::Plugins::ParamValidator)
- add_plugin(Aws::Plugins::UserAgent)
- add_plugin(Aws::Plugins::HelpfulSocketErrors)
- add_plugin(Aws::Plugins::RetryErrors)
- add_plugin(Aws::Plugins::GlobalConfiguration)
- add_plugin(Aws::Plugins::RegionalEndpoint)
- add_plugin(Aws::Plugins::ResponsePaging)
- add_plugin(Aws::Plugins::StubResponses)
- add_plugin(Aws::Plugins::IdempotencyToken)
- add_plugin(Aws::Plugins::SignatureV4)
- add_plugin(Aws::Plugins::Protocols::JsonRpc)
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
+ add_plugin(Aws::Plugins::Logging)
+ add_plugin(Aws::Plugins::ParamConverter)
+ add_plugin(Aws::Plugins::ParamValidator)
+ add_plugin(Aws::Plugins::UserAgent)
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
+ add_plugin(Aws::Plugins::RetryErrors)
+ add_plugin(Aws::Plugins::GlobalConfiguration)
+ add_plugin(Aws::Plugins::RegionalEndpoint)
+ add_plugin(Aws::Plugins::ResponsePaging)
+ add_plugin(Aws::Plugins::StubResponses)
+ add_plugin(Aws::Plugins::IdempotencyToken)
+ add_plugin(Aws::Plugins::JsonvalueConverter)
+ add_plugin(Aws::Plugins::SignatureV4)
+ add_plugin(Aws::Plugins::Protocols::JsonRpc)

- # @option options [required, Aws::CredentialProvider] :credentials
- # Your AWS credentials. This can be an instance of any one of the
- # following classes:
- #
- # * `Aws::Credentials` - Used for configuring static, non-refreshing
- # credentials.
- #
- # * `Aws::InstanceProfileCredentials` - Used for loading credentials
- # from an EC2 IMDS on an EC2 instance.
- #
- # * `Aws::SharedCredentials` - Used for loading credentials from a
- # shared file, such as `~/.aws/config`.
- #
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
- #
- # When `:credentials` are not configured directly, the following
- # locations will be searched for credentials:
- #
- # * `Aws.config[:credentials]`
- # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
- # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # * EC2 IMDS instance profile - When used by default, the timeouts are
- # very aggressive. Construct and pass an instance of
- # `Aws::InstanceProfileCredentails` to enable retries and extended
- # timeouts.
- # @option options [required, String] :region
- # The AWS region to connect to. The configured `:region` is
- # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is search for in the following locations:
- #
- # * `Aws.config[:region]`
- # * `ENV['AWS_REGION']`
- # * `ENV['AMAZON_REGION']`
- # * `ENV['AWS_DEFAULT_REGION']`
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # @option options [String] :access_key_id
- # @option options [Boolean] :convert_params (true)
- # When `true`, an attempt is made to coerce request parameters into
- # the required types.
- # @option options [String] :endpoint
- # The client endpoint is normally constructed from the `:region`
- # option. You should only configure an `:endpoint` when connecting
- # to test endpoints. This should be avalid HTTP(S) URI.
- # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
- # The log formatter.
- # @option options [Symbol] :log_level (:info)
- # The log level to send messages to the `:logger` at.
- # @option options [Logger] :logger
- # The Logger instance to send log messages to. If this option
- # is not set, logging will be disabled.
- # @option options [String] :profile ("default")
- # Used when loading credentials from the shared credentials file
- # at HOME/.aws/credentials. When not specified, 'default' is used.
- # @option options [Integer] :retry_limit (3)
- # The maximum number of times to retry failed requests. Only
- # ~ 500 level server errors and certain ~ 400 level client errors
- # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors and auth
- # errors from expired credentials.
- # @option options [String] :secret_access_key
- # @option options [String] :session_token
- # @option options [Boolean] :simple_json (false)
- # Disables request parameter conversion, validation, and formatting.
- # Also disable response data type conversions. This option is useful
- # when you want to ensure the highest level of performance by
- # avoiding overhead of walking request parameters and response data
- # structures.
- #
- # When `:simple_json` is enabled, the request parameters hash must
- # be formatted exactly as the DynamoDB API expects.
- # @option options [Boolean] :stub_responses (false)
- # Causes the client to return stubbed responses. By default
- # fake responses are generated and returned. You can specify
- # the response data to return or errors to raise by calling
- # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
- #
- # ** Please note ** When response stubbing is enabled, no HTTP
- # requests are made, and retries are disabled.
- # @option options [Boolean] :validate_params (true)
- # When `true`, request parameters are validated before
- # sending the request.
- def initialize(*args)
- super
- end
-
- # @!group API Operations
+ # @option options [required, Aws::CredentialProvider] :credentials
+ # Your AWS credentials. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
+ # credentials.
+ #
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
+ # from an EC2 IMDS on an EC2 instance.
+ #
+ # * `Aws::SharedCredentials` - Used for loading credentials from a
+ # shared file, such as `~/.aws/config`.
+ #
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+ #
+ # When `:credentials` are not configured directly, the following
+ # locations will be searched for credentials:
+ #
+ # * `Aws.config[:credentials]`
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ # * EC2 IMDS instance profile - When used by default, the timeouts are
+ # very aggressive. Construct and pass an instance of
+ # `Aws::InstanceProfileCredentials` to enable retries and extended
+ # timeouts.
+ #
+ # @option options [required, String] :region
+ # The AWS region to connect to. The configured `:region` is
+ # used to determine the service `:endpoint`. When not passed,
+ # a default `:region` is searched for in the following locations:
+ #
+ # * `Aws.config[:region]`
+ # * `ENV['AWS_REGION']`
+ # * `ENV['AMAZON_REGION']`
+ # * `ENV['AWS_DEFAULT_REGION']`
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ #
+ # @option options [String] :access_key_id
+ #
+ # @option options [Boolean] :convert_params (true)
+ # When `true`, an attempt is made to coerce request parameters into
+ # the required types.
+ #
+ # @option options [String] :endpoint
+ # The client endpoint is normally constructed from the `:region`
+ # option. You should only configure an `:endpoint` when connecting
+ # to test endpoints. This should be a valid HTTP(S) URI.
+ #
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+ # The log formatter.
+ #
+ # @option options [Symbol] :log_level (:info)
+ # The log level to send messages to the `:logger` at.
+ #
+ # @option options [Logger] :logger
+ # The Logger instance to send log messages to. If this option
+ # is not set, logging will be disabled.
+ #
+ # @option options [String] :profile ("default")
+ # Used when loading credentials from the shared credentials file
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
+ #
+ # @option options [Integer] :retry_limit (3)
+ # The maximum number of times to retry failed requests. Only
+ # ~ 500 level server errors and certain ~ 400 level client errors
+ # are retried. Generally, these are throttling errors, data
+ # checksum errors, networking errors, timeout errors and auth
+ # errors from expired credentials.
+ #
+ # @option options [String] :secret_access_key
+ #
+ # @option options [String] :session_token
+ #
+ # @option options [Boolean] :simple_json (false)
+ # Disables request parameter conversion, validation, and formatting.
+ # Also disables response data type conversions. This option is useful
+ # when you want to ensure the highest level of performance by
+ # avoiding overhead of walking request parameters and response data
+ # structures.
+ #
+ # When `:simple_json` is enabled, the request parameters hash must
+ # be formatted exactly as the DynamoDB API expects.
+ #
+ # @option options [Boolean] :stub_responses (false)
+ # Causes the client to return stubbed responses. By default
+ # fake responses are generated and returned. You can specify
+ # the response data to return or errors to raise by calling
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+ #
+ # ** Please note ** When response stubbing is enabled, no HTTP
+ # requests are made, and retries are disabled.
+ #
+ # @option options [Boolean] :validate_params (true)
+ # When `true`, request parameters are validated before
+ # sending the request.
+ #
+ def initialize(*args)
+ super
+ end
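
All of the options documented above are passed to this constructor. A minimal construction sketch (the region and key values are placeholders):

    require 'aws-sdk-kinesis'

    # Static credentials; in most deployments the default provider chain
    # described above (ENV, ~/.aws/credentials, EC2 IMDS) is preferable.
    client = Aws::Kinesis::Client.new(
      region: 'us-east-1',
      credentials: Aws::Credentials.new('AKID', 'SECRET')
    )

    # For tests: stubbed clients make no HTTP requests and disable retries.
    test_client = Aws::Kinesis::Client.new(region: 'us-east-1', stub_responses: true)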

- # Adds or updates tags for the specified Amazon Kinesis stream. Each
- # stream can have up to 10 tags.
- #
- # If tags have already been assigned to the stream, `AddTagsToStream`
- # overwrites any existing tags that correspond to the specified tag
- # keys.
- # @option params [required, String] :stream_name
- # The name of the stream.
- # @option params [required, Hash<String,String>] :tags
- # The set of key-value pairs to use to create the tags.
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.add_tags_to_stream({
- # stream_name: "StreamName", # required
- # tags: { # required
- # "TagKey" => "TagValue",
- # },
- # })
- # @overload add_tags_to_stream(params = {})
- # @param [Hash] params ({})
- def add_tags_to_stream(params = {}, options = {})
- req = build_request(:add_tags_to_stream, params)
- req.send_request(options)
- end
+ # @!group API Operations

- # Creates an Amazon Kinesis stream. A stream captures and transports
- # data records that are continuously emitted from different data sources
- # or *producers*. Scale-out within a stream is explicitly supported by
- # means of shards, which are uniquely identified groups of data records
- # in a stream.
- #
- # You specify and control the number of shards that a stream is composed
- # of. Each shard can support reads up to 5 transactions per second, up
- # to a maximum data read total of 2 MB per second. Each shard can
- # support writes up to 1,000 records per second, up to a maximum data
- # write total of 1 MB per second. You can add shards to a stream if the
- # amount of data input increases and you can remove shards if the amount
- # of data input decreases.
- #
- # The stream name identifies the stream. The name is scoped to the AWS
- # account used by the application. It is also scoped by region. That is,
- # two streams in two different accounts can have the same name, and two
- # streams in the same account, but in two different regions, can have
- # the same name.
- #
- # `CreateStream` is an asynchronous operation. Upon receiving a
- # `CreateStream` request, Amazon Kinesis immediately returns and sets
- # the stream status to `CREATING`. After the stream is created, Amazon
- # Kinesis sets the stream status to `ACTIVE`. You should perform read
- # and write operations only on an `ACTIVE` stream.
- #
- # You receive a `LimitExceededException` when making a `CreateStream`
- # request if you try to do one of the following:
- #
- # * Have more than five streams in the `CREATING` state at any point in
- # time.
- #
- # * Create more shards than are authorized for your account.
- #
- # For the default shard limit for an AWS account, see [Streams
- # Limits][1] in the *Amazon Kinesis Streams Developer Guide*. If you
- # need to increase this limit, [contact AWS Support][2].
- #
- # You can use `DescribeStream` to check the stream status, which is
- # returned in `StreamStatus`.
- #
- # CreateStream has a limit of 5 transactions per second per account.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
- # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
- # @option params [required, String] :stream_name
- # A name to identify the stream. The stream name is scoped to the AWS
- # account used by the application that creates the stream. It is also
- # scoped by region. That is, two streams in two different AWS accounts
- # can have the same name, and two streams in the same AWS account but in
- # two different regions can have the same name.
- # @option params [required, Integer] :shard_count
- # The number of shards that the stream will use. The throughput of the
- # stream is a function of the number of shards; more shards are required
- # for greater provisioned throughput.
- #
- # DefaultShardLimit;
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.create_stream({
- # stream_name: "StreamName", # required
- # shard_count: 1, # required
- # })
- # @overload create_stream(params = {})
- # @param [Hash] params ({})
- def create_stream(params = {}, options = {})
- req = build_request(:create_stream, params)
- req.send_request(options)
- end
+ # Adds or updates tags for the specified Amazon Kinesis stream. Each
+ # stream can have up to 10 tags.
+ #
+ # If tags have already been assigned to the stream, `AddTagsToStream`
+ # overwrites any existing tags that correspond to the specified tag
+ # keys.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream.
+ #
+ # @option params [required, Hash<String,String>] :tags
+ # The set of key-value pairs to use to create the tags.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.add_tags_to_stream({
+ # stream_name: "StreamName", # required
+ # tags: { # required
+ # "TagKey" => "TagValue",
+ # },
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/AddTagsToStream AWS API Documentation
+ #
+ # @overload add_tags_to_stream(params = {})
+ # @param [Hash] params ({})
+ def add_tags_to_stream(params = {}, options = {})
+ req = build_request(:add_tags_to_stream, params)
+ req.send_request(options)
+ end
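
Because `AddTagsToStream` overwrites existing tags with matching keys, repeated calls are a safe way to update tag values. A usage sketch (stream and tag values are illustrative):

    client.add_tags_to_stream(
      stream_name: 'my-stream',
      tags: { 'Environment' => 'staging', 'Team' => 'ingest' }
    )

    # A second call with an existing key overwrites only that key's value.
    client.add_tags_to_stream(
      stream_name: 'my-stream',
      tags: { 'Environment' => 'production' }
    )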

- # Decreases the Amazon Kinesis stream's retention period, which is the
- # length of time data records are accessible after they are added to the
- # stream. The minimum value of a stream's retention period is 24 hours.
- #
- # This operation may result in lost data. For example, if the stream's
- # retention period is 48 hours and is decreased to 24 hours, any data
- # already in the stream that is older than 24 hours is inaccessible.
- # @option params [required, String] :stream_name
- # The name of the stream to modify.
- # @option params [required, Integer] :retention_period_hours
- # The new retention period of the stream, in hours. Must be less than
- # the current retention period.
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.decrease_stream_retention_period({
- # stream_name: "StreamName", # required
- # retention_period_hours: 1, # required
- # })
- # @overload decrease_stream_retention_period(params = {})
- # @param [Hash] params ({})
- def decrease_stream_retention_period(params = {}, options = {})
- req = build_request(:decrease_stream_retention_period, params)
- req.send_request(options)
- end
+ # Creates an Amazon Kinesis stream. A stream captures and transports
+ # data records that are continuously emitted from different data sources
+ # or *producers*. Scale-out within a stream is explicitly supported by
+ # means of shards, which are uniquely identified groups of data records
+ # in a stream.
+ #
+ # You specify and control the number of shards that a stream is composed
+ # of. Each shard can support reads up to 5 transactions per second, up
+ # to a maximum data read total of 2 MB per second. Each shard can
+ # support writes up to 1,000 records per second, up to a maximum data
+ # write total of 1 MB per second. You can add shards to a stream if the
+ # amount of data input increases and you can remove shards if the amount
+ # of data input decreases.
+ #
+ # The stream name identifies the stream. The name is scoped to the AWS
+ # account used by the application. It is also scoped by region. That is,
+ # two streams in two different accounts can have the same name, and two
+ # streams in the same account, but in two different regions, can have
+ # the same name.
+ #
+ # `CreateStream` is an asynchronous operation. Upon receiving a
+ # `CreateStream` request, Amazon Kinesis immediately returns and sets
+ # the stream status to `CREATING`. After the stream is created, Amazon
+ # Kinesis sets the stream status to `ACTIVE`. You should perform read
+ # and write operations only on an `ACTIVE` stream.
+ #
+ # You receive a `LimitExceededException` when making a `CreateStream`
+ # request if you try to do one of the following:
+ #
+ # * Have more than five streams in the `CREATING` state at any point in
+ # time.
+ #
+ # * Create more shards than are authorized for your account.
+ #
+ # For the default shard limit for an AWS account, see [Streams
+ # Limits][1] in the *Amazon Kinesis Streams Developer Guide*. If you
+ # need to increase this limit, [contact AWS Support][2].
+ #
+ # You can use `DescribeStream` to check the stream status, which is
+ # returned in `StreamStatus`.
+ #
+ # CreateStream has a limit of 5 transactions per second per account.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
+ # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
+ #
+ # @option params [required, String] :stream_name
+ # A name to identify the stream. The stream name is scoped to the AWS
+ # account used by the application that creates the stream. It is also
+ # scoped by region. That is, two streams in two different AWS accounts
+ # can have the same name, and two streams in the same AWS account but in
+ # two different regions can have the same name.
+ #
+ # @option params [required, Integer] :shard_count
+ # The number of shards that the stream will use. The throughput of the
+ # stream is a function of the number of shards; more shards are required
+ # for greater provisioned throughput.
+ #
+ # DefaultShardLimit;
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_stream({
+ # stream_name: "StreamName", # required
+ # shard_count: 1, # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/CreateStream AWS API Documentation
+ #
+ # @overload create_stream(params = {})
+ # @param [Hash] params ({})
+ def create_stream(params = {}, options = {})
+ req = build_request(:create_stream, params)
+ req.send_request(options)
+ end
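
Since `CreateStream` returns while the stream is still `CREATING`, a caller that wants to write immediately has to poll `DescribeStream` until the status flips to `ACTIVE`, as the documentation above suggests. A polling sketch (name, shard count, and interval are illustrative):

    client.create_stream(stream_name: 'my-stream', shard_count: 2)

    # Read and write operations should only be performed on an ACTIVE stream.
    loop do
      status = client.describe_stream(stream_name: 'my-stream')
                     .stream_description.stream_status
      break if status == 'ACTIVE'
      sleep 10
    end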

- # Deletes an Amazon Kinesis stream and all its shards and data. You must
- # shut down any applications that are operating on the stream before you
- # delete the stream. If an application attempts to operate on a deleted
- # stream, it will receive the exception `ResourceNotFoundException`.
- #
- # If the stream is in the `ACTIVE` state, you can delete it. After a
- # `DeleteStream` request, the specified stream is in the `DELETING`
- # state until Amazon Kinesis completes the deletion.
- #
- # **Note:** Amazon Kinesis might continue to accept data read and write
- # operations, such as PutRecord, PutRecords, and GetRecords, on a stream
- # in the `DELETING` state until the stream deletion is complete.
- #
- # When you delete a stream, any shards in that stream are also deleted,
- # and any tags are dissociated from the stream.
- #
- # You can use the DescribeStream operation to check the state of the
- # stream, which is returned in `StreamStatus`.
- #
- # DeleteStream has a limit of 5 transactions per second per account.
- # @option params [required, String] :stream_name
- # The name of the stream to delete.
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.delete_stream({
- # stream_name: "StreamName", # required
- # })
- # @overload delete_stream(params = {})
- # @param [Hash] params ({})
- def delete_stream(params = {}, options = {})
- req = build_request(:delete_stream, params)
- req.send_request(options)
- end
+ # Decreases the Amazon Kinesis stream's retention period, which is the
+ # length of time data records are accessible after they are added to the
+ # stream. The minimum value of a stream's retention period is 24 hours.
+ #
+ # This operation may result in lost data. For example, if the stream's
+ # retention period is 48 hours and is decreased to 24 hours, any data
+ # already in the stream that is older than 24 hours is inaccessible.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream to modify.
+ #
+ # @option params [required, Integer] :retention_period_hours
+ # The new retention period of the stream, in hours. Must be less than
+ # the current retention period.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.decrease_stream_retention_period({
+ # stream_name: "StreamName", # required
+ # retention_period_hours: 1, # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DecreaseStreamRetentionPeriod AWS API Documentation
+ #
+ # @overload decrease_stream_retention_period(params = {})
+ # @param [Hash] params ({})
+ def decrease_stream_retention_period(params = {}, options = {})
+ req = build_request(:decrease_stream_retention_period, params)
+ req.send_request(options)
+ end
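
Because decreasing the retention period can make older records inaccessible, it may be worth checking the current value first; `DescribeStream` exposes it as `retention_period_hours`. A guarded sketch (the stream name is illustrative):

    desc = client.describe_stream(stream_name: 'my-stream').stream_description
    if desc.retention_period_hours > 24
      # After this call, data older than 24 hours can no longer be read.
      client.decrease_stream_retention_period(
        stream_name: 'my-stream',
        retention_period_hours: 24
      )
    end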

- # Describes the shard limits and usage for the account.
- #
- # If you update your account limits, the old limits might be returned
- # for a few minutes.
- #
- # This operation has a limit of 1 transaction per second per account.
- # @return [Types::DescribeLimitsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeLimitsOutput#shard_limit #ShardLimit} => Integer
- # * {Types::DescribeLimitsOutput#open_shard_count #OpenShardCount} => Integer
- #
- # @example Request syntax with placeholder values
- # resp = client.describe_limits()
- #
- # @example Response structure
- # resp.shard_limit #=> Integer
- # resp.open_shard_count #=> Integer
- # @overload describe_limits(params = {})
- # @param [Hash] params ({})
- def describe_limits(params = {}, options = {})
- req = build_request(:describe_limits, params)
- req.send_request(options)
- end
+ # Deletes an Amazon Kinesis stream and all its shards and data. You must
+ # shut down any applications that are operating on the stream before you
+ # delete the stream. If an application attempts to operate on a deleted
+ # stream, it will receive the exception `ResourceNotFoundException`.
+ #
+ # If the stream is in the `ACTIVE` state, you can delete it. After a
+ # `DeleteStream` request, the specified stream is in the `DELETING`
+ # state until Amazon Kinesis completes the deletion.
+ #
+ # **Note:** Amazon Kinesis might continue to accept data read and write
+ # operations, such as PutRecord, PutRecords, and GetRecords, on a stream
+ # in the `DELETING` state until the stream deletion is complete.
+ #
+ # When you delete a stream, any shards in that stream are also deleted,
+ # and any tags are dissociated from the stream.
+ #
+ # You can use the DescribeStream operation to check the state of the
+ # stream, which is returned in `StreamStatus`.
+ #
+ # DeleteStream has a limit of 5 transactions per second per account.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream to delete.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.delete_stream({
+ # stream_name: "StreamName", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DeleteStream AWS API Documentation
+ #
+ # @overload delete_stream(params = {})
+ # @param [Hash] params ({})
+ def delete_stream(params = {}, options = {})
+ req = build_request(:delete_stream, params)
+ req.send_request(options)
+ end
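
`DeleteStream` is likewise asynchronous: the stream sits in `DELETING` until Amazon Kinesis finishes. One way to wait for completion is to poll until `DescribeStream` raises the `ResourceNotFoundException` mentioned above (the error class name follows the SDK's `Errors` module convention; stream name is illustrative):

    client.delete_stream(stream_name: 'my-stream')

    begin
      loop do
        client.describe_stream(stream_name: 'my-stream')
        sleep 10
      end
    rescue Aws::Kinesis::Errors::ResourceNotFoundException
      # The stream and its shards are gone.
    end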

- # Describes the specified Amazon Kinesis stream.
- #
- # The information returned includes the stream name, Amazon Resource
- # Name (ARN), creation time, enhanced metric configuration, and shard
- # map. The shard map is an array of shard objects. For each shard
- # object, there is the hash key and sequence number ranges that the
- # shard spans, and the IDs of any earlier shards that played in a role
- # in creating the shard. Every record ingested in the stream is
- # identified by a sequence number, which is assigned when the record is
- # put into the stream.
- #
- # You can limit the number of shards returned by each call. For more
- # information, see [Retrieving Shards from a Stream][1] in the *Amazon
- # Kinesis Streams Developer Guide*.
- #
- # There are no guarantees about the chronological order shards returned.
- # To process shards in chronological order, use the ID of the parent
- # shard to track the lineage to the oldest shard.
- #
- # This operation has a limit of 10 transactions per second per account.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html
- # @option params [required, String] :stream_name
- # The name of the stream to describe.
- # @option params [Integer] :limit
- # The maximum number of shards to return in a single call. The default
- # value is 100. If you specify a value greater than 100, at most 100
- # shards are returned.
- # @option params [String] :exclusive_start_shard_id
- # The shard ID of the shard to start with.
- # @return [Types::DescribeStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeStreamOutput#stream_description #StreamDescription} => Types::StreamDescription
- #
- # @example Request syntax with placeholder values
- # resp = client.describe_stream({
- # stream_name: "StreamName", # required
- # limit: 1,
- # exclusive_start_shard_id: "ShardId",
- # })
- #
- # @example Response structure
- # resp.stream_description.stream_name #=> String
- # resp.stream_description.stream_arn #=> String
- # resp.stream_description.stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE", "UPDATING"
- # resp.stream_description.shards #=> Array
- # resp.stream_description.shards[0].shard_id #=> String
- # resp.stream_description.shards[0].parent_shard_id #=> String
- # resp.stream_description.shards[0].adjacent_parent_shard_id #=> String
- # resp.stream_description.shards[0].hash_key_range.starting_hash_key #=> String
- # resp.stream_description.shards[0].hash_key_range.ending_hash_key #=> String
- # resp.stream_description.shards[0].sequence_number_range.starting_sequence_number #=> String
- # resp.stream_description.shards[0].sequence_number_range.ending_sequence_number #=> String
- # resp.stream_description.has_more_shards #=> Boolean
- # resp.stream_description.retention_period_hours #=> Integer
- # resp.stream_description.stream_creation_timestamp #=> Time
- # resp.stream_description.enhanced_monitoring #=> Array
- # resp.stream_description.enhanced_monitoring[0].shard_level_metrics #=> Array
- # resp.stream_description.enhanced_monitoring[0].shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
- # @overload describe_stream(params = {})
- # @param [Hash] params ({})
- def describe_stream(params = {}, options = {})
- req = build_request(:describe_stream, params)
- req.send_request(options)
- end
+ # Describes the shard limits and usage for the account.
+ #
+ # If you update your account limits, the old limits might be returned
+ # for a few minutes.
+ #
+ # This operation has a limit of 1 transaction per second per account.
+ #
+ # @return [Types::DescribeLimitsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeLimitsOutput#shard_limit #shard_limit} => Integer
+ # * {Types::DescribeLimitsOutput#open_shard_count #open_shard_count} => Integer
+ #
+ # @example Response structure
+ #
+ # resp.shard_limit #=> Integer
+ # resp.open_shard_count #=> Integer
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeLimits AWS API Documentation
+ #
+ # @overload describe_limits(params = {})
+ # @param [Hash] params ({})
+ def describe_limits(params = {}, options = {})
+ req = build_request(:describe_limits, params)
+ req.send_request(options)
+ end
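
The two counters returned here make it straightforward to check remaining shard headroom before creating streams or splitting shards; for example:

    limits = client.describe_limits
    remaining = limits.shard_limit - limits.open_shard_count
    puts "#{remaining} more shards can be opened in this account"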

- # Disables enhanced monitoring.
- # @option params [required, String] :stream_name
- # The name of the Amazon Kinesis stream for which to disable enhanced
- # monitoring.
- # @option params [required, Array<String>] :shard_level_metrics
- # List of shard-level metrics to disable.
- #
- # The following are the valid shard-level metrics. The value "`ALL`"
- # disables every metric.
- #
- # * `IncomingBytes`
- #
- # * `IncomingRecords`
- #
- # * `OutgoingBytes`
- #
- # * `OutgoingRecords`
- #
- # * `WriteProvisionedThroughputExceeded`
- #
- # * `ReadProvisionedThroughputExceeded`
- #
- # * `IteratorAgeMilliseconds`
- #
- # * `ALL`
- #
- # For more information, see [Monitoring the Amazon Kinesis Streams
- # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
- # Developer Guide*.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
- # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::EnhancedMonitoringOutput#stream_name #StreamName} => String
- # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #CurrentShardLevelMetrics} => Array<String>
- # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #DesiredShardLevelMetrics} => Array<String>
- #
- # @example Request syntax with placeholder values
- # resp = client.disable_enhanced_monitoring({
- # stream_name: "StreamName", # required
- # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
- # })
- #
- # @example Response structure
- # resp.stream_name #=> String
- # resp.current_shard_level_metrics #=> Array
- # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
- # resp.desired_shard_level_metrics #=> Array
- # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
- # @overload disable_enhanced_monitoring(params = {})
- # @param [Hash] params ({})
- def disable_enhanced_monitoring(params = {}, options = {})
- req = build_request(:disable_enhanced_monitoring, params)
- req.send_request(options)
- end
+ # Describes the specified Amazon Kinesis stream.
+ #
+ # The information returned includes the stream name, Amazon Resource
+ # Name (ARN), creation time, enhanced metric configuration, and shard
+ # map. The shard map is an array of shard objects. For each shard
+ # object, there is the hash key and sequence number ranges that the
+ # shard spans, and the IDs of any earlier shards that played a role
+ # in creating the shard. Every record ingested in the stream is
+ # identified by a sequence number, which is assigned when the record is
+ # put into the stream.
+ #
+ # You can limit the number of shards returned by each call. For more
+ # information, see [Retrieving Shards from a Stream][1] in the *Amazon
+ # Kinesis Streams Developer Guide*.
+ #
+ # There are no guarantees about the chronological order of shards returned.
+ # To process shards in chronological order, use the ID of the parent
+ # shard to track the lineage to the oldest shard.
+ #
+ # This operation has a limit of 10 transactions per second per account.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream to describe.
+ #
+ # @option params [Integer] :limit
+ # The maximum number of shards to return in a single call. The default
+ # value is 100. If you specify a value greater than 100, at most 100
+ # shards are returned.
+ #
+ # @option params [String] :exclusive_start_shard_id
+ # The shard ID of the shard to start with.
+ #
+ # @return [Types::DescribeStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeStreamOutput#stream_description #stream_description} => Types::StreamDescription
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_stream({
+ # stream_name: "StreamName", # required
+ # limit: 1,
+ # exclusive_start_shard_id: "ShardId",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.stream_description.stream_name #=> String
+ # resp.stream_description.stream_arn #=> String
+ # resp.stream_description.stream_status #=> String, one of "CREATING", "DELETING", "ACTIVE", "UPDATING"
+ # resp.stream_description.shards #=> Array
+ # resp.stream_description.shards[0].shard_id #=> String
+ # resp.stream_description.shards[0].parent_shard_id #=> String
+ # resp.stream_description.shards[0].adjacent_parent_shard_id #=> String
+ # resp.stream_description.shards[0].hash_key_range.starting_hash_key #=> String
+ # resp.stream_description.shards[0].hash_key_range.ending_hash_key #=> String
+ # resp.stream_description.shards[0].sequence_number_range.starting_sequence_number #=> String
+ # resp.stream_description.shards[0].sequence_number_range.ending_sequence_number #=> String
+ # resp.stream_description.has_more_shards #=> Boolean
+ # resp.stream_description.retention_period_hours #=> Integer
+ # resp.stream_description.stream_creation_timestamp #=> Time
+ # resp.stream_description.enhanced_monitoring #=> Array
+ # resp.stream_description.enhanced_monitoring[0].shard_level_metrics #=> Array
+ # resp.stream_description.enhanced_monitoring[0].shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeStream AWS API Documentation
+ #
+ # @overload describe_stream(params = {})
+ # @param [Hash] params ({})
+ def describe_stream(params = {}, options = {})
+ req = build_request(:describe_stream, params)
+ req.send_request(options)
+ end
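
Since each call returns at most 100 shards, retrieving a complete shard map means following `has_more_shards` with `exclusive_start_shard_id`, the pattern the referenced guide describes. A pagination sketch (stream name is illustrative):

    shards = []
    params = { stream_name: 'my-stream' }
    loop do
      desc = client.describe_stream(params).stream_description
      shards.concat(desc.shards)
      break unless desc.has_more_shards
      # Resume after the last shard returned by the previous page.
      params[:exclusive_start_shard_id] = shards.last.shard_id
    end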

- # Enables enhanced Amazon Kinesis stream monitoring for shard-level
- # metrics.
- # @option params [required, String] :stream_name
- # The name of the stream for which to enable enhanced monitoring.
- # @option params [required, Array<String>] :shard_level_metrics
- # List of shard-level metrics to enable.
- #
- # The following are the valid shard-level metrics. The value "`ALL`"
- # enables every metric.
- #
- # * `IncomingBytes`
- #
- # * `IncomingRecords`
- #
- # * `OutgoingBytes`
- #
- # * `OutgoingRecords`
- #
- # * `WriteProvisionedThroughputExceeded`
- #
- # * `ReadProvisionedThroughputExceeded`
- #
- # * `IteratorAgeMilliseconds`
- #
- # * `ALL`
- #
- # For more information, see [Monitoring the Amazon Kinesis Streams
- # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
- # Developer Guide*.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
- # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::EnhancedMonitoringOutput#stream_name #StreamName} => String
- # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #CurrentShardLevelMetrics} => Array<String>
- # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #DesiredShardLevelMetrics} => Array<String>
- #
- # @example Request syntax with placeholder values
- # resp = client.enable_enhanced_monitoring({
- # stream_name: "StreamName", # required
- # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
- # })
- #
- # @example Response structure
- # resp.stream_name #=> String
- # resp.current_shard_level_metrics #=> Array
- # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
- # resp.desired_shard_level_metrics #=> Array
- # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
- # @overload enable_enhanced_monitoring(params = {})
- # @param [Hash] params ({})
- def enable_enhanced_monitoring(params = {}, options = {})
- req = build_request(:enable_enhanced_monitoring, params)
- req.send_request(options)
- end
+ # Disables enhanced monitoring.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the Amazon Kinesis stream for which to disable enhanced
+ # monitoring.
+ #
+ # @option params [required, Array<String>] :shard_level_metrics
+ # List of shard-level metrics to disable.
+ #
+ # The following are the valid shard-level metrics. The value "`ALL`"
+ # disables every metric.
+ #
+ # * `IncomingBytes`
+ #
+ # * `IncomingRecords`
+ #
+ # * `OutgoingBytes`
+ #
+ # * `OutgoingRecords`
+ #
+ # * `WriteProvisionedThroughputExceeded`
+ #
+ # * `ReadProvisionedThroughputExceeded`
+ #
+ # * `IteratorAgeMilliseconds`
+ #
+ # * `ALL`
+ #
+ # For more information, see [Monitoring the Amazon Kinesis Streams
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
+ # Developer Guide*.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+ #
+ # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::EnhancedMonitoringOutput#stream_name #stream_name} => String
+ # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #current_shard_level_metrics} => Array<String>
+ # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #desired_shard_level_metrics} => Array<String>
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.disable_enhanced_monitoring({
+ # stream_name: "StreamName", # required
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.stream_name #=> String
+ # resp.current_shard_level_metrics #=> Array
+ # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
+ # resp.desired_shard_level_metrics #=> Array
+ # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DisableEnhancedMonitoring AWS API Documentation
+ #
+ # @overload disable_enhanced_monitoring(params = {})
+ # @param [Hash] params ({})
+ def disable_enhanced_monitoring(params = {}, options = {})
+ req = build_request(:disable_enhanced_monitoring, params)
+ req.send_request(options)
+ end

- # Gets data records from an Amazon Kinesis stream's shard.
- #
- # Specify a shard iterator using the `ShardIterator` parameter. The
- # shard iterator specifies the position in the shard from which you want
- # to start reading data records sequentially. If there are no records
- # available in the portion of the shard that the iterator points to,
- # GetRecords returns an empty list. Note that it might take multiple
- # calls to get to a portion of the shard that contains records.
- #
- # You can scale by provisioning multiple shards per stream while
- # considering service limits (for more information, see [Streams
- # Limits][1] in the *Amazon Kinesis Streams Developer Guide*). Your
- # application should have one thread per shard, each reading
- # continuously from its stream. To read from a stream continually, call
- # GetRecords in a loop. Use GetShardIterator to get the shard iterator
- # to specify in the first GetRecords call. GetRecords returns a new
- # shard iterator in `NextShardIterator`. Specify the shard iterator
- # returned in `NextShardIterator` in subsequent calls to GetRecords.
- # Note that if the shard has been closed, the shard iterator can't
- # return more data and GetRecords returns `null` in `NextShardIterator`.
- # You can terminate the loop when the shard is closed, or when the shard
- # iterator reaches the record with the sequence number or other
- # attribute that marks it as the last record to process.
- #
- # Each data record can be up to 1 MB in size, and each shard can read up
- # to 2 MB per second. You can ensure that your calls don't exceed the
- # maximum supported size or throughput by using the `Limit` parameter to
- # specify the maximum number of records that GetRecords can return.
- # Consider your average record size when determining this limit.
- #
- # The size of the data returned by GetRecords varies depending on the
- # utilization of the shard. The maximum size of data that GetRecords can
- # return is 10 MB. If a call returns this amount of data, subsequent
- # calls made within the next 5 seconds throw
- # `ProvisionedThroughputExceededException`. If there is insufficient
- # provisioned throughput on the shard, subsequent calls made within the
- # next 1 second throw `ProvisionedThroughputExceededException`. Note
- # that GetRecords won't return any data when it throws an exception.
- # For this reason, we recommend that you wait one second between calls
- # to GetRecords; however, it's possible that the application will get
- # exceptions for longer than 1 second.
- #
- # To detect whether the application is falling behind in processing, you
- # can use the `MillisBehindLatest` response attribute. You can also
- # monitor the stream using CloudWatch metrics and other mechanisms (see
- # [Monitoring][2] in the *Amazon Kinesis Streams Developer Guide*).
- #
- # Each Amazon Kinesis record includes a value,
- # `ApproximateArrivalTimestamp`, that is set when a stream successfully
- # receives and stores a record. This is commonly referred to as a
- # server-side timestamp, whereas a client-side timestamp is set when a
- # data producer creates or sends the record to a stream (a data producer
- # is any data source putting data records into a stream, for example
- # with PutRecords). The timestamp has millisecond precision. There are
- # no guarantees about the timestamp accuracy, or that the timestamp is
- # always increasing. For example, records in a shard or across a stream
- # might have timestamps that are out of order.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
- # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html
- # @option params [required, String] :shard_iterator
- # The position in the shard from which you want to start sequentially
- # reading data records. A shard iterator specifies this position using
- # the sequence number of a data record in the shard.
- # @option params [Integer] :limit
- # The maximum number of records to return. Specify a value of up to
- # 10,000. If you specify a value that is greater than 10,000, GetRecords
- # throws `InvalidArgumentException`.
- # @return [Types::GetRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetRecordsOutput#records #Records} => Array<Types::Record>
- # * {Types::GetRecordsOutput#next_shard_iterator #NextShardIterator} => String
- # * {Types::GetRecordsOutput#millis_behind_latest #MillisBehindLatest} => Integer
- #
- # @example Request syntax with placeholder values
- # resp = client.get_records({
- # shard_iterator: "ShardIterator", # required
- # limit: 1,
- # })
- #
- # @example Response structure
- # resp.records #=> Array
- # resp.records[0].sequence_number #=> String
- # resp.records[0].approximate_arrival_timestamp #=> Time
- # resp.records[0].data #=> String
- # resp.records[0].partition_key #=> String
- # resp.next_shard_iterator #=> String
- # resp.millis_behind_latest #=> Integer
- # @overload get_records(params = {})
- # @param [Hash] params ({})
- def get_records(params = {}, options = {})
- req = build_request(:get_records, params)
- req.send_request(options)
- end
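
The read loop described above (seed one iterator per shard with GetShardIterator, then follow `NextShardIterator`) looks roughly like this in practice. Stream and shard IDs are placeholders, `process` stands in for application code, and the one-second pause follows the guidance above:

    iterator = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'TRIM_HORIZON'
    ).shard_iterator

    while iterator
      resp = client.get_records(shard_iterator: iterator, limit: 100)
      resp.records.each { |record| process(record.data) }
      iterator = resp.next_shard_iterator  # nil once the shard is closed
      sleep 1
    end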
+ # Enables enhanced Amazon Kinesis stream monitoring for shard-level
+ # metrics.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream for which to enable enhanced monitoring.
+ #
+ # @option params [required, Array<String>] :shard_level_metrics
+ # List of shard-level metrics to enable.
+ #
+ # The following are the valid shard-level metrics. The value "`ALL`"
+ # enables every metric.
+ #
+ # * `IncomingBytes`
+ #
+ # * `IncomingRecords`
+ #
+ # * `OutgoingBytes`
+ #
+ # * `OutgoingRecords`
+ #
+ # * `WriteProvisionedThroughputExceeded`
+ #
+ # * `ReadProvisionedThroughputExceeded`
+ #
+ # * `IteratorAgeMilliseconds`
+ #
+ # * `ALL`
+ #
+ # For more information, see [Monitoring the Amazon Kinesis Streams
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
+ # Developer Guide*.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+ #
+ # @return [Types::EnhancedMonitoringOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::EnhancedMonitoringOutput#stream_name #stream_name} => String
+ # * {Types::EnhancedMonitoringOutput#current_shard_level_metrics #current_shard_level_metrics} => Array<String>
+ # * {Types::EnhancedMonitoringOutput#desired_shard_level_metrics #desired_shard_level_metrics} => Array<String>
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.enable_enhanced_monitoring({
+ # stream_name: "StreamName", # required
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.stream_name #=> String
+ # resp.current_shard_level_metrics #=> Array
+ # resp.current_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
+ # resp.desired_shard_level_metrics #=> Array
+ # resp.desired_shard_level_metrics[0] #=> String, one of "IncomingBytes", "IncomingRecords", "OutgoingBytes", "OutgoingRecords", "WriteProvisionedThroughputExceeded", "ReadProvisionedThroughputExceeded", "IteratorAgeMilliseconds", "ALL"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/EnableEnhancedMonitoring AWS API Documentation
+ #
+ # @overload enable_enhanced_monitoring(params = {})
+ # @param [Hash] params ({})
+ def enable_enhanced_monitoring(params = {}, options = {})
+ req = build_request(:enable_enhanced_monitoring, params)
+ req.send_request(options)
+ end
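
The response shape, with separate current and desired metric lists, suggests that metric changes do not take effect instantly. A brief sketch of enabling two metrics and inspecting both lists (stream name is illustrative):

    resp = client.enable_enhanced_monitoring(
      stream_name: 'my-stream',
      shard_level_metrics: ['IncomingBytes', 'OutgoingBytes']
    )
    puts "current: #{resp.current_shard_level_metrics.inspect}"
    puts "desired: #{resp.desired_shard_level_metrics.inspect}"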

- # Gets an Amazon Kinesis shard iterator. A shard iterator expires five
- # minutes after it is returned to the requester.
- #
- # A shard iterator specifies the shard position from which to start
- # reading data records sequentially. The position is specified using the
- # sequence number of a data record in a shard. A sequence number is the
- # identifier associated with every record ingested in the stream, and is
- # assigned when a record is put into the stream. Each stream has one or
- # more shards.
- #
- # You must specify the shard iterator type. For example, you can set the
- # `ShardIteratorType` parameter to read exactly from the position
- # denoted by a specific sequence number by using the
- # `AT_SEQUENCE_NUMBER` shard iterator type, or right after the sequence
- # number by using the `AFTER_SEQUENCE_NUMBER` shard iterator type, using
- # sequence numbers returned by earlier calls to PutRecord, PutRecords,
- # GetRecords, or DescribeStream. In the request, you can specify the
- # shard iterator type `AT_TIMESTAMP` to read records from an arbitrary
- # point in time, `TRIM_HORIZON` to cause `ShardIterator` to point to the
- # last untrimmed record in the shard in the system (the oldest data
- # record in the shard), or `LATEST` so that you always read the most
- # recent data in the shard.
- #
- # When you read repeatedly from a stream, use a GetShardIterator request
- # to get the first shard iterator for use in your first GetRecords
- # request and for subsequent reads use the shard iterator returned by
- # the GetRecords request in `NextShardIterator`. A new shard iterator is
- # returned by every GetRecords request in `NextShardIterator`, which you
- # use in the `ShardIterator` parameter of the next GetRecords request.
- #
- # If a GetShardIterator request is made too often, you receive a
- # `ProvisionedThroughputExceededException`. For more information about
- # throughput limits, see GetRecords, and [Streams Limits][1] in the
- # *Amazon Kinesis Streams Developer Guide*.
- #
- # If the shard is closed, GetShardIterator returns a valid iterator for
- # the last sequence number of the shard. Note that a shard can be closed
- # as a result of using SplitShard or MergeShards.
- #
- # GetShardIterator has a limit of 5 transactions per second per account
- # per open shard.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
- # @option params [required, String] :stream_name
- # The name of the Amazon Kinesis stream.
- # @option params [required, String] :shard_id
- # The shard ID of the Amazon Kinesis shard to get the iterator for.
- # @option params [required, String] :shard_iterator_type
- # Determines how the shard iterator is used to start reading data
- # records from the shard.
- #
- # The following are the valid Amazon Kinesis shard iterator types:
- #
- # * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by a
- # specific sequence number, provided in the value
- # `StartingSequenceNumber`.
- #
- # * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
- # denoted by a specific sequence number, provided in the value
- # `StartingSequenceNumber`.
- #
- # * AT\_TIMESTAMP - Start reading from the position denoted by a
- # specific timestamp, provided in the value `Timestamp`.
- #
- # * TRIM\_HORIZON - Start reading at the last untrimmed record in the
- # shard in the system, which is the oldest data record in the shard.
- #
- # * LATEST - Start reading just after the most recent record in the
677
- # shard, so that you always read the most recent data in the shard.
678
- # @option params [String] :starting_sequence_number
679
- # The sequence number of the data record in the shard from which to
680
- # start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER and
681
- # AFTER\_SEQUENCE\_NUMBER.
682
- # @option params [Time,DateTime,Date,Integer,String] :timestamp
683
- # The timestamp of the data record from which to start reading. Used
684
- # with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix epoch
685
- # date with precision in milliseconds. For example,
686
- # `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record with
687
- # this exact timestamp does not exist, the iterator returned is for the
688
- # next (later) record. If the timestamp is older than the current trim
689
- # horizon, the iterator returned is for the oldest untrimmed data record
690
- # (TRIM\_HORIZON).
691
- # @return [Types::GetShardIteratorOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
692
- #
693
- # * {Types::GetShardIteratorOutput#shard_iterator #ShardIterator} => String
694
- #
695
- # @example Request syntax with placeholder values
696
- # resp = client.get_shard_iterator({
697
- # stream_name: "StreamName", # required
698
- # shard_id: "ShardId", # required
699
- # shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
700
- # starting_sequence_number: "SequenceNumber",
701
- # timestamp: Time.now,
702
- # })
703
- #
704
- # @example Response structure
705
- # resp.shard_iterator #=> String
706
- # @overload get_shard_iterator(params = {})
707
- # @param [Hash] params ({})
708
- def get_shard_iterator(params = {}, options = {})
709
- req = build_request(:get_shard_iterator, params)
710
- req.send_request(options)
711
- end
580
+ # Gets data records from an Amazon Kinesis stream's shard.
581
+ #
582
+ # Specify a shard iterator using the `ShardIterator` parameter. The
583
+ # shard iterator specifies the position in the shard from which you want
584
+ # to start reading data records sequentially. If there are no records
585
+ # available in the portion of the shard that the iterator points to,
586
+ # GetRecords returns an empty list. Note that it might take multiple
587
+ # calls to get to a portion of the shard that contains records.
588
+ #
589
+ # You can scale by provisioning multiple shards per stream while
590
+ # considering service limits (for more information, see [Streams
591
+ # Limits][1] in the *Amazon Kinesis Streams Developer Guide*). Your
592
+ # application should have one thread per shard, each reading
593
+ # continuously from its shard. To read from a stream continually, call
594
+ # GetRecords in a loop. Use GetShardIterator to get the shard iterator
595
+ # to specify in the first GetRecords call. GetRecords returns a new
596
+ # shard iterator in `NextShardIterator`. Specify the shard iterator
597
+ # returned in `NextShardIterator` in subsequent calls to GetRecords.
598
+ # Note that if the shard has been closed, the shard iterator can't
599
+ # return more data and GetRecords returns `null` in `NextShardIterator`.
600
+ # You can terminate the loop when the shard is closed, or when the shard
601
+ # iterator reaches the record with the sequence number or other
602
+ # attribute that marks it as the last record to process.
603
+ #
604
+ # Each data record can be up to 1 MB in size, and each shard can read up
605
+ # to 2 MB per second. You can ensure that your calls don't exceed the
606
+ # maximum supported size or throughput by using the `Limit` parameter to
607
+ # specify the maximum number of records that GetRecords can return.
608
+ # Consider your average record size when determining this limit.
609
+ #
610
+ # The size of the data returned by GetRecords varies depending on the
611
+ # utilization of the shard. The maximum size of data that GetRecords can
612
+ # return is 10 MB. If a call returns this amount of data, subsequent
613
+ # calls made within the next 5 seconds throw
614
+ # `ProvisionedThroughputExceededException`. If there is insufficient
615
+ # provisioned throughput on the shard, subsequent calls made within the
616
+ # next 1 second throw `ProvisionedThroughputExceededException`. Note
617
+ # that GetRecords won't return any data when it throws an exception.
618
+ # For this reason, we recommend that you wait one second between calls
619
+ # to GetRecords; however, it's possible that the application will get
620
+ # exceptions for longer than 1 second.
621
+ #
622
+ # To detect whether the application is falling behind in processing, you
623
+ # can use the `MillisBehindLatest` response attribute. You can also
624
+ # monitor the stream using CloudWatch metrics and other mechanisms (see
625
+ # [Monitoring][2] in the *Amazon Kinesis Streams Developer Guide*).
626
+ #
627
+ # Each Amazon Kinesis record includes a value,
628
+ # `ApproximateArrivalTimestamp`, that is set when a stream successfully
629
+ # receives and stores a record. This is commonly referred to as a
630
+ # server-side timestamp, whereas a client-side timestamp is set when a
631
+ # data producer creates or sends the record to a stream (a data producer
632
+ # is any data source putting data records into a stream, for example
633
+ # with PutRecords). The timestamp has millisecond precision. There are
634
+ # no guarantees about the timestamp accuracy, or that the timestamp is
635
+ # always increasing. For example, records in a shard or across a stream
636
+ # might have timestamps that are out of order.
637
+ #
638
+ #
639
+ #
640
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
641
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html
642
+ #
643
+ # @option params [required, String] :shard_iterator
644
+ # The position in the shard from which you want to start sequentially
645
+ # reading data records. A shard iterator specifies this position using
646
+ # the sequence number of a data record in the shard.
647
+ #
648
+ # @option params [Integer] :limit
649
+ # The maximum number of records to return. Specify a value of up to
650
+ # 10,000. If you specify a value that is greater than 10,000, GetRecords
651
+ # throws `InvalidArgumentException`.
652
+ #
653
+ # @return [Types::GetRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
654
+ #
655
+ # * {Types::GetRecordsOutput#records #records} => Array&lt;Types::Record&gt;
656
+ # * {Types::GetRecordsOutput#next_shard_iterator #next_shard_iterator} => String
657
+ # * {Types::GetRecordsOutput#millis_behind_latest #millis_behind_latest} => Integer
658
+ #
659
+ # @example Request syntax with placeholder values
660
+ #
661
+ # resp = client.get_records({
662
+ # shard_iterator: "ShardIterator", # required
663
+ # limit: 1,
664
+ # })
665
+ #
666
+ # @example Response structure
667
+ #
668
+ # resp.records #=> Array
669
+ # resp.records[0].sequence_number #=> String
670
+ # resp.records[0].approximate_arrival_timestamp #=> Time
671
+ # resp.records[0].data #=> String
672
+ # resp.records[0].partition_key #=> String
673
+ # resp.next_shard_iterator #=> String
674
+ # resp.millis_behind_latest #=> Integer
675
+ #
676
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetRecords AWS API Documentation
677
+ #
678
+ # @overload get_records(params = {})
679
+ # @param [Hash] params ({})
680
+ def get_records(params = {}, options = {})
681
+ req = build_request(:get_records, params)
682
+ req.send_request(options)
683
+ end
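
The doc comment above describes the canonical read loop: seed with GetShardIterator, then follow `NextShardIterator` until the shard closes. A minimal sketch, assuming a placeholder stream name, shard ID, and region:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Seed the loop with an iterator at the oldest untrimmed record.
    iterator = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'TRIM_HORIZON'
    ).shard_iterator

    # next_shard_iterator is nil once the shard is closed, ending the loop.
    while iterator
      resp = client.get_records(shard_iterator: iterator, limit: 1000)
      resp.records.each { |r| puts "#{r.sequence_number}: #{r.data}" }
      iterator = resp.next_shard_iterator
      sleep 1 # the guidance above recommends about one second between calls
    end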
712
684
 
713
- # Increases the Amazon Kinesis stream's retention period, which is the
714
- # length of time data records are accessible after they are added to the
715
- # stream. The maximum value of a stream's retention period is 168 hours
716
- # (7 days).
717
- #
718
- # Upon choosing a longer stream retention period, this operation will
719
- # increase the time period records are accessible that have not yet
720
- # expired. However, it will not make previous data that has expired
721
- # (older than the stream's previous retention period) accessible after
722
- # the operation has been called. For example, if a stream's retention
723
- # period is set to 24 hours and is increased to 168 hours, any data that
724
- # is older than 24 hours will remain inaccessible to consumer
725
- # applications.
726
- # @option params [required, String] :stream_name
727
- # The name of the stream to modify.
728
- # @option params [required, Integer] :retention_period_hours
729
- # The new retention period of the stream, in hours. Must be more than
730
- # the current retention period.
731
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
732
- #
733
- # @example Request syntax with placeholder values
734
- # resp = client.increase_stream_retention_period({
735
- # stream_name: "StreamName", # required
736
- # retention_period_hours: 1, # required
737
- # })
738
- # @overload increase_stream_retention_period(params = {})
739
- # @param [Hash] params ({})
740
- def increase_stream_retention_period(params = {}, options = {})
741
- req = build_request(:increase_stream_retention_period, params)
742
- req.send_request(options)
743
- end
685
+ # Gets an Amazon Kinesis shard iterator. A shard iterator expires five
686
+ # minutes after it is returned to the requester.
687
+ #
688
+ # A shard iterator specifies the shard position from which to start
689
+ # reading data records sequentially. The position is specified using the
690
+ # sequence number of a data record in a shard. A sequence number is the
691
+ # identifier associated with every record ingested in the stream, and is
692
+ # assigned when a record is put into the stream. Each stream has one or
693
+ # more shards.
694
+ #
695
+ # You must specify the shard iterator type. For example, you can set the
696
+ # `ShardIteratorType` parameter to read exactly from the position
697
+ # denoted by a specific sequence number by using the
698
+ # `AT_SEQUENCE_NUMBER` shard iterator type, or right after the sequence
699
+ # number by using the `AFTER_SEQUENCE_NUMBER` shard iterator type, using
700
+ # sequence numbers returned by earlier calls to PutRecord, PutRecords,
701
+ # GetRecords, or DescribeStream. In the request, you can specify the
702
+ # shard iterator type `AT_TIMESTAMP` to read records from an arbitrary
703
+ # point in time, `TRIM_HORIZON` to cause `ShardIterator` to point to the
704
+ # last untrimmed record in the shard in the system (the oldest data
705
+ # record in the shard), or `LATEST` so that you always read the most
706
+ # recent data in the shard.
707
+ #
708
+ # When you read repeatedly from a stream, use a GetShardIterator request
709
+ # to get the first shard iterator for use in your first GetRecords
710
+ # request; for subsequent reads, use the shard iterator returned by
711
+ # the GetRecords request in `NextShardIterator`. A new shard iterator is
712
+ # returned by every GetRecords request in `NextShardIterator`, which you
713
+ # use in the `ShardIterator` parameter of the next GetRecords request.
714
+ #
715
+ # If a GetShardIterator request is made too often, you receive a
716
+ # `ProvisionedThroughputExceededException`. For more information about
717
+ # throughput limits, see GetRecords, and [Streams Limits][1] in the
718
+ # *Amazon Kinesis Streams Developer Guide*.
719
+ #
720
+ # If the shard is closed, GetShardIterator returns a valid iterator for
721
+ # the last sequence number of the shard. Note that a shard can be closed
722
+ # as a result of using SplitShard or MergeShards.
723
+ #
724
+ # GetShardIterator has a limit of 5 transactions per second per account
725
+ # per open shard.
726
+ #
727
+ #
728
+ #
729
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
730
+ #
731
+ # @option params [required, String] :stream_name
732
+ # The name of the Amazon Kinesis stream.
733
+ #
734
+ # @option params [required, String] :shard_id
735
+ # The shard ID of the Amazon Kinesis shard to get the iterator for.
736
+ #
737
+ # @option params [required, String] :shard_iterator_type
738
+ # Determines how the shard iterator is used to start reading data
739
+ # records from the shard.
740
+ #
741
+ # The following are the valid Amazon Kinesis shard iterator types:
742
+ #
743
+ # * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by a
744
+ # specific sequence number, provided in the value
745
+ # `StartingSequenceNumber`.
746
+ #
747
+ # * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
748
+ # denoted by a specific sequence number, provided in the value
749
+ # `StartingSequenceNumber`.
750
+ #
751
+ # * AT\_TIMESTAMP - Start reading from the position denoted by a
752
+ # specific timestamp, provided in the value `Timestamp`.
753
+ #
754
+ # * TRIM\_HORIZON - Start reading at the last untrimmed record in the
755
+ # shard in the system, which is the oldest data record in the shard.
756
+ #
757
+ # * LATEST - Start reading just after the most recent record in the
758
+ # shard, so that you always read the most recent data in the shard.
759
+ #
760
+ # @option params [String] :starting_sequence_number
761
+ # The sequence number of the data record in the shard from which to
762
+ # start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER and
763
+ # AFTER\_SEQUENCE\_NUMBER.
764
+ #
765
+ # @option params [Time,DateTime,Date,Integer,String] :timestamp
766
+ # The timestamp of the data record from which to start reading. Used
767
+ # with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix epoch
768
+ # date with precision in milliseconds. For example,
769
+ # `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record with
770
+ # this exact timestamp does not exist, the iterator returned is for the
771
+ # next (later) record. If the timestamp is older than the current trim
772
+ # horizon, the iterator returned is for the oldest untrimmed data record
773
+ # (TRIM\_HORIZON).
774
+ #
775
+ # @return [Types::GetShardIteratorOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
776
+ #
777
+ # * {Types::GetShardIteratorOutput#shard_iterator #shard_iterator} => String
778
+ #
779
+ # @example Request syntax with placeholder values
780
+ #
781
+ # resp = client.get_shard_iterator({
782
+ # stream_name: "StreamName", # required
783
+ # shard_id: "ShardId", # required
784
+ # shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
785
+ # starting_sequence_number: "SequenceNumber",
786
+ # timestamp: Time.now,
787
+ # })
788
+ #
789
+ # @example Response structure
790
+ #
791
+ # resp.shard_iterator #=> String
792
+ #
793
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetShardIterator AWS API Documentation
794
+ #
795
+ # @overload get_shard_iterator(params = {})
796
+ # @param [Hash] params ({})
797
+ def get_shard_iterator(params = {}, options = {})
798
+ req = build_request(:get_shard_iterator, params)
799
+ req.send_request(options)
800
+ end
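
A sketch of requesting iterators with two of the types listed above; the stream name, shard ID, and sequence number are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Resume exactly at a previously checkpointed sequence number.
    at_seq = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'AT_SEQUENCE_NUMBER',
      starting_sequence_number: '49554' # placeholder checkpoint value
    ).shard_iterator

    # Replay from an arbitrary point in time (here, one hour ago).
    at_time = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'AT_TIMESTAMP',
      timestamp: Time.now - 3600
    ).shard_iterator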
744
801
 
745
- # Lists your Amazon Kinesis streams.
746
- #
747
- # The number of streams may be too large to return from a single call to
748
- # `ListStreams`. You can limit the number of returned streams using the
749
- # `Limit` parameter. If you do not specify a value for the `Limit`
750
- # parameter, Amazon Kinesis uses the default limit, which is currently
751
- # 10.
752
- #
753
- # You can detect if there are more streams available to list by using
754
- # the `HasMoreStreams` flag from the returned output. If there are more
755
- # streams available, you can request more streams by using the name of
756
- # the last stream returned by the `ListStreams` request in the
757
- # `ExclusiveStartStreamName` parameter in a subsequent request to
758
- # `ListStreams`. The group of stream names returned by the subsequent
759
- # request is then added to the list. You can continue this process until
760
- # all the stream names have been collected in the list.
761
- #
762
- # ListStreams has a limit of 5 transactions per second per account.
763
- # @option params [Integer] :limit
764
- # The maximum number of streams to list.
765
- # @option params [String] :exclusive_start_stream_name
766
- # The name of the stream to start the list with.
767
- # @return [Types::ListStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
768
- #
769
- # * {Types::ListStreamsOutput#stream_names #StreamNames} => Array&lt;String&gt;
770
- # * {Types::ListStreamsOutput#has_more_streams #HasMoreStreams} => Boolean
771
- #
772
- # @example Request syntax with placeholder values
773
- # resp = client.list_streams({
774
- # limit: 1,
775
- # exclusive_start_stream_name: "StreamName",
776
- # })
777
- #
778
- # @example Response structure
779
- # resp.stream_names #=> Array
780
- # resp.stream_names[0] #=> String
781
- # resp.has_more_streams #=> Boolean
782
- # @overload list_streams(params = {})
783
- # @param [Hash] params ({})
784
- def list_streams(params = {}, options = {})
785
- req = build_request(:list_streams, params)
786
- req.send_request(options)
787
- end
802
+ # Increases the Amazon Kinesis stream's retention period, which is the
803
+ # length of time data records are accessible after they are added to the
804
+ # stream. The maximum value of a stream's retention period is 168 hours
805
+ # (7 days).
806
+ #
807
+ # Upon choosing a longer stream retention period, this operation
808
+ # increases the time period during which records that have not yet
809
+ # expired are accessible. However, it does not make data that has expired
810
+ # (older than the stream's previous retention period) accessible after
811
+ # the operation has been called. For example, if a stream's retention
812
+ # period is set to 24 hours and is increased to 168 hours, any data that
813
+ # is older than 24 hours will remain inaccessible to consumer
814
+ # applications.
815
+ #
816
+ # @option params [required, String] :stream_name
817
+ # The name of the stream to modify.
818
+ #
819
+ # @option params [required, Integer] :retention_period_hours
820
+ # The new retention period of the stream, in hours. Must be more than
821
+ # the current retention period.
822
+ #
823
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
824
+ #
825
+ # @example Request syntax with placeholder values
826
+ #
827
+ # resp = client.increase_stream_retention_period({
828
+ # stream_name: "StreamName", # required
829
+ # retention_period_hours: 1, # required
830
+ # })
831
+ #
832
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/IncreaseStreamRetentionPeriod AWS API Documentation
833
+ #
834
+ # @overload increase_stream_retention_period(params = {})
835
+ # @param [Hash] params ({})
836
+ def increase_stream_retention_period(params = {}, options = {})
837
+ req = build_request(:increase_stream_retention_period, params)
838
+ req.send_request(options)
839
+ end
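
For example, raising a stream from the 24-hour default to the 168-hour maximum described above (stream name and region are placeholders):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    # Must be greater than the stream's current retention period.
    client.increase_stream_retention_period(
      stream_name: 'my-stream',
      retention_period_hours: 168
    )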
788
840
 
789
- # Lists the tags for the specified Amazon Kinesis stream.
790
- # @option params [required, String] :stream_name
791
- # The name of the stream.
792
- # @option params [String] :exclusive_start_tag_key
793
- # The key to use as the starting point for the list of tags. If this
794
- # parameter is set, `ListTagsForStream` gets all tags that occur after
795
- # `ExclusiveStartTagKey`.
796
- # @option params [Integer] :limit
797
- # The number of tags to return. If this number is less than the total
798
- # number of tags associated with the stream, `HasMoreTags` is set to
799
- # `true`. To list additional tags, set `ExclusiveStartTagKey` to the
800
- # last key in the response.
801
- # @return [Types::ListTagsForStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
802
- #
803
- # * {Types::ListTagsForStreamOutput#tags #Tags} => Array&lt;Types::Tag&gt;
804
- # * {Types::ListTagsForStreamOutput#has_more_tags #HasMoreTags} => Boolean
805
- #
806
- # @example Request syntax with placeholder values
807
- # resp = client.list_tags_for_stream({
808
- # stream_name: "StreamName", # required
809
- # exclusive_start_tag_key: "TagKey",
810
- # limit: 1,
811
- # })
812
- #
813
- # @example Response structure
814
- # resp.tags #=> Array
815
- # resp.tags[0].key #=> String
816
- # resp.tags[0].value #=> String
817
- # resp.has_more_tags #=> Boolean
818
- # @overload list_tags_for_stream(params = {})
819
- # @param [Hash] params ({})
820
- def list_tags_for_stream(params = {}, options = {})
821
- req = build_request(:list_tags_for_stream, params)
822
- req.send_request(options)
823
- end
841
+ # Lists your Amazon Kinesis streams.
842
+ #
843
+ # The number of streams may be too large to return from a single call to
844
+ # `ListStreams`. You can limit the number of returned streams using the
845
+ # `Limit` parameter. If you do not specify a value for the `Limit`
846
+ # parameter, Amazon Kinesis uses the default limit, which is currently
847
+ # 10.
848
+ #
849
+ # You can detect if there are more streams available to list by using
850
+ # the `HasMoreStreams` flag from the returned output. If there are more
851
+ # streams available, you can request more streams by using the name of
852
+ # the last stream returned by the `ListStreams` request in the
853
+ # `ExclusiveStartStreamName` parameter in a subsequent request to
854
+ # `ListStreams`. The group of stream names returned by the subsequent
855
+ # request is then added to the list. You can continue this process until
856
+ # all the stream names have been collected in the list.
857
+ #
858
+ # ListStreams has a limit of 5 transactions per second per account.
859
+ #
860
+ # @option params [Integer] :limit
861
+ # The maximum number of streams to list.
862
+ #
863
+ # @option params [String] :exclusive_start_stream_name
864
+ # The name of the stream to start the list with.
865
+ #
866
+ # @return [Types::ListStreamsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
867
+ #
868
+ # * {Types::ListStreamsOutput#stream_names #stream_names} => Array&lt;String&gt;
869
+ # * {Types::ListStreamsOutput#has_more_streams #has_more_streams} => Boolean
870
+ #
871
+ # @example Request syntax with placeholder values
872
+ #
873
+ # resp = client.list_streams({
874
+ # limit: 1,
875
+ # exclusive_start_stream_name: "StreamName",
876
+ # })
877
+ #
878
+ # @example Response structure
879
+ #
880
+ # resp.stream_names #=> Array
881
+ # resp.stream_names[0] #=> String
882
+ # resp.has_more_streams #=> Boolean
883
+ #
884
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListStreams AWS API Documentation
885
+ #
886
+ # @overload list_streams(params = {})
887
+ # @param [Hash] params ({})
888
+ def list_streams(params = {}, options = {})
889
+ req = build_request(:list_streams, params)
890
+ req.send_request(options)
891
+ end
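
A sketch of the pagination handshake described above, collecting every stream name by feeding the last returned name back in as `ExclusiveStartStreamName` until `HasMoreStreams` is false:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    names = []
    params = { limit: 10 }
    loop do
      resp = client.list_streams(params)
      names.concat(resp.stream_names)
      break unless resp.has_more_streams
      params[:exclusive_start_stream_name] = resp.stream_names.last
    end
    puts names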
824
892
 
825
- # Merges two adjacent shards in an Amazon Kinesis stream and combines
826
- # them into a single shard to reduce the stream's capacity to ingest
827
- # and transport data. Two shards are considered adjacent if the union of
828
- # the hash key ranges for the two shards form a contiguous set with no
829
- # gaps. For example, if you have two shards, one with a hash key range
830
- # of 276...381 and the other with a hash key range of 382...454, then
831
- # you could merge these two shards into a single shard that would have a
832
- # hash key range of 276...454. After the merge, the single child shard
833
- # receives data for all hash key values covered by the two parent
834
- # shards.
835
- #
836
- # `MergeShards` is called when there is a need to reduce the overall
837
- # capacity of a stream because of excess capacity that is not being
838
- # used. You must specify the shard to be merged and the adjacent shard
839
- # for a stream. For more information about merging shards, see [Merge
840
- # Two Shards][1] in the *Amazon Kinesis Streams Developer Guide*.
841
- #
842
- # If the stream is in the `ACTIVE` state, you can call `MergeShards`. If
843
- # a stream is in the `CREATING`, `UPDATING`, or `DELETING` state,
844
- # `MergeShards` returns a `ResourceInUseException`. If the specified
845
- # stream does not exist, `MergeShards` returns a
846
- # `ResourceNotFoundException`.
847
- #
848
- # You can use DescribeStream to check the state of the stream, which is
849
- # returned in `StreamStatus`.
850
- #
851
- # `MergeShards` is an asynchronous operation. Upon receiving a
852
- # `MergeShards` request, Amazon Kinesis immediately returns a response
853
- # and sets the `StreamStatus` to `UPDATING`. After the operation is
854
- # completed, Amazon Kinesis sets the `StreamStatus` to `ACTIVE`. Read
855
- # and write operations continue to work while the stream is in the
856
- # `UPDATING` state.
857
- #
858
- # You use DescribeStream to determine the shard IDs that are specified
859
- # in the `MergeShards` request.
860
- #
861
- # If you try to operate on too many streams in parallel using
862
- # CreateStream, DeleteStream, `MergeShards` or SplitShard, you will
863
- # receive a `LimitExceededException`.
864
- #
865
- # `MergeShards` has limit of 5 transactions per second per account.
866
- #
867
- #
868
- #
869
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html
870
- # @option params [required, String] :stream_name
871
- # The name of the stream for the merge.
872
- # @option params [required, String] :shard_to_merge
873
- # The shard ID of the shard to combine with the adjacent shard for the
874
- # merge.
875
- # @option params [required, String] :adjacent_shard_to_merge
876
- # The shard ID of the adjacent shard for the merge.
877
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
878
- #
879
- # @example Request syntax with placeholder values
880
- # resp = client.merge_shards({
881
- # stream_name: "StreamName", # required
882
- # shard_to_merge: "ShardId", # required
883
- # adjacent_shard_to_merge: "ShardId", # required
884
- # })
885
- # @overload merge_shards(params = {})
886
- # @param [Hash] params ({})
887
- def merge_shards(params = {}, options = {})
888
- req = build_request(:merge_shards, params)
889
- req.send_request(options)
890
- end
893
+ # Lists the tags for the specified Amazon Kinesis stream.
894
+ #
895
+ # @option params [required, String] :stream_name
896
+ # The name of the stream.
897
+ #
898
+ # @option params [String] :exclusive_start_tag_key
899
+ # The key to use as the starting point for the list of tags. If this
900
+ # parameter is set, `ListTagsForStream` gets all tags that occur after
901
+ # `ExclusiveStartTagKey`.
902
+ #
903
+ # @option params [Integer] :limit
904
+ # The number of tags to return. If this number is less than the total
905
+ # number of tags associated with the stream, `HasMoreTags` is set to
906
+ # `true`. To list additional tags, set `ExclusiveStartTagKey` to the
907
+ # last key in the response.
908
+ #
909
+ # @return [Types::ListTagsForStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
910
+ #
911
+ # * {Types::ListTagsForStreamOutput#tags #tags} => Array&lt;Types::Tag&gt;
912
+ # * {Types::ListTagsForStreamOutput#has_more_tags #has_more_tags} => Boolean
913
+ #
914
+ # @example Request syntax with placeholder values
915
+ #
916
+ # resp = client.list_tags_for_stream({
917
+ # stream_name: "StreamName", # required
918
+ # exclusive_start_tag_key: "TagKey",
919
+ # limit: 1,
920
+ # })
921
+ #
922
+ # @example Response structure
923
+ #
924
+ # resp.tags #=> Array
925
+ # resp.tags[0].key #=> String
926
+ # resp.tags[0].value #=> String
927
+ # resp.has_more_tags #=> Boolean
928
+ #
929
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListTagsForStream AWS API Documentation
930
+ #
931
+ # @overload list_tags_for_stream(params = {})
932
+ # @param [Hash] params ({})
933
+ def list_tags_for_stream(params = {}, options = {})
934
+ req = build_request(:list_tags_for_stream, params)
935
+ req.send_request(options)
936
+ end
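
The same handshake works for tags, using `ExclusiveStartTagKey` and `HasMoreTags` (stream name is a placeholder):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    tags = []
    params = { stream_name: 'my-stream', limit: 10 }
    loop do
      resp = client.list_tags_for_stream(params)
      tags.concat(resp.tags)
      break unless resp.has_more_tags
      params[:exclusive_start_tag_key] = resp.tags.last.key
    end
    tags.each { |t| puts "#{t.key}=#{t.value}" }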
891
937
 
892
- # Writes a single data record into an Amazon Kinesis stream. Call
893
- # `PutRecord` to send data into the stream for real-time ingestion and
894
- # subsequent processing, one record at a time. Each shard can support
895
- # writes up to 1,000 records per second, up to a maximum data write
896
- # total of 1 MB per second.
897
- #
898
- # You must specify the name of the stream that captures, stores, and
899
- # transports the data; a partition key; and the data blob itself.
900
- #
901
- # The data blob can be any type of data; for example, a segment from a
902
- # log file, geographic/location data, website clickstream data, and so
903
- # on.
904
- #
905
- # The partition key is used by Amazon Kinesis to distribute data across
906
- # shards. Amazon Kinesis segregates the data records that belong to a
907
- # stream into multiple shards, using the partition key associated with
908
- # each data record to determine which shard a given data record belongs
909
- # to.
910
- #
911
- # Partition keys are Unicode strings, with a maximum length limit of 256
912
- # characters for each key. An MD5 hash function is used to map partition
913
- # keys to 128-bit integer values and to map associated data records to
914
- # shards using the hash key ranges of the shards. You can override
915
- # hashing the partition key to determine the shard by explicitly
916
- # specifying a hash value using the `ExplicitHashKey` parameter. For
917
- # more information, see [Adding Data to a Stream][1] in the *Amazon
918
- # Kinesis Streams Developer Guide*.
919
- #
920
- # `PutRecord` returns the shard ID of where the data record was placed
921
- # and the sequence number that was assigned to the data record.
922
- #
923
- # Sequence numbers increase over time and are specific to a shard within
924
- # a stream, not across all shards within a stream. To guarantee strictly
925
- # increasing ordering, write serially to a shard and use the
926
- # `SequenceNumberForOrdering` parameter. For more information, see
927
- # [Adding Data to a Stream][1] in the *Amazon Kinesis Streams Developer
928
- # Guide*.
929
- #
930
- # If a `PutRecord` request cannot be processed because of insufficient
931
- # provisioned throughput on the shard involved in the request,
932
- # `PutRecord` throws `ProvisionedThroughputExceededException`.
933
- #
934
- # Data records are accessible for only 24 hours from the time that they
935
- # are added to a stream.
936
- #
937
- #
938
- #
939
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
940
- # @option params [required, String] :stream_name
941
- # The name of the stream to put the data record into.
942
- # @option params [required, String, IO] :data
943
- # The data blob to put into the record, which is base64-encoded when the
944
- # blob is serialized. When the data blob (the payload before
945
- # base64-encoding) is added to the partition key size, the total size
946
- # must not exceed the maximum record size (1 MB).
947
- # @option params [required, String] :partition_key
948
- # Determines which shard in the stream the data record is assigned to.
949
- # Partition keys are Unicode strings with a maximum length limit of 256
950
- # characters for each key. Amazon Kinesis uses the partition key as
951
- # input to a hash function that maps the partition key and associated
952
- # data to a specific shard. Specifically, an MD5 hash function is used
953
- # to map partition keys to 128-bit integer values and to map associated
954
- # data records to shards. As a result of this hashing mechanism, all
955
- # data records with the same partition key map to the same shard within
956
- # the stream.
957
- # @option params [String] :explicit_hash_key
958
- # The hash value used to explicitly determine the shard the data record
959
- # is assigned to by overriding the partition key hash.
960
- # @option params [String] :sequence_number_for_ordering
961
- # Guarantees strictly increasing sequence numbers, for puts from the
962
- # same client and to the same partition key. Usage: set the
963
- # `SequenceNumberForOrdering` of record *n* to the sequence number of
964
- # record *n-1* (as returned in the result when putting record *n-1*). If
965
- # this parameter is not set, records will be coarsely ordered based on
966
- # arrival time.
967
- # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
968
- #
969
- # * {Types::PutRecordOutput#shard_id #ShardId} => String
970
- # * {Types::PutRecordOutput#sequence_number #SequenceNumber} => String
971
- #
972
- # @example Request syntax with placeholder values
973
- # resp = client.put_record({
974
- # stream_name: "StreamName", # required
975
- # data: "data", # required
976
- # partition_key: "PartitionKey", # required
977
- # explicit_hash_key: "HashKey",
978
- # sequence_number_for_ordering: "SequenceNumber",
979
- # })
980
- #
981
- # @example Response structure
982
- # resp.shard_id #=> String
983
- # resp.sequence_number #=> String
984
- # @overload put_record(params = {})
985
- # @param [Hash] params ({})
986
- def put_record(params = {}, options = {})
987
- req = build_request(:put_record, params)
988
- req.send_request(options)
989
- end
938
+ # Merges two adjacent shards in an Amazon Kinesis stream and combines
939
+ # them into a single shard to reduce the stream's capacity to ingest
940
+ # and transport data. Two shards are considered adjacent if the union of
941
+ # the hash key ranges for the two shards form a contiguous set with no
942
+ # gaps. For example, if you have two shards, one with a hash key range
943
+ # of 276...381 and the other with a hash key range of 382...454, then
944
+ # you could merge these two shards into a single shard that would have a
945
+ # hash key range of 276...454. After the merge, the single child shard
946
+ # receives data for all hash key values covered by the two parent
947
+ # shards.
948
+ #
949
+ # `MergeShards` is called when there is a need to reduce the overall
950
+ # capacity of a stream because of excess capacity that is not being
951
+ # used. You must specify the shard to be merged and the adjacent shard
952
+ # for a stream. For more information about merging shards, see [Merge
953
+ # Two Shards][1] in the *Amazon Kinesis Streams Developer Guide*.
954
+ #
955
+ # If the stream is in the `ACTIVE` state, you can call `MergeShards`. If
956
+ # a stream is in the `CREATING`, `UPDATING`, or `DELETING` state,
957
+ # `MergeShards` returns a `ResourceInUseException`. If the specified
958
+ # stream does not exist, `MergeShards` returns a
959
+ # `ResourceNotFoundException`.
960
+ #
961
+ # You can use DescribeStream to check the state of the stream, which is
962
+ # returned in `StreamStatus`.
963
+ #
964
+ # `MergeShards` is an asynchronous operation. Upon receiving a
965
+ # `MergeShards` request, Amazon Kinesis immediately returns a response
966
+ # and sets the `StreamStatus` to `UPDATING`. After the operation is
967
+ # completed, Amazon Kinesis sets the `StreamStatus` to `ACTIVE`. Read
968
+ # and write operations continue to work while the stream is in the
969
+ # `UPDATING` state.
970
+ #
971
+ # You use DescribeStream to determine the shard IDs that are specified
972
+ # in the `MergeShards` request.
973
+ #
974
+ # If you try to operate on too many streams in parallel using
975
+ # CreateStream, DeleteStream, `MergeShards` or SplitShard, you will
976
+ # receive a `LimitExceededException`.
977
+ #
978
+ # `MergeShards` has a limit of 5 transactions per second per account.
979
+ #
980
+ #
981
+ #
982
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html
983
+ #
984
+ # @option params [required, String] :stream_name
985
+ # The name of the stream for the merge.
986
+ #
987
+ # @option params [required, String] :shard_to_merge
988
+ # The shard ID of the shard to combine with the adjacent shard for the
989
+ # merge.
990
+ #
991
+ # @option params [required, String] :adjacent_shard_to_merge
992
+ # The shard ID of the adjacent shard for the merge.
993
+ #
994
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
995
+ #
996
+ # @example Request syntax with placeholder values
997
+ #
998
+ # resp = client.merge_shards({
999
+ # stream_name: "StreamName", # required
1000
+ # shard_to_merge: "ShardId", # required
1001
+ # adjacent_shard_to_merge: "ShardId", # required
1002
+ # })
1003
+ #
1004
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/MergeShards AWS API Documentation
1005
+ #
1006
+ # @overload merge_shards(params = {})
1007
+ # @param [Hash] params ({})
1008
+ def merge_shards(params = {}, options = {})
1009
+ req = build_request(:merge_shards, params)
1010
+ req.send_request(options)
1011
+ end
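
A sketch of the asynchronous flow described above: issue the merge, then poll DescribeStream until `StreamStatus` returns to `ACTIVE`. The stream name and shard IDs are placeholders, and adjacency should be verified against the shards' hash key ranges first:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    client.merge_shards(
      stream_name: 'my-stream',
      shard_to_merge: 'shardId-000000000000',
      adjacent_shard_to_merge: 'shardId-000000000001'
    )

    # MergeShards returns immediately with the stream in UPDATING.
    loop do
      status = client.describe_stream(stream_name: 'my-stream')
                     .stream_description.stream_status
      break if status == 'ACTIVE'
      sleep 5
    end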
990
1012
 
991
- # Writes multiple data records into an Amazon Kinesis stream in a single
992
- # call (also referred to as a `PutRecords` request). Use this operation
993
- # to send data into the stream for data ingestion and processing.
994
- #
995
- # Each `PutRecords` request can support up to 500 records. Each record
996
- # in the request can be as large as 1 MB, up to a limit of 5 MB for the
997
- # entire request, including partition keys. Each shard can support
998
- # writes up to 1,000 records per second, up to a maximum data write
999
- # total of 1 MB per second.
1000
- #
1001
- # You must specify the name of the stream that captures, stores, and
1002
- # transports the data; and an array of request `Records`, with each
1003
- # record in the array requiring a partition key and data blob. The
1004
- # record size limit applies to the total size of the partition key and
1005
- # data blob.
1006
- #
1007
- # The data blob can be any type of data; for example, a segment from a
1008
- # log file, geographic/location data, website clickstream data, and so
1009
- # on.
1010
- #
1011
- # The partition key is used by Amazon Kinesis as input to a hash
1012
- # function that maps the partition key and associated data to a specific
1013
- # shard. An MD5 hash function is used to map partition keys to 128-bit
1014
- # integer values and to map associated data records to shards. As a
1015
- # result of this hashing mechanism, all data records with the same
1016
- # partition key map to the same shard within the stream. For more
1017
- # information, see [Adding Data to a Stream][1] in the *Amazon Kinesis
1018
- # Streams Developer Guide*.
1019
- #
1020
- # Each record in the `Records` array may include an optional parameter,
1021
- # `ExplicitHashKey`, which overrides the partition key to shard mapping.
1022
- # This parameter allows a data producer to determine explicitly the
1023
- # shard where the record is stored. For more information, see [Adding
1024
- # Multiple Records with PutRecords][2] in the *Amazon Kinesis Streams
1025
- # Developer Guide*.
1026
- #
1027
- # The `PutRecords` response includes an array of response `Records`.
1028
- # Each record in the response array directly correlates with a record in
1029
- # the request array using natural ordering, from the top to the bottom
1030
- # of the request and response. The response `Records` array always
1031
- # includes the same number of records as the request array.
1032
- #
1033
- # The response `Records` array includes both successfully and
1034
- # unsuccessfully processed records. Amazon Kinesis attempts to process
1035
- # all records in each `PutRecords` request. A single record failure does
1036
- # not stop the processing of subsequent records.
1037
- #
1038
- # A successfully-processed record includes `ShardId` and
1039
- # `SequenceNumber` values. The `ShardId` parameter identifies the shard
1040
- # in the stream where the record is stored. The `SequenceNumber`
1041
- # parameter is an identifier assigned to the put record, unique to all
1042
- # records in the stream.
1043
- #
1044
- # An unsuccessfully-processed record includes `ErrorCode` and
1045
- # `ErrorMessage` values. `ErrorCode` reflects the type of error and can
1046
- # be one of the following values:
1047
- # `ProvisionedThroughputExceededException` or `InternalFailure`.
1048
- # `ErrorMessage` provides more detailed information about the
1049
- # `ProvisionedThroughputExceededException` exception including the
1050
- # account ID, stream name, and shard ID of the record that was
1051
- # throttled. For more information about partially successful responses,
1052
- # see [Adding Multiple Records with PutRecords][3] in the *Amazon
1053
- # Kinesis Streams Developer Guide*.
1054
- #
1055
- # By default, data records are accessible for only 24 hours from the
1056
- # time that they are added to an Amazon Kinesis stream. This retention
1057
- # period can be modified using the DecreaseStreamRetentionPeriod and
1058
- # IncreaseStreamRetentionPeriod operations.
1059
- #
1060
- #
1061
- #
1062
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
1063
- # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords
1064
- # [3]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords
1065
- # @option params [required, Array<Types::PutRecordsRequestEntry>] :records
1066
- # The records associated with the request.
1067
- # @option params [required, String] :stream_name
1068
- # The stream name associated with the request.
1069
- # @return [Types::PutRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1070
- #
1071
- # * {Types::PutRecordsOutput#failed_record_count #FailedRecordCount} => Integer
1072
- # * {Types::PutRecordsOutput#records #Records} => Array&lt;Types::PutRecordsResultEntry&gt;
1073
- #
1074
- # @example Request syntax with placeholder values
1075
- # resp = client.put_records({
1076
- # records: [ # required
1077
- # {
1078
- # data: "data", # required
1079
- # explicit_hash_key: "HashKey",
1080
- # partition_key: "PartitionKey", # required
1081
- # },
1082
- # ],
1083
- # stream_name: "StreamName", # required
1084
- # })
1085
- #
1086
- # @example Response structure
1087
- # resp.failed_record_count #=> Integer
1088
- # resp.records #=> Array
1089
- # resp.records[0].sequence_number #=> String
1090
- # resp.records[0].shard_id #=> String
1091
- # resp.records[0].error_code #=> String
1092
- # resp.records[0].error_message #=> String
1093
- # @overload put_records(params = {})
1094
- # @param [Hash] params ({})
1095
- def put_records(params = {}, options = {})
1096
- req = build_request(:put_records, params)
1097
- req.send_request(options)
1098
- end
1013
+ # Writes a single data record into an Amazon Kinesis stream. Call
1014
+ # `PutRecord` to send data into the stream for real-time ingestion and
1015
+ # subsequent processing, one record at a time. Each shard can support
1016
+ # writes up to 1,000 records per second, up to a maximum data write
1017
+ # total of 1 MB per second.
1018
+ #
1019
+ # You must specify the name of the stream that captures, stores, and
1020
+ # transports the data; a partition key; and the data blob itself.
1021
+ #
1022
+ # The data blob can be any type of data; for example, a segment from a
1023
+ # log file, geographic/location data, website clickstream data, and so
1024
+ # on.
1025
+ #
1026
+ # The partition key is used by Amazon Kinesis to distribute data across
1027
+ # shards. Amazon Kinesis segregates the data records that belong to a
1028
+ # stream into multiple shards, using the partition key associated with
1029
+ # each data record to determine which shard a given data record belongs
1030
+ # to.
1031
+ #
1032
+ # Partition keys are Unicode strings, with a maximum length limit of 256
1033
+ # characters for each key. An MD5 hash function is used to map partition
1034
+ # keys to 128-bit integer values and to map associated data records to
1035
+ # shards using the hash key ranges of the shards. You can override
1036
+ # hashing the partition key to determine the shard by explicitly
1037
+ # specifying a hash value using the `ExplicitHashKey` parameter. For
1038
+ # more information, see [Adding Data to a Stream][1] in the *Amazon
1039
+ # Kinesis Streams Developer Guide*.
1040
+ #
1041
+ # `PutRecord` returns the shard ID of where the data record was placed
1042
+ # and the sequence number that was assigned to the data record.
1043
+ #
1044
+ # Sequence numbers increase over time and are specific to a shard within
1045
+ # a stream, not across all shards within a stream. To guarantee strictly
1046
+ # increasing ordering, write serially to a shard and use the
1047
+ # `SequenceNumberForOrdering` parameter. For more information, see
1048
+ # [Adding Data to a Stream][1] in the *Amazon Kinesis Streams Developer
1049
+ # Guide*.
1050
+ #
1051
+ # If a `PutRecord` request cannot be processed because of insufficient
1052
+ # provisioned throughput on the shard involved in the request,
1053
+ # `PutRecord` throws `ProvisionedThroughputExceededException`.
1054
+ #
1055
+ # By default, data records are accessible for only 24 hours from the
1056
+ # time that they are added to a stream.
1057
+ #
1058
+ #
1059
+ #
1060
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
1061
+ #
1062
+ # @option params [required, String] :stream_name
1063
+ # The name of the stream to put the data record into.
1064
+ #
1065
+ # @option params [required, String, IO] :data
1066
+ # The data blob to put into the record, which is base64-encoded when the
1067
+ # blob is serialized. When the data blob (the payload before
1068
+ # base64-encoding) is added to the partition key size, the total size
1069
+ # must not exceed the maximum record size (1 MB).
1070
+ #
1071
+ # @option params [required, String] :partition_key
1072
+ # Determines which shard in the stream the data record is assigned to.
1073
+ # Partition keys are Unicode strings with a maximum length limit of 256
1074
+ # characters for each key. Amazon Kinesis uses the partition key as
1075
+ # input to a hash function that maps the partition key and associated
1076
+ # data to a specific shard. Specifically, an MD5 hash function is used
1077
+ # to map partition keys to 128-bit integer values and to map associated
1078
+ # data records to shards. As a result of this hashing mechanism, all
1079
+ # data records with the same partition key map to the same shard within
1080
+ # the stream.
1081
+ #
1082
+ # @option params [String] :explicit_hash_key
1083
+ # The hash value used to explicitly determine the shard the data record
1084
+ # is assigned to by overriding the partition key hash.
1085
+ #
1086
+ # @option params [String] :sequence_number_for_ordering
1087
+ # Guarantees strictly increasing sequence numbers, for puts from the
1088
+ # same client and to the same partition key. Usage: set the
1089
+ # `SequenceNumberForOrdering` of record *n* to the sequence number of
1090
+ # record *n-1* (as returned in the result when putting record *n-1*). If
1091
+ # this parameter is not set, records will be coarsely ordered based on
1092
+ # arrival time.
1093
+ #
1094
+ # @return [Types::PutRecordOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1095
+ #
1096
+ # * {Types::PutRecordOutput#shard_id #shard_id} => String
1097
+ # * {Types::PutRecordOutput#sequence_number #sequence_number} => String
1098
+ #
1099
+ # @example Request syntax with placeholder values
1100
+ #
1101
+ # resp = client.put_record({
1102
+ # stream_name: "StreamName", # required
1103
+ # data: "data", # required
1104
+ # partition_key: "PartitionKey", # required
1105
+ # explicit_hash_key: "HashKey",
1106
+ # sequence_number_for_ordering: "SequenceNumber",
1107
+ # })
1108
+ #
1109
+ # @example Response structure
1110
+ #
1111
+ # resp.shard_id #=> String
1112
+ # resp.sequence_number #=> String
1113
+ #
1114
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecord AWS API Documentation
1115
+ #
1116
+ # @overload put_record(params = {})
1117
+ # @param [Hash] params ({})
1118
+ def put_record(params = {}, options = {})
1119
+ req = build_request(:put_record, params)
1120
+ req.send_request(options)
1121
+ end
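
A sketch of the `SequenceNumberForOrdering` chaining described above: serial puts to the same partition key, each seeded with the sequence number returned by the previous put (stream name, data, and key are placeholders):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')
    first = client.put_record(
      stream_name: 'my-stream',
      data: 'event-1',
      partition_key: 'user-42'
    )
    second = client.put_record(
      stream_name: 'my-stream',
      data: 'event-2',
      partition_key: 'user-42',
      # Guarantees a sequence number strictly greater than first's.
      sequence_number_for_ordering: first.sequence_number
    )
    puts "#{first.sequence_number} -> #{second.sequence_number}"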
1099
1122
 
1100
- # Removes tags from the specified Amazon Kinesis stream. Removed tags
1101
- # are deleted and cannot be recovered after this operation successfully
1102
- # completes.
1103
- #
1104
- # If you specify a tag that does not exist, it is ignored.
1105
- # @option params [required, String] :stream_name
1106
- # The name of the stream.
1107
- # @option params [required, Array<String>] :tag_keys
1108
- # A list of tag keys. Each corresponding tag is removed from the stream.
1109
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
1110
- #
1111
- # @example Request syntax with placeholder values
1112
- # resp = client.remove_tags_from_stream({
- # stream_name: "StreamName", # required
- # tag_keys: ["TagKey"], # required
- # })
- # @overload remove_tags_from_stream(params = {})
- # @param [Hash] params ({})
- def remove_tags_from_stream(params = {}, options = {})
- req = build_request(:remove_tags_from_stream, params)
- req.send_request(options)
- end
+ # Writes multiple data records into an Amazon Kinesis stream in a single
+ # call (also referred to as a `PutRecords` request). Use this operation
+ # to send data into the stream for data ingestion and processing.
+ #
+ # Each `PutRecords` request can support up to 500 records. Each record
+ # in the request can be as large as 1 MB, up to a limit of 5 MB for the
+ # entire request, including partition keys. Each shard can support
+ # writes up to 1,000 records per second, up to a maximum data write
+ # total of 1 MB per second.
+ #
+ # You must specify the name of the stream that captures, stores, and
+ # transports the data; and an array of request `Records`, with each
+ # record in the array requiring a partition key and data blob. The
+ # record size limit applies to the total size of the partition key and
+ # data blob.
+ #
+ # The data blob can be any type of data; for example, a segment from a
+ # log file, geographic/location data, website clickstream data, and so
+ # on.
+ #
+ # The partition key is used by Amazon Kinesis as input to a hash
+ # function that maps the partition key and associated data to a specific
+ # shard. An MD5 hash function is used to map partition keys to 128-bit
+ # integer values and to map associated data records to shards. As a
+ # result of this hashing mechanism, all data records with the same
+ # partition key map to the same shard within the stream. For more
+ # information, see [Adding Data to a Stream][1] in the *Amazon Kinesis
+ # Streams Developer Guide*.
+ #
+ # Each record in the `Records` array may include an optional parameter,
+ # `ExplicitHashKey`, which overrides the partition key to shard mapping.
+ # This parameter allows a data producer to determine explicitly the
+ # shard where the record is stored. For more information, see [Adding
+ # Multiple Records with PutRecords][2] in the *Amazon Kinesis Streams
+ # Developer Guide*.
+ #
+ # The `PutRecords` response includes an array of response `Records`.
+ # Each record in the response array directly correlates with a record in
+ # the request array using natural ordering, from the top to the bottom
+ # of the request and response. The response `Records` array always
+ # includes the same number of records as the request array.
+ #
+ # The response `Records` array includes both successfully and
+ # unsuccessfully processed records. Amazon Kinesis attempts to process
+ # all records in each `PutRecords` request. A single record failure does
+ # not stop the processing of subsequent records.
+ #
+ # A successfully-processed record includes `ShardId` and
+ # `SequenceNumber` values. The `ShardId` parameter identifies the shard
+ # in the stream where the record is stored. The `SequenceNumber`
+ # parameter is an identifier assigned to the put record, unique to all
+ # records in the stream.
+ #
+ # An unsuccessfully-processed record includes `ErrorCode` and
+ # `ErrorMessage` values. `ErrorCode` reflects the type of error and can
+ # be one of the following values:
+ # `ProvisionedThroughputExceededException` or `InternalFailure`.
+ # `ErrorMessage` provides more detailed information about the
+ # `ProvisionedThroughputExceededException` exception including the
+ # account ID, stream name, and shard ID of the record that was
+ # throttled. For more information about partially successful responses,
+ # see [Adding Multiple Records with PutRecords][3] in the *Amazon
+ # Kinesis Streams Developer Guide*.
+ #
+ # By default, data records are accessible for only 24 hours from the
+ # time that they are added to an Amazon Kinesis stream. This retention
+ # period can be modified using the DecreaseStreamRetentionPeriod and
+ # IncreaseStreamRetentionPeriod operations.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords
+ # [3]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords
+ #
+ # @option params [required, Array<Types::PutRecordsRequestEntry>] :records
+ # The records associated with the request.
+ #
+ # @option params [required, String] :stream_name
+ # The stream name associated with the request.
+ #
+ # @return [Types::PutRecordsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::PutRecordsOutput#failed_record_count #failed_record_count} => Integer
+ # * {Types::PutRecordsOutput#records #records} => Array<Types::PutRecordsResultEntry>
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.put_records({
+ # records: [ # required
+ # {
+ # data: "data", # required
+ # explicit_hash_key: "HashKey",
+ # partition_key: "PartitionKey", # required
+ # },
+ # ],
+ # stream_name: "StreamName", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.failed_record_count #=> Integer
+ # resp.records #=> Array
+ # resp.records[0].sequence_number #=> String
+ # resp.records[0].shard_id #=> String
+ # resp.records[0].error_code #=> String
+ # resp.records[0].error_message #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecords AWS API Documentation
+ #
+ # @overload put_records(params = {})
+ # @param [Hash] params ({})
+ def put_records(params = {}, options = {})
+ req = build_request(:put_records, params)
+ req.send_request(options)
+ end

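A minimal usage sketch of the batching-and-retry pattern the PutRecords documentation above describes. The region, stream name, and payloads are hypothetical placeholders; only `put_records` and the `records`/`error_code` response fields come from the generated client shown in this diff.

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Build a batch of entries (up to 500 per request); each entry needs a
    # data blob and a partition key. Payloads here are placeholders.
    entries = (1..10).map { |i| { data: "event-#{i}", partition_key: "user-#{i % 3}" } }

    until entries.empty?
      resp = client.put_records(stream_name: 'my-stream', records: entries)
      # Response records correlate one-to-one with request entries, so zip
      # them together and keep only the entries whose result carries an
      # error_code (for example, a throttled shard), then retry those.
      entries = entries.zip(resp.records)
                       .select { |_entry, result| result.error_code }
                       .map(&:first)
      sleep 1 unless entries.empty? # back off before retrying throttled writes
    end
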
- # Splits a shard into two new shards in the Amazon Kinesis stream to
- # increase the stream's capacity to ingest and transport data.
- # `SplitShard` is called when there is a need to increase the overall
- # capacity of a stream because of an expected increase in the volume of
- # data records being ingested.
- #
- # You can also use `SplitShard` when a shard appears to be approaching
- # its maximum utilization; for example, the producers sending data into
- # the specific shard are suddenly sending more than previously
- # anticipated. You can also call `SplitShard` to increase stream
- # capacity, so that more Amazon Kinesis applications can simultaneously
- # read data from the stream for real-time processing.
- #
- # You must specify the shard to be split and the new hash key, which is
- # the position in the shard where the shard gets split in two. In many
- # cases, the new hash key might simply be the average of the beginning
- # and ending hash key, but it can be any hash key value in the range
- # being mapped into the shard. For more information about splitting
- # shards, see [Split a Shard][1] in the *Amazon Kinesis Streams
- # Developer Guide*.
- #
- # You can use DescribeStream to determine the shard ID and hash key
- # values for the `ShardToSplit` and `NewStartingHashKey` parameters that
- # are specified in the `SplitShard` request.
- #
- # `SplitShard` is an asynchronous operation. Upon receiving a
- # `SplitShard` request, Amazon Kinesis immediately returns a response
- # and sets the stream status to `UPDATING`. After the operation is
- # completed, Amazon Kinesis sets the stream status to `ACTIVE`. Read and
- # write operations continue to work while the stream is in the
- # `UPDATING` state.
- #
- # You can use `DescribeStream` to check the status of the stream, which
- # is returned in `StreamStatus`. If the stream is in the `ACTIVE` state,
- # you can call `SplitShard`. If a stream is in `CREATING` or `UPDATING`
- # or `DELETING` states, `DescribeStream` returns a
- # `ResourceInUseException`.
- #
- # If the specified stream does not exist, `DescribeStream` returns a
- # `ResourceNotFoundException`. If you try to create more shards than are
- # authorized for your account, you receive a `LimitExceededException`.
- #
- # For the default shard limit for an AWS account, see [Streams
- # Limits][2] in the *Amazon Kinesis Streams Developer Guide*. If you
- # need to increase this limit, [contact AWS Support][3].
- #
- # If you try to operate on too many streams simultaneously using
- # CreateStream, DeleteStream, MergeShards, and/or SplitShard, you
- # receive a `LimitExceededException`.
- #
- # `SplitShard` has limit of 5 transactions per second per account.
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html
- # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
- # [3]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
- # @option params [required, String] :stream_name
- # The name of the stream for the shard split.
- # @option params [required, String] :shard_to_split
- # The shard ID of the shard to split.
- # @option params [required, String] :new_starting_hash_key
- # A hash key value for the starting hash key of one of the child shards
- # created by the split. The hash key range for a given shard constitutes
- # a set of ordered contiguous positive integers. The value for
- # `NewStartingHashKey` must be in the range of hash keys being mapped
- # into the shard. The `NewStartingHashKey` hash key value and all higher
- # hash key values in hash key range are distributed to one of the child
- # shards. All the lower hash key values in the range are distributed to
- # the other child shard.
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- # resp = client.split_shard({
- # stream_name: "StreamName", # required
- # shard_to_split: "ShardId", # required
- # new_starting_hash_key: "HashKey", # required
- # })
- # @overload split_shard(params = {})
- # @param [Hash] params ({})
- def split_shard(params = {}, options = {})
- req = build_request(:split_shard, params)
- req.send_request(options)
- end
+ # Removes tags from the specified Amazon Kinesis stream. Removed tags
+ # are deleted and cannot be recovered after this operation successfully
+ # completes.
+ #
+ # If you specify a tag that does not exist, it is ignored.
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream.
+ #
+ # @option params [required, Array<String>] :tag_keys
+ # A list of tag keys. Each corresponding tag is removed from the stream.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.remove_tags_from_stream({
+ # stream_name: "StreamName", # required
+ # tag_keys: ["TagKey"], # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/RemoveTagsFromStream AWS API Documentation
+ #
+ # @overload remove_tags_from_stream(params = {})
+ # @param [Hash] params ({})
+ def remove_tags_from_stream(params = {}, options = {})
+ req = build_request(:remove_tags_from_stream, params)
+ req.send_request(options)
+ end

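A quick, hypothetical usage sketch of the operation above. The stream name and tag keys are placeholders; `list_tags_for_stream` is the companion read operation from the same Kinesis client, used here only as a sanity check.

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Remove two tags; keys that are not present on the stream are ignored.
    client.remove_tags_from_stream(
      stream_name: 'my-stream',
      tag_keys: %w[environment owner]
    )

    # Verify what remains on the stream.
    resp = client.list_tags_for_stream(stream_name: 'my-stream')
    resp.tags.each { |tag| puts "#{tag.key}=#{tag.value}" }
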
- # Updates the shard count of the specified stream to the specified
- # number of shards.
- #
- # Updating the shard count is an asynchronous operation. Upon receiving
- # the request, Amazon Kinesis returns immediately and sets the status of
- # the stream to `UPDATING`. After the update is complete, Amazon Kinesis
- # sets the status of the stream back to `ACTIVE`. Depending on the size
- # of the stream, the scaling action could take a few minutes to
- # complete. You can continue to read and write data to your stream while
- # its status is `UPDATING`.
- #
- # To update the shard count, Amazon Kinesis performs splits and merges
- # and individual shards. This can cause short-lived shards to be
- # created, in addition to the final shards. We recommend that you double
- # or halve the shard count, as this results in the fewest number of
- # splits or merges.
- #
- # This operation has a rate limit of twice per rolling 24 hour period.
- # You cannot scale above double your current shard count, scale below
- # half your current shard count, or exceed the shard limits for your
- # account.
- #
- # For the default limits for an AWS account, see [Streams Limits][1] in
- # the *Amazon Kinesis Streams Developer Guide*. If you need to increase
- # a limit, [contact AWS Support][2].
- #
- #
- #
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
- # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
- # @option params [required, String] :stream_name
- # The name of the stream.
- # @option params [required, Integer] :target_shard_count
- # The new number of shards.
- # @option params [required, String] :scaling_type
- # The scaling type. Uniform scaling creates shards of equal size.
- # @return [Types::UpdateShardCountOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateShardCountOutput#stream_name #StreamName} => String
- # * {Types::UpdateShardCountOutput#current_shard_count #CurrentShardCount} => Integer
- # * {Types::UpdateShardCountOutput#target_shard_count #TargetShardCount} => Integer
- #
- # @example Request syntax with placeholder values
- # resp = client.update_shard_count({
- # stream_name: "StreamName", # required
- # target_shard_count: 1, # required
- # scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
- # })
- #
- # @example Response structure
- # resp.stream_name #=> String
- # resp.current_shard_count #=> Integer
- # resp.target_shard_count #=> Integer
- # @overload update_shard_count(params = {})
- # @param [Hash] params ({})
- def update_shard_count(params = {}, options = {})
- req = build_request(:update_shard_count, params)
- req.send_request(options)
- end
+ # Splits a shard into two new shards in the Amazon Kinesis stream to
+ # increase the stream's capacity to ingest and transport data.
+ # `SplitShard` is called when there is a need to increase the overall
+ # capacity of a stream because of an expected increase in the volume of
+ # data records being ingested.
+ #
+ # You can also use `SplitShard` when a shard appears to be approaching
+ # its maximum utilization; for example, the producers sending data into
+ # the specific shard are suddenly sending more than previously
+ # anticipated. You can also call `SplitShard` to increase stream
+ # capacity, so that more Amazon Kinesis applications can simultaneously
+ # read data from the stream for real-time processing.
+ #
+ # You must specify the shard to be split and the new hash key, which is
+ # the position in the shard where the shard gets split in two. In many
+ # cases, the new hash key might simply be the average of the beginning
+ # and ending hash key, but it can be any hash key value in the range
+ # being mapped into the shard. For more information about splitting
+ # shards, see [Split a Shard][1] in the *Amazon Kinesis Streams
+ # Developer Guide*.
+ #
+ # You can use DescribeStream to determine the shard ID and hash key
+ # values for the `ShardToSplit` and `NewStartingHashKey` parameters that
+ # are specified in the `SplitShard` request.
+ #
+ # `SplitShard` is an asynchronous operation. Upon receiving a
+ # `SplitShard` request, Amazon Kinesis immediately returns a response
+ # and sets the stream status to `UPDATING`. After the operation is
+ # completed, Amazon Kinesis sets the stream status to `ACTIVE`. Read and
+ # write operations continue to work while the stream is in the
+ # `UPDATING` state.
+ #
+ # You can use `DescribeStream` to check the status of the stream, which
+ # is returned in `StreamStatus`. If the stream is in the `ACTIVE` state,
+ # you can call `SplitShard`. If a stream is in `CREATING` or `UPDATING`
+ # or `DELETING` states, `DescribeStream` returns a
+ # `ResourceInUseException`.
+ #
+ # If the specified stream does not exist, `DescribeStream` returns a
+ # `ResourceNotFoundException`. If you try to create more shards than are
+ # authorized for your account, you receive a `LimitExceededException`.
+ #
+ # For the default shard limit for an AWS account, see [Streams
+ # Limits][2] in the *Amazon Kinesis Streams Developer Guide*. If you
+ # need to increase this limit, [contact AWS Support][3].
+ #
+ # If you try to operate on too many streams simultaneously using
+ # CreateStream, DeleteStream, MergeShards, and/or SplitShard, you
+ # receive a `LimitExceededException`.
+ #
+ # `SplitShard` has a limit of 5 transactions per second per account.
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html
+ # [2]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
+ # [3]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream for the shard split.
+ #
+ # @option params [required, String] :shard_to_split
+ # The shard ID of the shard to split.
+ #
+ # @option params [required, String] :new_starting_hash_key
+ # A hash key value for the starting hash key of one of the child shards
+ # created by the split. The hash key range for a given shard constitutes
+ # a set of ordered contiguous positive integers. The value for
+ # `NewStartingHashKey` must be in the range of hash keys being mapped
+ # into the shard. The `NewStartingHashKey` hash key value and all higher
+ # hash key values in the hash key range are distributed to one of the
+ # child shards. All the lower hash key values in the range are
+ # distributed to the other child shard.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.split_shard({
+ # stream_name: "StreamName", # required
+ # shard_to_split: "ShardId", # required
+ # new_starting_hash_key: "HashKey", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/SplitShard AWS API Documentation
+ #
+ # @overload split_shard(params = {})
+ # @param [Hash] params ({})
+ def split_shard(params = {}, options = {})
+ req = build_request(:split_shard, params)
+ req.send_request(options)
+ end

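The docs above suggest using DescribeStream to pick `NewStartingHashKey`; a common choice is the midpoint of the parent shard's hash key range, so each child receives half of the 128-bit hash key space mapped to the parent. A hypothetical sketch (the stream name and shard ID are placeholders):

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Look up the parent shard and its hash key range via describe_stream.
    shard = client.describe_stream(stream_name: 'my-stream')
                  .stream_description.shards
                  .find { |s| s.shard_id == 'shardId-000000000000' }

    # The API returns hash keys as decimal strings; Ruby integers handle
    # the full 128-bit range natively.
    low      = shard.hash_key_range.starting_hash_key.to_i
    high     = shard.hash_key_range.ending_hash_key.to_i
    midpoint = (low + high) / 2

    client.split_shard(
      stream_name: 'my-stream',
      shard_to_split: shard.shard_id,
      new_starting_hash_key: midpoint.to_s # the API takes the hash key as a string
    )
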
- # @!endgroup
+ # Updates the shard count of the specified stream to the specified
+ # number of shards.
+ #
+ # Updating the shard count is an asynchronous operation. Upon receiving
+ # the request, Amazon Kinesis returns immediately and sets the status of
+ # the stream to `UPDATING`. After the update is complete, Amazon Kinesis
+ # sets the status of the stream back to `ACTIVE`. Depending on the size
+ # of the stream, the scaling action could take a few minutes to
+ # complete. You can continue to read and write data to your stream while
+ # its status is `UPDATING`.
+ #
+ # To update the shard count, Amazon Kinesis performs splits or merges
+ # on individual shards. This can cause short-lived shards to be
+ # created, in addition to the final shards. We recommend that you double
+ # or halve the shard count, as this results in the fewest number of
+ # splits or merges.
+ #
+ # This operation has a rate limit of twice per rolling 24-hour period.
+ # You cannot scale above double your current shard count, scale below
+ # half your current shard count, or exceed the shard limits for your
+ # account.
+ #
+ # For the default limits for an AWS account, see [Streams Limits][1] in
+ # the *Amazon Kinesis Streams Developer Guide*. If you need to increase
+ # a limit, [contact AWS Support][2].
+ #
+ #
+ #
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
+ # [2]: http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
+ #
+ # @option params [required, String] :stream_name
+ # The name of the stream.
+ #
+ # @option params [required, Integer] :target_shard_count
+ # The new number of shards.
+ #
+ # @option params [required, String] :scaling_type
+ # The scaling type. Uniform scaling creates shards of equal size.
+ #
+ # @return [Types::UpdateShardCountOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::UpdateShardCountOutput#stream_name #stream_name} => String
+ # * {Types::UpdateShardCountOutput#current_shard_count #current_shard_count} => Integer
+ # * {Types::UpdateShardCountOutput#target_shard_count #target_shard_count} => Integer
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.update_shard_count({
+ # stream_name: "StreamName", # required
+ # target_shard_count: 1, # required
+ # scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.stream_name #=> String
+ # resp.current_shard_count #=> Integer
+ # resp.target_shard_count #=> Integer
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/UpdateShardCount AWS API Documentation
+ #
+ # @overload update_shard_count(params = {})
+ # @param [Hash] params ({})
+ def update_shard_count(params = {}, options = {})
+ req = build_request(:update_shard_count, params)
+ req.send_request(options)
+ end

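A hedged sketch of the doubling recommendation above. Counting only open shards (those whose sequence number range has no end) is an assumption about how you derive your current count, and the stream name and region are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    # Count open shards; shards closed by earlier reshardings carry an
    # ending sequence number and no longer accept writes.
    shards = client.describe_stream(stream_name: 'my-stream')
                   .stream_description.shards
    open_count = shards.count { |s| s.sequence_number_range.ending_sequence_number.nil? }

    resp = client.update_shard_count(
      stream_name: 'my-stream',
      target_shard_count: open_count * 2, # doubling minimizes interim splits/merges
      scaling_type: 'UNIFORM_SCALING'
    )
    puts "#{resp.stream_name}: #{resp.current_shard_count} -> #{resp.target_shard_count}"

    # The stream stays readable and writable while UPDATING; poll until
    # the status flips back to ACTIVE.
    sleep 10 until client.describe_stream(stream_name: 'my-stream')
                         .stream_description.stream_status == 'ACTIVE'
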
- # @param params ({})
- # @api private
- def build_request(operation_name, params = {})
- handlers = @handlers.for(operation_name)
- context = Seahorse::Client::RequestContext.new(
- operation_name: operation_name,
- operation: config.api.operation(operation_name),
- client: self,
- params: params,
- config: config)
- context[:gem_name] = 'aws-sdk-kinesis'
- context[:gem_version] = '1.0.0.rc1'
- Seahorse::Client::Request.new(handlers, context)
- end
+ # @!endgroup

- # Polls an API operation until a resource enters a desired state.
- #
- # ## Basic Usage
- #
- # A waiter will call an API operation until:
- #
- # * It is successful
- # * It enters a terminal state
- # * It makes the maximum number of attempts
- #
- # In between attempts, the waiter will sleep.
- #
- # # polls in a loop, sleeping between attempts
- # client.waiter_until(waiter_name, params)
- #
- # ## Configuration
- #
- # You can configure the maximum number of polling attempts, and the
- # delay (in seconds) between each polling attempt. You can pass
- # configuration as the final arguments hash.
- #
- # # poll for ~25 seconds
- # client.wait_until(waiter_name, params, {
- # max_attempts: 5,
- # delay: 5,
- # })
- #
- # ## Callbacks
- #
- # You can be notified before each polling attempt and before each
- # delay. If you throw `:success` or `:failure` from these callbacks,
- # it will terminate the waiter.
- #
- # started_at = Time.now
- # client.wait_until(waiter_name, params, {
- #
- # # disable max attempts
- # max_attempts: nil,
- #
- # # poll for 1 hour, instead of a number of attempts
- # before_wait: -> (attempts, response) do
- # throw :failure if Time.now - started_at > 3600
- # end
- # })
- #
- # ## Handling Errors
- #
- # When a waiter is unsuccessful, it will raise an error.
- # All of the failure errors extend from
- # {Aws::Waiters::Errors::WaiterFailed}.
- #
- # begin
- # client.wait_until(...)
- # rescue Aws::Waiters::Errors::WaiterFailed
- # # resource did not enter the desired state in time
- # end
- #
- # ## Valid Waiters
- #
- # The following table lists the valid waiter names, the operations they call,
- # and the default `:delay` and `:max_attempts` values.
- #
- # | waiter_name | params | :delay | :max_attempts |
- # | ------------- | ------------------ | -------- | ------------- |
- # | stream_exists | {#describe_stream} | 10 | 18 |
- #
- # @raise [Errors::FailureStateError] Raised when the waiter terminates
- # because the waiter has entered a state that it will not transition
- # out of, preventing success.
- #
- # @raise [Errors::TooManyAttemptsError] Raised when the configured
- # maximum number of attempts have been made, and the waiter is not
- # yet successful.
- #
- # @raise [Errors::UnexpectedError] Raised when an error is encounted
- # while polling for a resource that is not expected.
- #
- # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
- # for an unknown state.
- #
- # @return [Boolean] Returns `true` if the waiter was successful.
- # @param [Symbol] waiter_name
- # @param [Hash] params ({})
- # @param [Hash] options ({})
- # @option options [Integer] :max_attempts
- # @option options [Integer] :delay
- # @option options [Proc] :before_attempt
- # @option options [Proc] :before_wait
- def wait_until(waiter_name, params = {}, options = {})
- w = waiter(waiter_name, options)
- yield(w.waiter) if block_given? # deprecated
- w.wait(params)
- end
+ # @param params ({})
+ # @api private
+ def build_request(operation_name, params = {})
+ handlers = @handlers.for(operation_name)
+ context = Seahorse::Client::RequestContext.new(
+ operation_name: operation_name,
+ operation: config.api.operation(operation_name),
+ client: self,
+ params: params,
+ config: config)
+ context[:gem_name] = 'aws-sdk-kinesis'
+ context[:gem_version] = '1.0.0.rc2'
+ Seahorse::Client::Request.new(handlers, context)
+ end

- # @api private
- # @deprecated
- def waiter_names
- waiters.keys
- end
+ # Polls an API operation until a resource enters a desired state.
+ #
+ # ## Basic Usage
+ #
+ # A waiter will call an API operation until:
+ #
+ # * It is successful
+ # * It enters a terminal state
+ # * It makes the maximum number of attempts
+ #
+ # In between attempts, the waiter will sleep.
+ #
+ # # polls in a loop, sleeping between attempts
+ # client.wait_until(waiter_name, params)
+ #
+ # ## Configuration
+ #
+ # You can configure the maximum number of polling attempts, and the
+ # delay (in seconds) between each polling attempt. You can pass
+ # configuration as the final arguments hash.
+ #
+ # # poll for ~25 seconds
+ # client.wait_until(waiter_name, params, {
+ # max_attempts: 5,
+ # delay: 5,
+ # })
+ #
+ # ## Callbacks
+ #
+ # You can be notified before each polling attempt and before each
+ # delay. If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # client.wait_until(waiter_name, params, {
+ #
+ # # disable max attempts
+ # max_attempts: nil,
+ #
+ # # poll for 1 hour, instead of a number of attempts
+ # before_wait: -> (attempts, response) do
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ # })
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is unsuccessful, it will raise an error.
+ # All of the failure errors extend from
+ # {Aws::Waiters::Errors::WaiterFailed}.
+ #
+ # begin
+ # client.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # ## Valid Waiters
+ #
+ # The following table lists the valid waiter names, the operations they call,
+ # and the default `:delay` and `:max_attempts` values.
+ #
+ # | waiter_name | params | :delay | :max_attempts |
+ # | ----------------- | ------------------ | -------- | ------------- |
+ # | stream_exists | {#describe_stream} | 10 | 18 |
+ # | stream_not_exists | {#describe_stream} | 10 | 18 |
+ #
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
+ # because the waiter has entered a state that it will not transition
+ # out of, preventing success.
+ #
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
+ # maximum number of attempts has been made, and the waiter is not
+ # yet successful.
+ #
+ # @raise [Errors::UnexpectedError] Raised when an error is encountered
+ # while polling for a resource that is not expected.
+ #
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
+ # for an unknown state.
+ #
+ # @return [Boolean] Returns `true` if the waiter was successful.
+ # @param [Symbol] waiter_name
+ # @param [Hash] params ({})
+ # @param [Hash] options ({})
+ # @option options [Integer] :max_attempts
+ # @option options [Integer] :delay
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def wait_until(waiter_name, params = {}, options = {})
+ w = waiter(waiter_name, options)
+ yield(w.waiter) if block_given? # deprecated
+ w.wait(params)
+ end

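A short end-to-end sketch of the two waiters this release ships: `stream_exists`, and the `stream_not_exists` waiter added in rc2. The stream name, region, and shard count are hypothetical:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    client.create_stream(stream_name: 'my-stream', shard_count: 2)

    # Polls describe_stream (delay 10s, up to 18 attempts by default) until
    # the stream is ACTIVE; raises on failure rather than returning false.
    begin
      client.wait_until(:stream_exists, stream_name: 'my-stream')
      puts 'stream is ACTIVE'
    rescue Aws::Waiters::Errors::WaiterFailed => e
      warn "stream never became ACTIVE: #{e.message}"
    end

    # Teardown uses the waiter that is new in this release.
    client.delete_stream(stream_name: 'my-stream')
    client.wait_until(:stream_not_exists, stream_name: 'my-stream')
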
- private
+ # @api private
+ # @deprecated
+ def waiter_names
+ waiters.keys
+ end

- # @param [Symbol] waiter_name
- # @param [Hash] options ({})
- def waiter(waiter_name, options = {})
- waiter_class = waiters[waiter_name]
- if waiter_class
- waiter_class.new(options.merge(client: self))
- else
- raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
- end
- end
+ private

- def waiters
- {
- stream_exists: Waiters::StreamExists
- }
+ # @param [Symbol] waiter_name
+ # @param [Hash] options ({})
+ def waiter(waiter_name, options = {})
+ waiter_class = waiters[waiter_name]
+ if waiter_class
+ waiter_class.new(options.merge(client: self))
+ else
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
  end
+ end

- class << self
+ def waiters
+ {
+ stream_exists: Waiters::StreamExists,
+ stream_not_exists: Waiters::StreamNotExists
+ }
+ end

- # @api private
- attr_reader :identifier
+ class << self

- # @api private
- def errors_module
- Errors
- end
+ # @api private
+ attr_reader :identifier

+ # @api private
+ def errors_module
+ Errors
  end
+
  end
  end
  end