aws-sdk-kinesis 1.0.0.rc1 → 1.0.0.rc2

This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the package versions exactly as they were published to their public registry.
@@ -1,23 +1,14 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE

- module Aws
- module Kinesis
- module Errors
+ module Aws::Kinesis
+ module Errors

- extend Aws::Errors::DynamicErrors
+ extend Aws::Errors::DynamicErrors

- # Raised when calling #load or #data on a resource class that can not be
- # loaded. This can happen when:
- #
- # * A resource class has identifiers, but no data attributes.
- # * Resource data is only available when making an API call that
- # enumerates all resources of that type.
- class ResourceNotLoadable < RuntimeError; end
- end
  end
  end
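
Because `Aws::Kinesis::Errors` extends `Aws::Errors::DynamicErrors`, error classes such as `ResourceNotFoundException` are generated on first reference from the error code in the service response. A minimal sketch of rescuing them, assuming `aws-sdk-kinesis` is installed, credentials are configured, and the region and stream name are placeholders:

    require 'aws-sdk-kinesis'

    client = Aws::Kinesis::Client.new(region: 'us-east-1')

    begin
      client.describe_stream(stream_name: 'no-such-stream')
    rescue Aws::Kinesis::Errors::ResourceNotFoundException => e
      # Generated dynamically by DynamicErrors from the error code in the response.
      warn "stream not found: #{e.message}"
    rescue Aws::Errors::ServiceError => e
      # Catch-all base class for any other service error.
      warn "#{e.class}: #{e.message}"
    end
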
@@ -1,25 +1,23 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE

- module Aws
- module Kinesis
- class Resource
+ module Aws::Kinesis
+ class Resource

- # @param options ({})
- # @option options [Client] :client
- def initialize(options = {})
- @client = options[:client] || Client.new(options)
- end
-
- # @return [Client]
- def client
- @client
- end
+ # @param options ({})
+ # @option options [Client] :client
+ def initialize(options = {})
+ @client = options[:client] || Client.new(options)
+ end

+ # @return [Client]
+ def client
+ @client
  end
+
  end
  end
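
The `Resource` class above either wraps a client you pass in or builds one from the same options a `Client` accepts. A short sketch of both paths, with the region as a placeholder and credentials assumed to be configured elsewhere:

    require 'aws-sdk-kinesis'

    # Reuse an existing client ...
    client  = Aws::Kinesis::Client.new(region: 'us-east-1')
    kinesis = Aws::Kinesis::Resource.new(client: client)

    # ... or let the resource build one from client options.
    kinesis = Aws::Kinesis::Resource.new(region: 'us-east-1')

    kinesis.client # => the underlying Aws::Kinesis::Client
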
@@ -1,1038 +1,1203 @@
1
1
  # WARNING ABOUT GENERATED CODE
2
2
  #
3
- # This file is generated. See the contributing for info on making contributions:
3
+ # This file is generated. See the contributing guide for more information:
4
4
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
5
5
  #
6
6
  # WARNING ABOUT GENERATED CODE
7
7
 
8
- module Aws
9
- module Kinesis
10
- module Types
8
+ module Aws::Kinesis
9
+ module Types
11
10
 
12
- # Represents the input for `AddTagsToStream`.
13
- # @note When making an API call, pass AddTagsToStreamInput
14
- # data as a hash:
15
- #
16
- # {
17
- # stream_name: "StreamName", # required
18
- # tags: { # required
19
- # "TagKey" => "TagValue",
20
- # },
21
- # }
22
- # @!attribute [rw] stream_name
23
- # The name of the stream.
24
- # @return [String]
25
- #
26
- # @!attribute [rw] tags
27
- # The set of key-value pairs to use to create the tags.
28
- # @return [Hash<String,String>]
29
- class AddTagsToStreamInput < Struct.new(
30
- :stream_name,
31
- :tags)
32
- include Aws::Structure
33
- end
34
-
35
- # Represents the input for `CreateStream`.
36
- # @note When making an API call, pass CreateStreamInput
37
- # data as a hash:
38
- #
39
- # {
40
- # stream_name: "StreamName", # required
41
- # shard_count: 1, # required
42
- # }
43
- # @!attribute [rw] stream_name
44
- # A name to identify the stream. The stream name is scoped to the AWS
45
- # account used by the application that creates the stream. It is also
46
- # scoped by region. That is, two streams in two different AWS accounts
47
- # can have the same name, and two streams in the same AWS account but
48
- # in two different regions can have the same name.
49
- # @return [String]
50
- #
51
- # @!attribute [rw] shard_count
52
- # The number of shards that the stream will use. The throughput of the
53
- # stream is a function of the number of shards; more shards are
54
- # required for greater provisioned throughput.
55
- #
56
- # DefaultShardLimit;
57
- # @return [Integer]
58
- class CreateStreamInput < Struct.new(
59
- :stream_name,
60
- :shard_count)
61
- include Aws::Structure
62
- end
11
+ # Represents the input for `AddTagsToStream`.
12
+ #
13
+ # @note When making an API call, you may pass AddTagsToStreamInput
14
+ # data as a hash:
15
+ #
16
+ # {
17
+ # stream_name: "StreamName", # required
18
+ # tags: { # required
19
+ # "TagKey" => "TagValue",
20
+ # },
21
+ # }
22
+ #
23
+ # @!attribute [rw] stream_name
24
+ # The name of the stream.
25
+ # @return [String]
26
+ #
27
+ # @!attribute [rw] tags
28
+ # The set of key-value pairs to use to create the tags.
29
+ # @return [Hash<String,String>]
30
+ #
31
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/AddTagsToStreamInput AWS API Documentation
32
+ #
33
+ class AddTagsToStreamInput < Struct.new(
34
+ :stream_name,
35
+ :tags)
36
+ include Aws::Structure
37
+ end
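
The request hash documented on `AddTagsToStreamInput` above is what the generated `add_tags_to_stream` client method accepts. A minimal sketch, with `client` an `Aws::Kinesis::Client` as in the earlier sketches; the stream name and tag values are placeholders:

    client.add_tags_to_stream(
      stream_name: 'my-stream',             # required
      tags: { 'Environment' => 'staging' }  # required; key/value pairs become stream tags
    )
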
63
38
 
64
- # Represents the input for DecreaseStreamRetentionPeriod.
65
- # @note When making an API call, pass DecreaseStreamRetentionPeriodInput
66
- # data as a hash:
67
- #
68
- # {
69
- # stream_name: "StreamName", # required
70
- # retention_period_hours: 1, # required
71
- # }
72
- # @!attribute [rw] stream_name
73
- # The name of the stream to modify.
74
- # @return [String]
75
- #
76
- # @!attribute [rw] retention_period_hours
77
- # The new retention period of the stream, in hours. Must be less than
78
- # the current retention period.
79
- # @return [Integer]
80
- class DecreaseStreamRetentionPeriodInput < Struct.new(
81
- :stream_name,
82
- :retention_period_hours)
83
- include Aws::Structure
84
- end
39
+ # Represents the input for `CreateStream`.
40
+ #
41
+ # @note When making an API call, you may pass CreateStreamInput
42
+ # data as a hash:
43
+ #
44
+ # {
45
+ # stream_name: "StreamName", # required
46
+ # shard_count: 1, # required
47
+ # }
48
+ #
49
+ # @!attribute [rw] stream_name
50
+ # A name to identify the stream. The stream name is scoped to the AWS
51
+ # account used by the application that creates the stream. It is also
52
+ # scoped by region. That is, two streams in two different AWS accounts
53
+ # can have the same name, and two streams in the same AWS account but
54
+ # in two different regions can have the same name.
55
+ # @return [String]
56
+ #
57
+ # @!attribute [rw] shard_count
58
+ # The number of shards that the stream will use. The throughput of the
59
+ # stream is a function of the number of shards; more shards are
60
+ # required for greater provisioned throughput.
61
+ #
62
+ # DefaultShardLimit;
63
+ # @return [Integer]
64
+ #
65
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/CreateStreamInput AWS API Documentation
66
+ #
67
+ class CreateStreamInput < Struct.new(
68
+ :stream_name,
69
+ :shard_count)
70
+ include Aws::Structure
71
+ end
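
A minimal sketch of calling `create_stream` with the hash shape shown above, then waiting for the stream to become usable. The `:stream_exists` waiter comes from the SDK's standard waiter set and is not part of this diff; the stream name and shard count are placeholders:

    client.create_stream(
      stream_name: 'my-stream', # scoped to the AWS account and region
      shard_count: 2            # provisioned throughput grows with the shard count
    )

    # Block until the stream reaches ACTIVE before writing to it.
    client.wait_until(:stream_exists, stream_name: 'my-stream')

    # Streams are removed with the matching DeleteStream operation.
    client.delete_stream(stream_name: 'my-stream')
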
85
72
 
86
- # Represents the input for DeleteStream.
87
- # @note When making an API call, pass DeleteStreamInput
88
- # data as a hash:
89
- #
90
- # {
91
- # stream_name: "StreamName", # required
92
- # }
93
- # @!attribute [rw] stream_name
94
- # The name of the stream to delete.
95
- # @return [String]
96
- class DeleteStreamInput < Struct.new(
97
- :stream_name)
98
- include Aws::Structure
99
- end
73
+ # Represents the input for DecreaseStreamRetentionPeriod.
74
+ #
75
+ # @note When making an API call, you may pass DecreaseStreamRetentionPeriodInput
76
+ # data as a hash:
77
+ #
78
+ # {
79
+ # stream_name: "StreamName", # required
80
+ # retention_period_hours: 1, # required
81
+ # }
82
+ #
83
+ # @!attribute [rw] stream_name
84
+ # The name of the stream to modify.
85
+ # @return [String]
86
+ #
87
+ # @!attribute [rw] retention_period_hours
88
+ # The new retention period of the stream, in hours. Must be less than
89
+ # the current retention period.
90
+ # @return [Integer]
91
+ #
92
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DecreaseStreamRetentionPeriodInput AWS API Documentation
93
+ #
94
+ class DecreaseStreamRetentionPeriodInput < Struct.new(
95
+ :stream_name,
96
+ :retention_period_hours)
97
+ include Aws::Structure
98
+ end
100
99
 
101
- # @api private
102
- class DescribeLimitsInput < Aws::EmptyStructure; end
100
+ # Represents the input for DeleteStream.
101
+ #
102
+ # @note When making an API call, you may pass DeleteStreamInput
103
+ # data as a hash:
104
+ #
105
+ # {
106
+ # stream_name: "StreamName", # required
107
+ # }
108
+ #
109
+ # @!attribute [rw] stream_name
110
+ # The name of the stream to delete.
111
+ # @return [String]
112
+ #
113
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DeleteStreamInput AWS API Documentation
114
+ #
115
+ class DeleteStreamInput < Struct.new(
116
+ :stream_name)
117
+ include Aws::Structure
118
+ end
103
119
 
104
- # @!attribute [rw] shard_limit
105
- # The maximum number of shards.
106
- # @return [Integer]
107
- #
108
- # @!attribute [rw] open_shard_count
109
- # The number of open shards.
110
- # @return [Integer]
111
- class DescribeLimitsOutput < Struct.new(
112
- :shard_limit,
113
- :open_shard_count)
114
- include Aws::Structure
115
- end
120
+ # @api private
121
+ #
122
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeLimitsInput AWS API Documentation
123
+ #
124
+ class DescribeLimitsInput < Aws::EmptyStructure; end
116
125
 
117
- # Represents the input for `DescribeStream`.
118
- # @note When making an API call, pass DescribeStreamInput
119
- # data as a hash:
120
- #
121
- # {
122
- # stream_name: "StreamName", # required
123
- # limit: 1,
124
- # exclusive_start_shard_id: "ShardId",
125
- # }
126
- # @!attribute [rw] stream_name
127
- # The name of the stream to describe.
128
- # @return [String]
129
- #
130
- # @!attribute [rw] limit
131
- # The maximum number of shards to return in a single call. The default
132
- # value is 100. If you specify a value greater than 100, at most 100
133
- # shards are returned.
134
- # @return [Integer]
135
- #
136
- # @!attribute [rw] exclusive_start_shard_id
137
- # The shard ID of the shard to start with.
138
- # @return [String]
139
- class DescribeStreamInput < Struct.new(
140
- :stream_name,
141
- :limit,
142
- :exclusive_start_shard_id)
143
- include Aws::Structure
144
- end
126
+ # @!attribute [rw] shard_limit
127
+ # The maximum number of shards.
128
+ # @return [Integer]
129
+ #
130
+ # @!attribute [rw] open_shard_count
131
+ # The number of open shards.
132
+ # @return [Integer]
133
+ #
134
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeLimitsOutput AWS API Documentation
135
+ #
136
+ class DescribeLimitsOutput < Struct.new(
137
+ :shard_limit,
138
+ :open_shard_count)
139
+ include Aws::Structure
140
+ end
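
`DescribeLimitsInput` is an empty structure, so the corresponding client call takes no arguments. A quick sketch using the same placeholder `client`:

    resp = client.describe_limits
    resp.shard_limit      # maximum number of shards allowed for the account
    resp.open_shard_count # shards currently open across the account's streams
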
145
141
 
146
- # Represents the output for `DescribeStream`.
147
- # @!attribute [rw] stream_description
148
- # The current status of the stream, the stream ARN, an array of shard
149
- # objects that comprise the stream, and whether there are more shards
150
- # available.
151
- # @return [Types::StreamDescription]
152
- class DescribeStreamOutput < Struct.new(
153
- :stream_description)
154
- include Aws::Structure
155
- end
142
+ # Represents the input for `DescribeStream`.
143
+ #
144
+ # @note When making an API call, you may pass DescribeStreamInput
145
+ # data as a hash:
146
+ #
147
+ # {
148
+ # stream_name: "StreamName", # required
149
+ # limit: 1,
150
+ # exclusive_start_shard_id: "ShardId",
151
+ # }
152
+ #
153
+ # @!attribute [rw] stream_name
154
+ # The name of the stream to describe.
155
+ # @return [String]
156
+ #
157
+ # @!attribute [rw] limit
158
+ # The maximum number of shards to return in a single call. The default
159
+ # value is 100. If you specify a value greater than 100, at most 100
160
+ # shards are returned.
161
+ # @return [Integer]
162
+ #
163
+ # @!attribute [rw] exclusive_start_shard_id
164
+ # The shard ID of the shard to start with.
165
+ # @return [String]
166
+ #
167
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeStreamInput AWS API Documentation
168
+ #
169
+ class DescribeStreamInput < Struct.new(
170
+ :stream_name,
171
+ :limit,
172
+ :exclusive_start_shard_id)
173
+ include Aws::Structure
174
+ end
156
175
 
157
- # Represents the input for DisableEnhancedMonitoring.
158
- # @note When making an API call, pass DisableEnhancedMonitoringInput
159
- # data as a hash:
160
- #
161
- # {
162
- # stream_name: "StreamName", # required
163
- # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
164
- # }
165
- # @!attribute [rw] stream_name
166
- # The name of the Amazon Kinesis stream for which to disable enhanced
167
- # monitoring.
168
- # @return [String]
169
- #
170
- # @!attribute [rw] shard_level_metrics
171
- # List of shard-level metrics to disable.
172
- #
173
- # The following are the valid shard-level metrics. The value "`ALL`"
174
- # disables every metric.
175
- #
176
- # * `IncomingBytes`
177
- #
178
- # * `IncomingRecords`
179
- #
180
- # * `OutgoingBytes`
181
- #
182
- # * `OutgoingRecords`
183
- #
184
- # * `WriteProvisionedThroughputExceeded`
185
- #
186
- # * `ReadProvisionedThroughputExceeded`
187
- #
188
- # * `IteratorAgeMilliseconds`
189
- #
190
- # * `ALL`
191
- #
192
- # For more information, see [Monitoring the Amazon Kinesis Streams
193
- # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
194
- # Developer Guide*.
195
- #
196
- #
197
- #
198
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
199
- # @return [Array<String>]
200
- class DisableEnhancedMonitoringInput < Struct.new(
201
- :stream_name,
202
- :shard_level_metrics)
203
- include Aws::Structure
204
- end
176
+ # Represents the output for `DescribeStream`.
177
+ #
178
+ # @!attribute [rw] stream_description
179
+ # The current status of the stream, the stream ARN, an array of shard
180
+ # objects that comprise the stream, and whether there are more shards
181
+ # available.
182
+ # @return [Types::StreamDescription]
183
+ #
184
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DescribeStreamOutput AWS API Documentation
185
+ #
186
+ class DescribeStreamOutput < Struct.new(
187
+ :stream_description)
188
+ include Aws::Structure
189
+ end
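
A sketch of paging through a stream's shards with `describe_stream`. The `stream_description` members used here (`stream_status`, `shards`, `has_more_shards`) belong to the `StreamDescription` type, which is outside this excerpt, so treat those accessor names as assumptions; the stream name is a placeholder:

    shards = []
    resp = client.describe_stream(stream_name: 'my-stream', limit: 100)
    desc = resp.stream_description
    desc.stream_status # e.g. "ACTIVE"
    shards.concat(desc.shards)

    # Page with exclusive_start_shard_id while more shards are available.
    while desc.has_more_shards
      resp = client.describe_stream(
        stream_name: 'my-stream',
        exclusive_start_shard_id: shards.last.shard_id
      )
      desc = resp.stream_description
      shards.concat(desc.shards)
    end
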
205
190
 
206
- # Represents the input for EnableEnhancedMonitoring.
207
- # @note When making an API call, pass EnableEnhancedMonitoringInput
208
- # data as a hash:
209
- #
210
- # {
211
- # stream_name: "StreamName", # required
212
- # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
213
- # }
214
- # @!attribute [rw] stream_name
215
- # The name of the stream for which to enable enhanced monitoring.
216
- # @return [String]
217
- #
218
- # @!attribute [rw] shard_level_metrics
219
- # List of shard-level metrics to enable.
220
- #
221
- # The following are the valid shard-level metrics. The value "`ALL`"
222
- # enables every metric.
223
- #
224
- # * `IncomingBytes`
225
- #
226
- # * `IncomingRecords`
227
- #
228
- # * `OutgoingBytes`
229
- #
230
- # * `OutgoingRecords`
231
- #
232
- # * `WriteProvisionedThroughputExceeded`
233
- #
234
- # * `ReadProvisionedThroughputExceeded`
235
- #
236
- # * `IteratorAgeMilliseconds`
237
- #
238
- # * `ALL`
239
- #
240
- # For more information, see [Monitoring the Amazon Kinesis Streams
241
- # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
242
- # Developer Guide*.
243
- #
244
- #
245
- #
246
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
247
- # @return [Array<String>]
248
- class EnableEnhancedMonitoringInput < Struct.new(
249
- :stream_name,
250
- :shard_level_metrics)
251
- include Aws::Structure
252
- end
191
+ # Represents the input for DisableEnhancedMonitoring.
192
+ #
193
+ # @note When making an API call, you may pass DisableEnhancedMonitoringInput
194
+ # data as a hash:
195
+ #
196
+ # {
197
+ # stream_name: "StreamName", # required
198
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
199
+ # }
200
+ #
201
+ # @!attribute [rw] stream_name
202
+ # The name of the Amazon Kinesis stream for which to disable enhanced
203
+ # monitoring.
204
+ # @return [String]
205
+ #
206
+ # @!attribute [rw] shard_level_metrics
207
+ # List of shard-level metrics to disable.
208
+ #
209
+ # The following are the valid shard-level metrics. The value "`ALL`"
210
+ # disables every metric.
211
+ #
212
+ # * `IncomingBytes`
213
+ #
214
+ # * `IncomingRecords`
215
+ #
216
+ # * `OutgoingBytes`
217
+ #
218
+ # * `OutgoingRecords`
219
+ #
220
+ # * `WriteProvisionedThroughputExceeded`
221
+ #
222
+ # * `ReadProvisionedThroughputExceeded`
223
+ #
224
+ # * `IteratorAgeMilliseconds`
225
+ #
226
+ # * `ALL`
227
+ #
228
+ # For more information, see [Monitoring the Amazon Kinesis Streams
229
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
230
+ # Developer Guide*.
231
+ #
232
+ #
233
+ #
234
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
235
+ # @return [Array<String>]
236
+ #
237
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/DisableEnhancedMonitoringInput AWS API Documentation
238
+ #
239
+ class DisableEnhancedMonitoringInput < Struct.new(
240
+ :stream_name,
241
+ :shard_level_metrics)
242
+ include Aws::Structure
243
+ end
253
244
 
254
- # Represents enhanced metrics types.
255
- # @!attribute [rw] shard_level_metrics
256
- # List of shard-level metrics.
257
- #
258
- # The following are the valid shard-level metrics. The value "`ALL`"
259
- # enhances every metric.
260
- #
261
- # * `IncomingBytes`
262
- #
263
- # * `IncomingRecords`
264
- #
265
- # * `OutgoingBytes`
266
- #
267
- # * `OutgoingRecords`
268
- #
269
- # * `WriteProvisionedThroughputExceeded`
270
- #
271
- # * `ReadProvisionedThroughputExceeded`
272
- #
273
- # * `IteratorAgeMilliseconds`
274
- #
275
- # * `ALL`
276
- #
277
- # For more information, see [Monitoring the Amazon Kinesis Streams
278
- # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
279
- # Developer Guide*.
280
- #
281
- #
282
- #
283
- # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
284
- # @return [Array<String>]
285
- class EnhancedMetrics < Struct.new(
286
- :shard_level_metrics)
287
- include Aws::Structure
288
- end
245
+ # Represents the input for EnableEnhancedMonitoring.
246
+ #
247
+ # @note When making an API call, you may pass EnableEnhancedMonitoringInput
248
+ # data as a hash:
249
+ #
250
+ # {
251
+ # stream_name: "StreamName", # required
252
+ # shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
253
+ # }
254
+ #
255
+ # @!attribute [rw] stream_name
256
+ # The name of the stream for which to enable enhanced monitoring.
257
+ # @return [String]
258
+ #
259
+ # @!attribute [rw] shard_level_metrics
260
+ # List of shard-level metrics to enable.
261
+ #
262
+ # The following are the valid shard-level metrics. The value "`ALL`"
263
+ # enables every metric.
264
+ #
265
+ # * `IncomingBytes`
266
+ #
267
+ # * `IncomingRecords`
268
+ #
269
+ # * `OutgoingBytes`
270
+ #
271
+ # * `OutgoingRecords`
272
+ #
273
+ # * `WriteProvisionedThroughputExceeded`
274
+ #
275
+ # * `ReadProvisionedThroughputExceeded`
276
+ #
277
+ # * `IteratorAgeMilliseconds`
278
+ #
279
+ # * `ALL`
280
+ #
281
+ # For more information, see [Monitoring the Amazon Kinesis Streams
282
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
283
+ # Developer Guide*.
284
+ #
285
+ #
286
+ #
287
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
288
+ # @return [Array<String>]
289
+ #
290
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/EnableEnhancedMonitoringInput AWS API Documentation
291
+ #
292
+ class EnableEnhancedMonitoringInput < Struct.new(
293
+ :stream_name,
294
+ :shard_level_metrics)
295
+ include Aws::Structure
296
+ end
289
297
 
290
- # Represents the output for EnableEnhancedMonitoring and
291
- # DisableEnhancedMonitoring.
292
- # @!attribute [rw] stream_name
293
- # The name of the Amazon Kinesis stream.
294
- # @return [String]
295
- #
296
- # @!attribute [rw] current_shard_level_metrics
297
- # Represents the current state of the metrics that are in the enhanced
298
- # state before the operation.
299
- # @return [Array<String>]
300
- #
301
- # @!attribute [rw] desired_shard_level_metrics
302
- # Represents the list of all the metrics that would be in the enhanced
303
- # state after the operation.
304
- # @return [Array<String>]
305
- class EnhancedMonitoringOutput < Struct.new(
306
- :stream_name,
307
- :current_shard_level_metrics,
308
- :desired_shard_level_metrics)
309
- include Aws::Structure
310
- end
298
+ # Represents enhanced metrics types.
299
+ #
300
+ # @!attribute [rw] shard_level_metrics
301
+ # List of shard-level metrics.
302
+ #
303
+ # The following are the valid shard-level metrics. The value "`ALL`"
304
+ # enhances every metric.
305
+ #
306
+ # * `IncomingBytes`
307
+ #
308
+ # * `IncomingRecords`
309
+ #
310
+ # * `OutgoingBytes`
311
+ #
312
+ # * `OutgoingRecords`
313
+ #
314
+ # * `WriteProvisionedThroughputExceeded`
315
+ #
316
+ # * `ReadProvisionedThroughputExceeded`
317
+ #
318
+ # * `IteratorAgeMilliseconds`
319
+ #
320
+ # * `ALL`
321
+ #
322
+ # For more information, see [Monitoring the Amazon Kinesis Streams
323
+ # Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
324
+ # Developer Guide*.
325
+ #
326
+ #
327
+ #
328
+ # [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
329
+ # @return [Array<String>]
330
+ #
331
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/EnhancedMetrics AWS API Documentation
332
+ #
333
+ class EnhancedMetrics < Struct.new(
334
+ :shard_level_metrics)
335
+ include Aws::Structure
336
+ end
311
337
 
312
- # Represents the input for GetRecords.
313
- # @note When making an API call, pass GetRecordsInput
314
- # data as a hash:
315
- #
316
- # {
317
- # shard_iterator: "ShardIterator", # required
318
- # limit: 1,
319
- # }
320
- # @!attribute [rw] shard_iterator
321
- # The position in the shard from which you want to start sequentially
322
- # reading data records. A shard iterator specifies this position using
323
- # the sequence number of a data record in the shard.
324
- # @return [String]
325
- #
326
- # @!attribute [rw] limit
327
- # The maximum number of records to return. Specify a value of up to
328
- # 10,000. If you specify a value that is greater than 10,000,
329
- # GetRecords throws `InvalidArgumentException`.
330
- # @return [Integer]
331
- class GetRecordsInput < Struct.new(
332
- :shard_iterator,
333
- :limit)
334
- include Aws::Structure
335
- end
338
+ # Represents the output for EnableEnhancedMonitoring and
339
+ # DisableEnhancedMonitoring.
340
+ #
341
+ # @!attribute [rw] stream_name
342
+ # The name of the Amazon Kinesis stream.
343
+ # @return [String]
344
+ #
345
+ # @!attribute [rw] current_shard_level_metrics
346
+ # Represents the current state of the metrics that are in the enhanced
347
+ # state before the operation.
348
+ # @return [Array<String>]
349
+ #
350
+ # @!attribute [rw] desired_shard_level_metrics
351
+ # Represents the list of all the metrics that would be in the enhanced
352
+ # state after the operation.
353
+ # @return [Array<String>]
354
+ #
355
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/EnhancedMonitoringOutput AWS API Documentation
356
+ #
357
+ class EnhancedMonitoringOutput < Struct.new(
358
+ :stream_name,
359
+ :current_shard_level_metrics,
360
+ :desired_shard_level_metrics)
361
+ include Aws::Structure
362
+ end
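
Enabling and disabling shard-level metrics uses the two inputs above, and both calls return the `EnhancedMonitoringOutput` shape. A brief sketch with the usual placeholder stream name:

    resp = client.enable_enhanced_monitoring(
      stream_name: 'my-stream',
      shard_level_metrics: ['IncomingBytes', 'OutgoingBytes']
    )
    resp.current_shard_level_metrics # metrics that were enhanced before the call
    resp.desired_shard_level_metrics # metrics that will be enhanced afterwards

    # "ALL" disables every shard-level metric in one call.
    client.disable_enhanced_monitoring(
      stream_name: 'my-stream',
      shard_level_metrics: ['ALL']
    )
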
336
363
 
337
- # Represents the output for GetRecords.
338
- # @!attribute [rw] records
339
- # The data records retrieved from the shard.
340
- # @return [Array<Types::Record>]
341
- #
342
- # @!attribute [rw] next_shard_iterator
343
- # The next position in the shard from which to start sequentially
344
- # reading data records. If set to `null`, the shard has been closed
345
- # and the requested iterator will not return any more data.
346
- # @return [String]
347
- #
348
- # @!attribute [rw] millis_behind_latest
349
- # The number of milliseconds the GetRecords response is from the tip
350
- # of the stream, indicating how far behind current time the consumer
351
- # is. A value of zero indicates record processing is caught up, and
352
- # there are no new records to process at this moment.
353
- # @return [Integer]
354
- class GetRecordsOutput < Struct.new(
355
- :records,
356
- :next_shard_iterator,
357
- :millis_behind_latest)
358
- include Aws::Structure
359
- end
364
+ # Represents the input for GetRecords.
365
+ #
366
+ # @note When making an API call, you may pass GetRecordsInput
367
+ # data as a hash:
368
+ #
369
+ # {
370
+ # shard_iterator: "ShardIterator", # required
371
+ # limit: 1,
372
+ # }
373
+ #
374
+ # @!attribute [rw] shard_iterator
375
+ # The position in the shard from which you want to start sequentially
376
+ # reading data records. A shard iterator specifies this position using
377
+ # the sequence number of a data record in the shard.
378
+ # @return [String]
379
+ #
380
+ # @!attribute [rw] limit
381
+ # The maximum number of records to return. Specify a value of up to
382
+ # 10,000. If you specify a value that is greater than 10,000,
383
+ # GetRecords throws `InvalidArgumentException`.
384
+ # @return [Integer]
385
+ #
386
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetRecordsInput AWS API Documentation
387
+ #
388
+ class GetRecordsInput < Struct.new(
389
+ :shard_iterator,
390
+ :limit)
391
+ include Aws::Structure
392
+ end
360
393
 
361
- # Represents the input for `GetShardIterator`.
362
- # @note When making an API call, pass GetShardIteratorInput
363
- # data as a hash:
364
- #
365
- # {
366
- # stream_name: "StreamName", # required
367
- # shard_id: "ShardId", # required
368
- # shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
369
- # starting_sequence_number: "SequenceNumber",
370
- # timestamp: Time.now,
371
- # }
372
- # @!attribute [rw] stream_name
373
- # The name of the Amazon Kinesis stream.
374
- # @return [String]
375
- #
376
- # @!attribute [rw] shard_id
377
- # The shard ID of the Amazon Kinesis shard to get the iterator for.
378
- # @return [String]
379
- #
380
- # @!attribute [rw] shard_iterator_type
381
- # Determines how the shard iterator is used to start reading data
382
- # records from the shard.
383
- #
384
- # The following are the valid Amazon Kinesis shard iterator types:
385
- #
386
- # * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by
387
- # a specific sequence number, provided in the value
388
- # `StartingSequenceNumber`.
389
- #
390
- # * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
391
- # denoted by a specific sequence number, provided in the value
392
- # `StartingSequenceNumber`.
393
- #
394
- # * AT\_TIMESTAMP - Start reading from the position denoted by a
395
- # specific timestamp, provided in the value `Timestamp`.
396
- #
397
- # * TRIM\_HORIZON - Start reading at the last untrimmed record in the
398
- # shard in the system, which is the oldest data record in the shard.
399
- #
400
- # * LATEST - Start reading just after the most recent record in the
401
- # shard, so that you always read the most recent data in the shard.
402
- # @return [String]
403
- #
404
- # @!attribute [rw] starting_sequence_number
405
- # The sequence number of the data record in the shard from which to
406
- # start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER
407
- # and AFTER\_SEQUENCE\_NUMBER.
408
- # @return [String]
409
- #
410
- # @!attribute [rw] timestamp
411
- # The timestamp of the data record from which to start reading. Used
412
- # with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix
413
- # epoch date with precision in milliseconds. For example,
414
- # `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record
415
- # with this exact timestamp does not exist, the iterator returned is
416
- # for the next (later) record. If the timestamp is older than the
417
- # current trim horizon, the iterator returned is for the oldest
418
- # untrimmed data record (TRIM\_HORIZON).
419
- # @return [Time]
420
- class GetShardIteratorInput < Struct.new(
421
- :stream_name,
422
- :shard_id,
423
- :shard_iterator_type,
424
- :starting_sequence_number,
425
- :timestamp)
426
- include Aws::Structure
427
- end
394
+ # Represents the output for GetRecords.
395
+ #
396
+ # @!attribute [rw] records
397
+ # The data records retrieved from the shard.
398
+ # @return [Array<Types::Record>]
399
+ #
400
+ # @!attribute [rw] next_shard_iterator
401
+ # The next position in the shard from which to start sequentially
402
+ # reading data records. If set to `null`, the shard has been closed
403
+ # and the requested iterator will not return any more data.
404
+ # @return [String]
405
+ #
406
+ # @!attribute [rw] millis_behind_latest
407
+ # The number of milliseconds the GetRecords response is from the tip
408
+ # of the stream, indicating how far behind current time the consumer
409
+ # is. A value of zero indicates record processing is caught up, and
410
+ # there are no new records to process at this moment.
411
+ # @return [Integer]
412
+ #
413
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetRecordsOutput AWS API Documentation
414
+ #
415
+ class GetRecordsOutput < Struct.new(
416
+ :records,
417
+ :next_shard_iterator,
418
+ :millis_behind_latest)
419
+ include Aws::Structure
420
+ end
428
421
 
429
- # Represents the output for `GetShardIterator`.
430
- # @!attribute [rw] shard_iterator
431
- # The position in the shard from which to start reading data records
432
- # sequentially. A shard iterator specifies this position using the
433
- # sequence number of a data record in a shard.
434
- # @return [String]
435
- class GetShardIteratorOutput < Struct.new(
436
- :shard_iterator)
437
- include Aws::Structure
438
- end
422
+ # Represents the input for `GetShardIterator`.
423
+ #
424
+ # @note When making an API call, you may pass GetShardIteratorInput
425
+ # data as a hash:
426
+ #
427
+ # {
428
+ # stream_name: "StreamName", # required
429
+ # shard_id: "ShardId", # required
430
+ # shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
431
+ # starting_sequence_number: "SequenceNumber",
432
+ # timestamp: Time.now,
433
+ # }
434
+ #
435
+ # @!attribute [rw] stream_name
436
+ # The name of the Amazon Kinesis stream.
437
+ # @return [String]
438
+ #
439
+ # @!attribute [rw] shard_id
440
+ # The shard ID of the Amazon Kinesis shard to get the iterator for.
441
+ # @return [String]
442
+ #
443
+ # @!attribute [rw] shard_iterator_type
444
+ # Determines how the shard iterator is used to start reading data
445
+ # records from the shard.
446
+ #
447
+ # The following are the valid Amazon Kinesis shard iterator types:
448
+ #
449
+ # * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by
450
+ # a specific sequence number, provided in the value
451
+ # `StartingSequenceNumber`.
452
+ #
453
+ # * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
454
+ # denoted by a specific sequence number, provided in the value
455
+ # `StartingSequenceNumber`.
456
+ #
457
+ # * AT\_TIMESTAMP - Start reading from the position denoted by a
458
+ # specific timestamp, provided in the value `Timestamp`.
459
+ #
460
+ # * TRIM\_HORIZON - Start reading at the last untrimmed record in the
461
+ # shard in the system, which is the oldest data record in the shard.
462
+ #
463
+ # * LATEST - Start reading just after the most recent record in the
464
+ # shard, so that you always read the most recent data in the shard.
465
+ # @return [String]
466
+ #
467
+ # @!attribute [rw] starting_sequence_number
468
+ # The sequence number of the data record in the shard from which to
469
+ # start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER
470
+ # and AFTER\_SEQUENCE\_NUMBER.
471
+ # @return [String]
472
+ #
473
+ # @!attribute [rw] timestamp
474
+ # The timestamp of the data record from which to start reading. Used
475
+ # with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix
476
+ # epoch date with precision in milliseconds. For example,
477
+ # `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record
478
+ # with this exact timestamp does not exist, the iterator returned is
479
+ # for the next (later) record. If the timestamp is older than the
480
+ # current trim horizon, the iterator returned is for the oldest
481
+ # untrimmed data record (TRIM\_HORIZON).
482
+ # @return [Time]
483
+ #
484
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetShardIteratorInput AWS API Documentation
485
+ #
486
+ class GetShardIteratorInput < Struct.new(
487
+ :stream_name,
488
+ :shard_id,
489
+ :shard_iterator_type,
490
+ :starting_sequence_number,
491
+ :timestamp)
492
+ include Aws::Structure
493
+ end
439
494
 
440
- # The range of possible hash key values for the shard, which is a set of
441
- # ordered contiguous positive integers.
442
- # @!attribute [rw] starting_hash_key
443
- # The starting hash key of the hash key range.
444
- # @return [String]
445
- #
446
- # @!attribute [rw] ending_hash_key
447
- # The ending hash key of the hash key range.
448
- # @return [String]
449
- class HashKeyRange < Struct.new(
450
- :starting_hash_key,
451
- :ending_hash_key)
452
- include Aws::Structure
453
- end
495
+ # Represents the output for `GetShardIterator`.
496
+ #
497
+ # @!attribute [rw] shard_iterator
498
+ # The position in the shard from which to start reading data records
499
+ # sequentially. A shard iterator specifies this position using the
500
+ # sequence number of a data record in a shard.
501
+ # @return [String]
502
+ #
503
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetShardIteratorOutput AWS API Documentation
504
+ #
505
+ class GetShardIteratorOutput < Struct.new(
506
+ :shard_iterator)
507
+ include Aws::Structure
508
+ end
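
`GetShardIterator` and `GetRecords` are normally used together: fetch an iterator for a shard, then poll it until `next_shard_iterator` comes back `nil`. A sketch of that loop; the shard ID is a placeholder, and the `record.data` accessor belongs to the `Record` type outside this excerpt:

    iterator = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'TRIM_HORIZON' # start at the oldest untrimmed record
    ).shard_iterator

    while iterator
      resp = client.get_records(shard_iterator: iterator, limit: 100)
      resp.records.each { |record| puts record.data } # payloads arrive decoded
      resp.millis_behind_latest # 0 once the consumer has caught up with the tip
      iterator = resp.next_shard_iterator # nil when the shard is closed and drained
      sleep 1 # keep polling a quiet shard politely
    end
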
454
509
 
455
- # Represents the input for IncreaseStreamRetentionPeriod.
456
- # @note When making an API call, pass IncreaseStreamRetentionPeriodInput
457
- # data as a hash:
458
- #
459
- # {
460
- # stream_name: "StreamName", # required
461
- # retention_period_hours: 1, # required
462
- # }
463
- # @!attribute [rw] stream_name
464
- # The name of the stream to modify.
465
- # @return [String]
466
- #
467
- # @!attribute [rw] retention_period_hours
468
- # The new retention period of the stream, in hours. Must be more than
469
- # the current retention period.
470
- # @return [Integer]
471
- class IncreaseStreamRetentionPeriodInput < Struct.new(
472
- :stream_name,
473
- :retention_period_hours)
474
- include Aws::Structure
475
- end
510
+ # The range of possible hash key values for the shard, which is a set of
511
+ # ordered contiguous positive integers.
512
+ #
513
+ # @!attribute [rw] starting_hash_key
514
+ # The starting hash key of the hash key range.
515
+ # @return [String]
516
+ #
517
+ # @!attribute [rw] ending_hash_key
518
+ # The ending hash key of the hash key range.
519
+ # @return [String]
520
+ #
521
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/HashKeyRange AWS API Documentation
522
+ #
523
+ class HashKeyRange < Struct.new(
524
+ :starting_hash_key,
525
+ :ending_hash_key)
526
+ include Aws::Structure
527
+ end
476
528
 
477
- # Represents the input for `ListStreams`.
478
- # @note When making an API call, pass ListStreamsInput
479
- # data as a hash:
480
- #
481
- # {
482
- # limit: 1,
483
- # exclusive_start_stream_name: "StreamName",
484
- # }
485
- # @!attribute [rw] limit
486
- # The maximum number of streams to list.
487
- # @return [Integer]
488
- #
489
- # @!attribute [rw] exclusive_start_stream_name
490
- # The name of the stream to start the list with.
491
- # @return [String]
492
- class ListStreamsInput < Struct.new(
493
- :limit,
494
- :exclusive_start_stream_name)
495
- include Aws::Structure
496
- end
529
+ # Represents the input for IncreaseStreamRetentionPeriod.
530
+ #
531
+ # @note When making an API call, you may pass IncreaseStreamRetentionPeriodInput
532
+ # data as a hash:
533
+ #
534
+ # {
535
+ # stream_name: "StreamName", # required
536
+ # retention_period_hours: 1, # required
537
+ # }
538
+ #
539
+ # @!attribute [rw] stream_name
540
+ # The name of the stream to modify.
541
+ # @return [String]
542
+ #
543
+ # @!attribute [rw] retention_period_hours
544
+ # The new retention period of the stream, in hours. Must be more than
545
+ # the current retention period.
546
+ # @return [Integer]
547
+ #
548
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/IncreaseStreamRetentionPeriodInput AWS API Documentation
549
+ #
550
+ class IncreaseStreamRetentionPeriodInput < Struct.new(
551
+ :stream_name,
552
+ :retention_period_hours)
553
+ include Aws::Structure
554
+ end
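
The two retention-period operations take the same two-field hash; as the attribute docs above state, an increase must go above the current retention and a decrease below it. A short sketch:

    client.increase_stream_retention_period(
      stream_name: 'my-stream',
      retention_period_hours: 48 # must exceed the current retention period
    )

    client.decrease_stream_retention_period(
      stream_name: 'my-stream',
      retention_period_hours: 24 # must be below the current retention period
    )
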
497
555
 
498
- # Represents the output for `ListStreams`.
499
- # @!attribute [rw] stream_names
500
- # The names of the streams that are associated with the AWS account
501
- # making the `ListStreams` request.
502
- # @return [Array<String>]
503
- #
504
- # @!attribute [rw] has_more_streams
505
- # If set to `true`, there are more streams available to list.
506
- # @return [Boolean]
507
- class ListStreamsOutput < Struct.new(
508
- :stream_names,
509
- :has_more_streams)
510
- include Aws::Structure
511
- end
556
+ # Represents the input for `ListStreams`.
557
+ #
558
+ # @note When making an API call, you may pass ListStreamsInput
559
+ # data as a hash:
560
+ #
561
+ # {
562
+ # limit: 1,
563
+ # exclusive_start_stream_name: "StreamName",
564
+ # }
565
+ #
566
+ # @!attribute [rw] limit
567
+ # The maximum number of streams to list.
568
+ # @return [Integer]
569
+ #
570
+ # @!attribute [rw] exclusive_start_stream_name
571
+ # The name of the stream to start the list with.
572
+ # @return [String]
573
+ #
574
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListStreamsInput AWS API Documentation
575
+ #
576
+ class ListStreamsInput < Struct.new(
577
+ :limit,
578
+ :exclusive_start_stream_name)
579
+ include Aws::Structure
580
+ end
512
581
 
513
- # Represents the input for `ListTagsForStream`.
514
- # @note When making an API call, pass ListTagsForStreamInput
515
- # data as a hash:
516
- #
517
- # {
518
- # stream_name: "StreamName", # required
519
- # exclusive_start_tag_key: "TagKey",
520
- # limit: 1,
521
- # }
522
- # @!attribute [rw] stream_name
523
- # The name of the stream.
524
- # @return [String]
525
- #
526
- # @!attribute [rw] exclusive_start_tag_key
527
- # The key to use as the starting point for the list of tags. If this
528
- # parameter is set, `ListTagsForStream` gets all tags that occur after
529
- # `ExclusiveStartTagKey`.
530
- # @return [String]
531
- #
532
- # @!attribute [rw] limit
533
- # The number of tags to return. If this number is less than the total
534
- # number of tags associated with the stream, `HasMoreTags` is set to
535
- # `true`. To list additional tags, set `ExclusiveStartTagKey` to the
536
- # last key in the response.
537
- # @return [Integer]
538
- class ListTagsForStreamInput < Struct.new(
539
- :stream_name,
540
- :exclusive_start_tag_key,
541
- :limit)
542
- include Aws::Structure
543
- end
582
+ # Represents the output for `ListStreams`.
583
+ #
584
+ # @!attribute [rw] stream_names
585
+ # The names of the streams that are associated with the AWS account
586
+ # making the `ListStreams` request.
587
+ # @return [Array<String>]
588
+ #
589
+ # @!attribute [rw] has_more_streams
590
+ # If set to `true`, there are more streams available to list.
591
+ # @return [Boolean]
592
+ #
593
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListStreamsOutput AWS API Documentation
594
+ #
595
+ class ListStreamsOutput < Struct.new(
596
+ :stream_names,
597
+ :has_more_streams)
598
+ include Aws::Structure
599
+ end
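
A sketch of listing every stream name by following `has_more_streams` with `exclusive_start_stream_name`, mirroring the input and output shapes above:

    names = []
    resp = client.list_streams(limit: 10)
    names.concat(resp.stream_names)

    while resp.has_more_streams
      # Continue from the last name returned in the previous page.
      resp = client.list_streams(limit: 10, exclusive_start_stream_name: names.last)
      names.concat(resp.stream_names)
    end
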
544
600
 
545
- # Represents the output for `ListTagsForStream`.
546
- # @!attribute [rw] tags
547
- # A list of tags associated with `StreamName`, starting with the first
548
- # tag after `ExclusiveStartTagKey` and up to the specified `Limit`.
549
- # @return [Array<Types::Tag>]
550
- #
551
- # @!attribute [rw] has_more_tags
552
- # If set to `true`, more tags are available. To request additional
553
- # tags, set `ExclusiveStartTagKey` to the key of the last tag
554
- # returned.
555
- # @return [Boolean]
556
- class ListTagsForStreamOutput < Struct.new(
557
- :tags,
558
- :has_more_tags)
559
- include Aws::Structure
560
- end
601
+ # Represents the input for `ListTagsForStream`.
602
+ #
603
+ # @note When making an API call, you may pass ListTagsForStreamInput
604
+ # data as a hash:
605
+ #
606
+ # {
607
+ # stream_name: "StreamName", # required
608
+ # exclusive_start_tag_key: "TagKey",
609
+ # limit: 1,
610
+ # }
611
+ #
612
+ # @!attribute [rw] stream_name
613
+ # The name of the stream.
614
+ # @return [String]
615
+ #
616
+ # @!attribute [rw] exclusive_start_tag_key
617
+ # The key to use as the starting point for the list of tags. If this
618
+ # parameter is set, `ListTagsForStream` gets all tags that occur after
619
+ # `ExclusiveStartTagKey`.
620
+ # @return [String]
621
+ #
622
+ # @!attribute [rw] limit
623
+ # The number of tags to return. If this number is less than the total
624
+ # number of tags associated with the stream, `HasMoreTags` is set to
625
+ # `true`. To list additional tags, set `ExclusiveStartTagKey` to the
626
+ # last key in the response.
627
+ # @return [Integer]
628
+ #
629
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListTagsForStreamInput AWS API Documentation
630
+ #
631
+ class ListTagsForStreamInput < Struct.new(
632
+ :stream_name,
633
+ :exclusive_start_tag_key,
634
+ :limit)
635
+ include Aws::Structure
636
+ end
561
637
 
562
- # Represents the input for `MergeShards`.
563
- # @note When making an API call, pass MergeShardsInput
564
- # data as a hash:
565
- #
566
- # {
567
- # stream_name: "StreamName", # required
568
- # shard_to_merge: "ShardId", # required
569
- # adjacent_shard_to_merge: "ShardId", # required
570
- # }
571
- # @!attribute [rw] stream_name
572
- # The name of the stream for the merge.
573
- # @return [String]
574
- #
575
- # @!attribute [rw] shard_to_merge
576
- # The shard ID of the shard to combine with the adjacent shard for the
577
- # merge.
578
- # @return [String]
579
- #
580
- # @!attribute [rw] adjacent_shard_to_merge
581
- # The shard ID of the adjacent shard for the merge.
582
- # @return [String]
583
- class MergeShardsInput < Struct.new(
584
- :stream_name,
585
- :shard_to_merge,
586
- :adjacent_shard_to_merge)
587
- include Aws::Structure
588
- end
638
+ # Represents the output for `ListTagsForStream`.
639
+ #
640
+ # @!attribute [rw] tags
641
+ # A list of tags associated with `StreamName`, starting with the first
642
+ # tag after `ExclusiveStartTagKey` and up to the specified `Limit`.
643
+ # @return [Array<Types::Tag>]
644
+ #
645
+ # @!attribute [rw] has_more_tags
646
+ # If set to `true`, more tags are available. To request additional
647
+ # tags, set `ExclusiveStartTagKey` to the key of the last tag
648
+ # returned.
649
+ # @return [Boolean]
650
+ #
651
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/ListTagsForStreamOutput AWS API Documentation
652
+ #
653
+ class ListTagsForStreamOutput < Struct.new(
654
+ :tags,
655
+ :has_more_tags)
656
+ include Aws::Structure
657
+ end
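
Tag listing pages the same way, keyed by `exclusive_start_tag_key`. The `tag.key`/`tag.value` accessors belong to the `Tag` type outside this excerpt, so treat them as assumptions; the stream name is a placeholder:

    tags = []
    resp = client.list_tags_for_stream(stream_name: 'my-stream', limit: 10)
    tags.concat(resp.tags)

    while resp.has_more_tags
      resp = client.list_tags_for_stream(
        stream_name: 'my-stream',
        exclusive_start_tag_key: tags.last.key
      )
      tags.concat(resp.tags)
    end

    tags.each { |tag| puts "#{tag.key}=#{tag.value}" }
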
589
658
 
590
- # Represents the input for `PutRecord`.
591
- # @note When making an API call, pass PutRecordInput
592
- # data as a hash:
593
- #
594
- # {
595
- # stream_name: "StreamName", # required
596
- # data: "data", # required
597
- # partition_key: "PartitionKey", # required
598
- # explicit_hash_key: "HashKey",
599
- # sequence_number_for_ordering: "SequenceNumber",
600
- # }
601
- # @!attribute [rw] stream_name
602
- # The name of the stream to put the data record into.
603
- # @return [String]
604
- #
605
- # @!attribute [rw] data
606
- # The data blob to put into the record, which is base64-encoded when
607
- # the blob is serialized. When the data blob (the payload before
608
- # base64-encoding) is added to the partition key size, the total size
609
- # must not exceed the maximum record size (1 MB).
610
- # @return [String]
611
- #
612
- # @!attribute [rw] partition_key
613
- # Determines which shard in the stream the data record is assigned to.
614
- # Partition keys are Unicode strings with a maximum length limit of
615
- # 256 characters for each key. Amazon Kinesis uses the partition key
616
- # as input to a hash function that maps the partition key and
617
- # associated data to a specific shard. Specifically, an MD5 hash
618
- # function is used to map partition keys to 128-bit integer values and
619
- # to map associated data records to shards. As a result of this
620
- # hashing mechanism, all data records with the same partition key map
621
- # to the same shard within the stream.
622
- # @return [String]
623
- #
624
- # @!attribute [rw] explicit_hash_key
625
- # The hash value used to explicitly determine the shard the data
626
- # record is assigned to by overriding the partition key hash.
627
- # @return [String]
628
- #
629
- # @!attribute [rw] sequence_number_for_ordering
630
- # Guarantees strictly increasing sequence numbers, for puts from the
631
- # same client and to the same partition key. Usage: set the
632
- # `SequenceNumberForOrdering` of record *n* to the sequence number of
633
- # record *n-1* (as returned in the result when putting record *n-1*).
634
- # If this parameter is not set, records will be coarsely ordered based
635
- # on arrival time.
636
- # @return [String]
637
- class PutRecordInput < Struct.new(
638
- :stream_name,
639
- :data,
640
- :partition_key,
641
- :explicit_hash_key,
642
- :sequence_number_for_ordering)
643
- include Aws::Structure
644
- end
659
+ # Represents the input for `MergeShards`.
660
+ #
661
+ # @note When making an API call, you may pass MergeShardsInput
662
+ # data as a hash:
663
+ #
664
+ # {
665
+ # stream_name: "StreamName", # required
666
+ # shard_to_merge: "ShardId", # required
667
+ # adjacent_shard_to_merge: "ShardId", # required
668
+ # }
669
+ #
670
+ # @!attribute [rw] stream_name
671
+ # The name of the stream for the merge.
672
+ # @return [String]
673
+ #
674
+ # @!attribute [rw] shard_to_merge
675
+ # The shard ID of the shard to combine with the adjacent shard for the
676
+ # merge.
677
+ # @return [String]
678
+ #
679
+ # @!attribute [rw] adjacent_shard_to_merge
680
+ # The shard ID of the adjacent shard for the merge.
681
+ # @return [String]
682
+ #
683
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/MergeShardsInput AWS API Documentation
684
+ #
685
+ class MergeShardsInput < Struct.new(
686
+ :stream_name,
687
+ :shard_to_merge,
688
+ :adjacent_shard_to_merge)
689
+ include Aws::Structure
690
+ end
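
A minimal sketch of `merge_shards`; the shard IDs are placeholders, and the two shards must be adjacent, i.e. their hash key ranges must form one contiguous range:

    client.merge_shards(
      stream_name: 'my-stream',
      shard_to_merge: 'shardId-000000000000',
      adjacent_shard_to_merge: 'shardId-000000000001'
    )
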
645
691
 
646
- # Represents the output for `PutRecord`.
647
- # @!attribute [rw] shard_id
648
- # The shard ID of the shard where the data record was placed.
649
- # @return [String]
650
- #
651
- # @!attribute [rw] sequence_number
652
- # The sequence number identifier that was assigned to the put data
653
- # record. The sequence number for the record is unique across all
654
- # records in the stream. A sequence number is the identifier
655
- # associated with every record put into the stream.
656
- # @return [String]
657
- class PutRecordOutput < Struct.new(
658
- :shard_id,
659
- :sequence_number)
660
- include Aws::Structure
661
- end
692
+ # Represents the input for `PutRecord`.
693
+ #
694
+ # @note When making an API call, you may pass PutRecordInput
695
+ # data as a hash:
696
+ #
697
+ # {
698
+ # stream_name: "StreamName", # required
699
+ # data: "data", # required
700
+ # partition_key: "PartitionKey", # required
701
+ # explicit_hash_key: "HashKey",
702
+ # sequence_number_for_ordering: "SequenceNumber",
703
+ # }
704
+ #
705
+ # @!attribute [rw] stream_name
706
+ # The name of the stream to put the data record into.
707
+ # @return [String]
708
+ #
709
+ # @!attribute [rw] data
710
+ # The data blob to put into the record, which is base64-encoded when
711
+ # the blob is serialized. When the data blob (the payload before
712
+ # base64-encoding) is added to the partition key size, the total size
713
+ # must not exceed the maximum record size (1 MB).
714
+ # @return [String]
715
+ #
716
+ # @!attribute [rw] partition_key
717
+ # Determines which shard in the stream the data record is assigned to.
718
+ # Partition keys are Unicode strings with a maximum length limit of
719
+ # 256 characters for each key. Amazon Kinesis uses the partition key
720
+ # as input to a hash function that maps the partition key and
721
+ # associated data to a specific shard. Specifically, an MD5 hash
722
+ # function is used to map partition keys to 128-bit integer values and
723
+ # to map associated data records to shards. As a result of this
724
+ # hashing mechanism, all data records with the same partition key map
725
+ # to the same shard within the stream.
726
+ # @return [String]
727
+ #
728
+ # @!attribute [rw] explicit_hash_key
729
+ # The hash value used to explicitly determine the shard the data
730
+ # record is assigned to by overriding the partition key hash.
731
+ # @return [String]
732
+ #
733
+ # @!attribute [rw] sequence_number_for_ordering
734
+ # Guarantees strictly increasing sequence numbers, for puts from the
735
+ # same client and to the same partition key. Usage: set the
736
+ # `SequenceNumberForOrdering` of record *n* to the sequence number of
737
+ # record *n-1* (as returned in the result when putting record *n-1*).
738
+ # If this parameter is not set, records will be coarsely ordered based
739
+ # on arrival time.
740
+ # @return [String]
741
+ #
742
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordInput AWS API Documentation
743
+ #
744
+ class PutRecordInput < Struct.new(
745
+ :stream_name,
746
+ :data,
747
+ :partition_key,
748
+ :explicit_hash_key,
749
+ :sequence_number_for_ordering)
750
+ include Aws::Structure
751
+ end
662
752
 
663
- # A `PutRecords` request.
664
- # @note When making an API call, pass PutRecordsInput
665
- # data as a hash:
666
- #
667
- # {
668
- # records: [ # required
669
- # {
670
- # data: "data", # required
671
- # explicit_hash_key: "HashKey",
672
- # partition_key: "PartitionKey", # required
673
- # },
674
- # ],
675
- # stream_name: "StreamName", # required
676
- # }
677
- # @!attribute [rw] records
678
- # The records associated with the request.
679
- # @return [Array<Types::PutRecordsRequestEntry>]
680
- #
681
- # @!attribute [rw] stream_name
682
- # The stream name associated with the request.
683
- # @return [String]
684
- class PutRecordsInput < Struct.new(
685
- :records,
686
- :stream_name)
687
- include Aws::Structure
688
- end
753
+ # Represents the output for `PutRecord`.
754
+ #
755
+ # @!attribute [rw] shard_id
756
+ # The shard ID of the shard where the data record was placed.
757
+ # @return [String]
758
+ #
759
+ # @!attribute [rw] sequence_number
760
+ # The sequence number identifier that was assigned to the put data
761
+ # record. The sequence number for the record is unique across all
762
+ # records in the stream. A sequence number is the identifier
763
+ # associated with every record put into the stream.
764
+ # @return [String]
765
+ #
766
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordOutput AWS API Documentation
767
+ #
768
+ class PutRecordOutput < Struct.new(
769
+ :shard_id,
770
+ :sequence_number)
771
+ include Aws::Structure
772
+ end
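
A sketch of writing a single record with `put_record`; the payload and partition key are placeholders, and the SDK performs the base64 encoding described above when serializing the request:

    resp = client.put_record(
      stream_name: 'my-stream',
      data: '{"event":"signup"}', # raw payload; encoded on the wire by the SDK
      partition_key: 'user-1234'  # the same key always hashes to the same shard
    )
    resp.shard_id        # shard the record landed on
    resp.sequence_number # unique within the stream
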
689
773
 
690
- # `PutRecords` results.
691
- # @!attribute [rw] failed_record_count
692
- # The number of unsuccessfully processed records in a `PutRecords`
693
- # request.
694
- # @return [Integer]
695
- #
696
- # @!attribute [rw] records
697
- # An array of successfully and unsuccessfully processed record
698
- # results, correlated with the request by natural ordering. A record
699
- # that is successfully added to a stream includes `SequenceNumber` and
700
- # `ShardId` in the result. A record that fails to be added to a stream
701
- # includes `ErrorCode` and `ErrorMessage` in the result.
702
- # @return [Array<Types::PutRecordsResultEntry>]
703
- class PutRecordsOutput < Struct.new(
704
- :failed_record_count,
705
- :records)
706
- include Aws::Structure
707
- end
774
+ # A `PutRecords` request.
775
+ #
776
+ # @note When making an API call, you may pass PutRecordsInput
777
+ # data as a hash:
778
+ #
779
+ # {
780
+ # records: [ # required
781
+ # {
782
+ # data: "data", # required
783
+ # explicit_hash_key: "HashKey",
784
+ # partition_key: "PartitionKey", # required
785
+ # },
786
+ # ],
787
+ # stream_name: "StreamName", # required
788
+ # }
789
+ #
790
+ # @!attribute [rw] records
791
+ # The records associated with the request.
792
+ # @return [Array<Types::PutRecordsRequestEntry>]
793
+ #
794
+ # @!attribute [rw] stream_name
795
+ # The stream name associated with the request.
796
+ # @return [String]
797
+ #
798
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordsInput AWS API Documentation
799
+ #
800
+ class PutRecordsInput < Struct.new(
801
+ :records,
802
+ :stream_name)
803
+ include Aws::Structure
804
+ end
708
805
 
709
- # Represents the output for `PutRecords`.
710
- # @note When making an API call, pass PutRecordsRequestEntry
711
- # data as a hash:
712
- #
713
- # {
714
- # data: "data", # required
715
- # explicit_hash_key: "HashKey",
716
- # partition_key: "PartitionKey", # required
717
- # }
718
- # @!attribute [rw] data
719
- # The data blob to put into the record, which is base64-encoded when
720
- # the blob is serialized. When the data blob (the payload before
721
- # base64-encoding) is added to the partition key size, the total size
722
- # must not exceed the maximum record size (1 MB).
723
- # @return [String]
724
- #
725
- # @!attribute [rw] explicit_hash_key
726
- # The hash value used to determine explicitly the shard that the data
727
- # record is assigned to by overriding the partition key hash.
728
- # @return [String]
729
- #
730
- # @!attribute [rw] partition_key
731
- # Determines which shard in the stream the data record is assigned to.
732
- # Partition keys are Unicode strings with a maximum length limit of
733
- # 256 characters for each key. Amazon Kinesis uses the partition key
734
- # as input to a hash function that maps the partition key and
735
- # associated data to a specific shard. Specifically, an MD5 hash
736
- # function is used to map partition keys to 128-bit integer values and
737
- # to map associated data records to shards. As a result of this
738
- # hashing mechanism, all data records with the same partition key map
739
- # to the same shard within the stream.
740
- # @return [String]
741
- class PutRecordsRequestEntry < Struct.new(
742
- :data,
743
- :explicit_hash_key,
744
- :partition_key)
745
- include Aws::Structure
746
- end
806
+ # `PutRecords` results.
807
+ #
808
+ # @!attribute [rw] failed_record_count
809
+ # The number of unsuccessfully processed records in a `PutRecords`
810
+ # request.
811
+ # @return [Integer]
812
+ #
813
+ # @!attribute [rw] records
814
+ # An array of successfully and unsuccessfully processed record
815
+ # results, correlated with the request by natural ordering. A record
816
+ # that is successfully added to a stream includes `SequenceNumber` and
817
+ # `ShardId` in the result. A record that fails to be added to a stream
818
+ # includes `ErrorCode` and `ErrorMessage` in the result.
819
+ # @return [Array<Types::PutRecordsResultEntry>]
820
+ #
821
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordsOutput AWS API Documentation
822
+ #
823
+ class PutRecordsOutput < Struct.new(
824
+ :failed_record_count,
825
+ :records)
826
+ include Aws::Structure
827
+ end
747
828
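Since results line up with request entries purely by position, a caller can zip the two arrays and resend only the entries whose result carries an `ErrorCode`. A minimal retry sketch, assuming `client` is an `Aws::Kinesis::Client` and `entries` is the array of request-entry hashes that was just sent to the placeholder stream `my-stream`:

    resp = client.put_records(stream_name: 'my-stream', records: entries)

    if resp.failed_record_count > 0
      # Pair each request entry with its result by natural ordering
      # and keep only the ones that failed (error_code is set).
      failed = entries.zip(resp.records)
                      .select { |_entry, result| result.error_code }
                      .map    { |entry, _result| entry }

      client.put_records(stream_name: 'my-stream', records: failed)
    end

`PutRecords` is not atomic, so partial failures of this kind are expected under throttling and are normally handled by exactly this sort of positional retry.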
 
748
- # Represents the result of an individual record from a `PutRecords`
749
- # request. A record that is successfully added to a stream includes
750
- # `SequenceNumber` and `ShardId` in the result. A record that fails to
751
- # be added to the stream includes `ErrorCode` and `ErrorMessage` in the
752
- # result.
753
- # @!attribute [rw] sequence_number
754
- # The sequence number for an individual record result.
755
- # @return [String]
756
- #
757
- # @!attribute [rw] shard_id
758
- # The shard ID for an individual record result.
759
- # @return [String]
760
- #
761
- # @!attribute [rw] error_code
762
- # The error code for an individual record result. `ErrorCodes` can be
763
- # either `ProvisionedThroughputExceededException` or
764
- # `InternalFailure`.
765
- # @return [String]
766
- #
767
- # @!attribute [rw] error_message
768
- # The error message for an individual record result. An `ErrorCode`
769
- # value of `ProvisionedThroughputExceededException` has an error
770
- # message that includes the account ID, stream name, and shard ID. An
771
- # `ErrorCode` value of `InternalFailure` has the error message
772
- # `"Internal Service Failure"`.
773
- # @return [String]
774
- class PutRecordsResultEntry < Struct.new(
775
- :sequence_number,
776
- :shard_id,
777
- :error_code,
778
- :error_message)
779
- include Aws::Structure
780
- end
829
+ # Represents a single record in a `PutRecords` request.
830
+ #
831
+ # @note When making an API call, you may pass PutRecordsRequestEntry
832
+ # data as a hash:
833
+ #
834
+ # {
835
+ # data: "data", # required
836
+ # explicit_hash_key: "HashKey",
837
+ # partition_key: "PartitionKey", # required
838
+ # }
839
+ #
840
+ # @!attribute [rw] data
841
+ # The data blob to put into the record, which is base64-encoded when
842
+ # the blob is serialized. When the data blob (the payload before
843
+ # base64-encoding) is added to the partition key size, the total size
844
+ # must not exceed the maximum record size (1 MB).
845
+ # @return [String]
846
+ #
847
+ # @!attribute [rw] explicit_hash_key
848
+ # The hash value used to determine explicitly the shard that the data
849
+ # record is assigned to by overriding the partition key hash.
850
+ # @return [String]
851
+ #
852
+ # @!attribute [rw] partition_key
853
+ # Determines which shard in the stream the data record is assigned to.
854
+ # Partition keys are Unicode strings with a maximum length limit of
855
+ # 256 characters for each key. Amazon Kinesis uses the partition key
856
+ # as input to a hash function that maps the partition key and
857
+ # associated data to a specific shard. Specifically, an MD5 hash
858
+ # function is used to map partition keys to 128-bit integer values and
859
+ # to map associated data records to shards. As a result of this
860
+ # hashing mechanism, all data records with the same partition key map
861
+ # to the same shard within the stream.
862
+ # @return [String]
863
+ #
864
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordsRequestEntry AWS API Documentation
865
+ #
866
+ class PutRecordsRequestEntry < Struct.new(
867
+ :data,
868
+ :explicit_hash_key,
869
+ :partition_key)
870
+ include Aws::Structure
871
+ end
781
872
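The MD5 mapping described for `partition_key` can be reproduced client-side as an illustration only; the service performs this mapping itself, so nothing here is required for normal use. `shards` is assumed to hold the currently open shards returned by an earlier `describe_stream` call.

    require 'digest'

    partition_key = 'user-1'
    # Hash the partition key to a 128-bit integer, as the documentation describes.
    key_hash = Digest::MD5.hexdigest(partition_key).to_i(16)

    # The open shard whose hash key range covers that integer owns the record.
    owning_shard = shards.find do |shard|
      range = shard.hash_key_range
      (range.starting_hash_key.to_i..range.ending_hash_key.to_i).cover?(key_hash)
    end

    owning_shard && owning_shard.shard_id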
 
782
- # The unit of data of the Amazon Kinesis stream, which is composed of a
783
- # sequence number, a partition key, and a data blob.
784
- # @!attribute [rw] sequence_number
785
- # The unique identifier of the record in the stream.
786
- # @return [String]
787
- #
788
- # @!attribute [rw] approximate_arrival_timestamp
789
- # The approximate time that the record was inserted into the stream.
790
- # @return [Time]
791
- #
792
- # @!attribute [rw] data
793
- # The data blob. The data in the blob is both opaque and immutable to
794
- # the Amazon Kinesis service, which does not inspect, interpret, or
795
- # change the data in the blob in any way. When the data blob (the
796
- # payload before base64-encoding) is added to the partition key size,
797
- # the total size must not exceed the maximum record size (1 MB).
798
- # @return [String]
799
- #
800
- # @!attribute [rw] partition_key
801
- # Identifies which shard in the stream the data record is assigned to.
802
- # @return [String]
803
- class Record < Struct.new(
804
- :sequence_number,
805
- :approximate_arrival_timestamp,
806
- :data,
807
- :partition_key)
808
- include Aws::Structure
809
- end
873
+ # Represents the result of an individual record from a `PutRecords`
874
+ # request. A record that is successfully added to a stream includes
875
+ # `SequenceNumber` and `ShardId` in the result. A record that fails to
876
+ # be added to the stream includes `ErrorCode` and `ErrorMessage` in the
877
+ # result.
878
+ #
879
+ # @!attribute [rw] sequence_number
880
+ # The sequence number for an individual record result.
881
+ # @return [String]
882
+ #
883
+ # @!attribute [rw] shard_id
884
+ # The shard ID for an individual record result.
885
+ # @return [String]
886
+ #
887
+ # @!attribute [rw] error_code
888
+ # The error code for an individual record result. `ErrorCodes` can be
889
+ # either `ProvisionedThroughputExceededException` or
890
+ # `InternalFailure`.
891
+ # @return [String]
892
+ #
893
+ # @!attribute [rw] error_message
894
+ # The error message for an individual record result. An `ErrorCode`
895
+ # value of `ProvisionedThroughputExceededException` has an error
896
+ # message that includes the account ID, stream name, and shard ID. An
897
+ # `ErrorCode` value of `InternalFailure` has the error message
898
+ # `"Internal Service Failure"`.
899
+ # @return [String]
900
+ #
901
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecordsResultEntry AWS API Documentation
902
+ #
903
+ class PutRecordsResultEntry < Struct.new(
904
+ :sequence_number,
905
+ :shard_id,
906
+ :error_code,
907
+ :error_message)
908
+ include Aws::Structure
909
+ end
810
910
 
811
- # Represents the input for `RemoveTagsFromStream`.
812
- # @note When making an API call, pass RemoveTagsFromStreamInput
813
- # data as a hash:
814
- #
815
- # {
816
- # stream_name: "StreamName", # required
817
- # tag_keys: ["TagKey"], # required
818
- # }
819
- # @!attribute [rw] stream_name
820
- # The name of the stream.
821
- # @return [String]
822
- #
823
- # @!attribute [rw] tag_keys
824
- # A list of tag keys. Each corresponding tag is removed from the
825
- # stream.
826
- # @return [Array<String>]
827
- class RemoveTagsFromStreamInput < Struct.new(
828
- :stream_name,
829
- :tag_keys)
830
- include Aws::Structure
831
- end
911
+ # The unit of data of the Amazon Kinesis stream, which is composed of a
912
+ # sequence number, a partition key, and a data blob.
913
+ #
914
+ # @!attribute [rw] sequence_number
915
+ # The unique identifier of the record in the stream.
916
+ # @return [String]
917
+ #
918
+ # @!attribute [rw] approximate_arrival_timestamp
919
+ # The approximate time that the record was inserted into the stream.
920
+ # @return [Time]
921
+ #
922
+ # @!attribute [rw] data
923
+ # The data blob. The data in the blob is both opaque and immutable to
924
+ # the Amazon Kinesis service, which does not inspect, interpret, or
925
+ # change the data in the blob in any way. When the data blob (the
926
+ # payload before base64-encoding) is added to the partition key size,
927
+ # the total size must not exceed the maximum record size (1 MB).
928
+ # @return [String]
929
+ #
930
+ # @!attribute [rw] partition_key
931
+ # Identifies which shard in the stream the data record is assigned to.
932
+ # @return [String]
933
+ #
934
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/Record AWS API Documentation
935
+ #
936
+ class Record < Struct.new(
937
+ :sequence_number,
938
+ :approximate_arrival_timestamp,
939
+ :data,
940
+ :partition_key)
941
+ include Aws::Structure
942
+ end
832
943
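A minimal read sketch that surfaces these `Record` fields, assuming `client` is an `Aws::Kinesis::Client` and that the stream name and shard ID are placeholders; the base64 encoding mentioned above is a wire-level detail, and `data` is exposed as the record payload.

    iterator = client.get_shard_iterator(
      stream_name: 'my-stream',
      shard_id: 'shardId-000000000000',
      shard_iterator_type: 'TRIM_HORIZON'
    ).shard_iterator

    resp = client.get_records(shard_iterator: iterator, limit: 25)
    resp.records.each do |record|
      # Each record carries sequence_number, approximate_arrival_timestamp,
      # partition_key, and the opaque data blob.
      puts "#{record.sequence_number} #{record.partition_key} #{record.data}"
    end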
 
833
- # The range of possible sequence numbers for the shard.
834
- # @!attribute [rw] starting_sequence_number
835
- # The starting sequence number for the range.
836
- # @return [String]
837
- #
838
- # @!attribute [rw] ending_sequence_number
839
- # The ending sequence number for the range. Shards that are in the
840
- # OPEN state have an ending sequence number of `null`.
841
- # @return [String]
842
- class SequenceNumberRange < Struct.new(
843
- :starting_sequence_number,
844
- :ending_sequence_number)
845
- include Aws::Structure
846
- end
944
+ # Represents the input for `RemoveTagsFromStream`.
945
+ #
946
+ # @note When making an API call, you may pass RemoveTagsFromStreamInput
947
+ # data as a hash:
948
+ #
949
+ # {
950
+ # stream_name: "StreamName", # required
951
+ # tag_keys: ["TagKey"], # required
952
+ # }
953
+ #
954
+ # @!attribute [rw] stream_name
955
+ # The name of the stream.
956
+ # @return [String]
957
+ #
958
+ # @!attribute [rw] tag_keys
959
+ # A list of tag keys. Each corresponding tag is removed from the
960
+ # stream.
961
+ # @return [Array<String>]
962
+ #
963
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/RemoveTagsFromStreamInput AWS API Documentation
964
+ #
965
+ class RemoveTagsFromStreamInput < Struct.new(
966
+ :stream_name,
967
+ :tag_keys)
968
+ include Aws::Structure
969
+ end
847
970
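A corresponding call sketch, with a placeholder stream name and tag keys:

    client.remove_tags_from_stream(
      stream_name: 'my-stream',
      tag_keys: ['environment', 'team']
    )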
 
848
- # A uniquely identified group of data records in an Amazon Kinesis
849
- # stream.
850
- # @!attribute [rw] shard_id
851
- # The unique identifier of the shard within the stream.
852
- # @return [String]
853
- #
854
- # @!attribute [rw] parent_shard_id
855
- # The shard ID of the shard's parent.
856
- # @return [String]
857
- #
858
- # @!attribute [rw] adjacent_parent_shard_id
859
- # The shard ID of the shard adjacent to the shard's parent.
860
- # @return [String]
861
- #
862
- # @!attribute [rw] hash_key_range
863
- # The range of possible hash key values for the shard, which is a set
864
- # of ordered contiguous positive integers.
865
- # @return [Types::HashKeyRange]
866
- #
867
- # @!attribute [rw] sequence_number_range
868
- # The range of possible sequence numbers for the shard.
869
- # @return [Types::SequenceNumberRange]
870
- class Shard < Struct.new(
871
- :shard_id,
872
- :parent_shard_id,
873
- :adjacent_parent_shard_id,
874
- :hash_key_range,
875
- :sequence_number_range)
876
- include Aws::Structure
877
- end
971
+ # The range of possible sequence numbers for the shard.
972
+ #
973
+ # @!attribute [rw] starting_sequence_number
974
+ # The starting sequence number for the range.
975
+ # @return [String]
976
+ #
977
+ # @!attribute [rw] ending_sequence_number
978
+ # The ending sequence number for the range. Shards that are in the
979
+ # OPEN state have an ending sequence number of `null`.
980
+ # @return [String]
981
+ #
982
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/SequenceNumberRange AWS API Documentation
983
+ #
984
+ class SequenceNumberRange < Struct.new(
985
+ :starting_sequence_number,
986
+ :ending_sequence_number)
987
+ include Aws::Structure
988
+ end
878
989
 
879
- # Represents the input for `SplitShard`.
880
- # @note When making an API call, pass SplitShardInput
881
- # data as a hash:
882
- #
883
- # {
884
- # stream_name: "StreamName", # required
885
- # shard_to_split: "ShardId", # required
886
- # new_starting_hash_key: "HashKey", # required
887
- # }
888
- # @!attribute [rw] stream_name
889
- # The name of the stream for the shard split.
890
- # @return [String]
891
- #
892
- # @!attribute [rw] shard_to_split
893
- # The shard ID of the shard to split.
894
- # @return [String]
895
- #
896
- # @!attribute [rw] new_starting_hash_key
897
- # A hash key value for the starting hash key of one of the child
898
- # shards created by the split. The hash key range for a given shard
899
- # constitutes a set of ordered contiguous positive integers. The value
900
- # for `NewStartingHashKey` must be in the range of hash keys being
901
- # mapped into the shard. The `NewStartingHashKey` hash key value and
902
- # all higher hash key values in hash key range are distributed to one
903
- # of the child shards. All the lower hash key values in the range are
904
- # distributed to the other child shard.
905
- # @return [String]
906
- class SplitShardInput < Struct.new(
907
- :stream_name,
908
- :shard_to_split,
909
- :new_starting_hash_key)
910
- include Aws::Structure
911
- end
990
+ # A uniquely identified group of data records in an Amazon Kinesis
991
+ # stream.
992
+ #
993
+ # @!attribute [rw] shard_id
994
+ # The unique identifier of the shard within the stream.
995
+ # @return [String]
996
+ #
997
+ # @!attribute [rw] parent_shard_id
998
+ # The shard ID of the shard's parent.
999
+ # @return [String]
1000
+ #
1001
+ # @!attribute [rw] adjacent_parent_shard_id
1002
+ # The shard ID of the shard adjacent to the shard's parent.
1003
+ # @return [String]
1004
+ #
1005
+ # @!attribute [rw] hash_key_range
1006
+ # The range of possible hash key values for the shard, which is a set
1007
+ # of ordered contiguous positive integers.
1008
+ # @return [Types::HashKeyRange]
1009
+ #
1010
+ # @!attribute [rw] sequence_number_range
1011
+ # The range of possible sequence numbers for the shard.
1012
+ # @return [Types::SequenceNumberRange]
1013
+ #
1014
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/Shard AWS API Documentation
1015
+ #
1016
+ class Shard < Struct.new(
1017
+ :shard_id,
1018
+ :parent_shard_id,
1019
+ :adjacent_parent_shard_id,
1020
+ :hash_key_range,
1021
+ :sequence_number_range)
1022
+ include Aws::Structure
1023
+ end
912
1024
 
913
- # Represents the output for DescribeStream.
914
- # @!attribute [rw] stream_name
915
- # The name of the stream being described.
916
- # @return [String]
917
- #
918
- # @!attribute [rw] stream_arn
919
- # The Amazon Resource Name (ARN) for the stream being described.
920
- # @return [String]
921
- #
922
- # @!attribute [rw] stream_status
923
- # The current status of the stream being described. The stream status
924
- # is one of the following states:
925
- #
926
- # * `CREATING` - The stream is being created. Amazon Kinesis
927
- # immediately returns and sets `StreamStatus` to `CREATING`.
928
- #
929
- # * `DELETING` - The stream is being deleted. The specified stream is
930
- # in the `DELETING` state until Amazon Kinesis completes the
931
- # deletion.
932
- #
933
- # * `ACTIVE` - The stream exists and is ready for read and write
934
- # operations or deletion. You should perform read and write
935
- # operations only on an `ACTIVE` stream.
936
- #
937
- # * `UPDATING` - Shards in the stream are being merged or split. Read
938
- # and write operations continue to work while the stream is in the
939
- # `UPDATING` state.
940
- # @return [String]
941
- #
942
- # @!attribute [rw] shards
943
- # The shards that comprise the stream.
944
- # @return [Array<Types::Shard>]
945
- #
946
- # @!attribute [rw] has_more_shards
947
- # If set to `true`, more shards in the stream are available to
948
- # describe.
949
- # @return [Boolean]
950
- #
951
- # @!attribute [rw] retention_period_hours
952
- # The current retention period, in hours.
953
- # @return [Integer]
954
- #
955
- # @!attribute [rw] stream_creation_timestamp
956
- # The approximate time that the stream was created.
957
- # @return [Time]
958
- #
959
- # @!attribute [rw] enhanced_monitoring
960
- # Represents the current enhanced monitoring settings of the stream.
961
- # @return [Array<Types::EnhancedMetrics>]
962
- class StreamDescription < Struct.new(
963
- :stream_name,
964
- :stream_arn,
965
- :stream_status,
966
- :shards,
967
- :has_more_shards,
968
- :retention_period_hours,
969
- :stream_creation_timestamp,
970
- :enhanced_monitoring)
971
- include Aws::Structure
972
- end
1025
+ # Represents the input for `SplitShard`.
1026
+ #
1027
+ # @note When making an API call, you may pass SplitShardInput
1028
+ # data as a hash:
1029
+ #
1030
+ # {
1031
+ # stream_name: "StreamName", # required
1032
+ # shard_to_split: "ShardId", # required
1033
+ # new_starting_hash_key: "HashKey", # required
1034
+ # }
1035
+ #
1036
+ # @!attribute [rw] stream_name
1037
+ # The name of the stream for the shard split.
1038
+ # @return [String]
1039
+ #
1040
+ # @!attribute [rw] shard_to_split
1041
+ # The shard ID of the shard to split.
1042
+ # @return [String]
1043
+ #
1044
+ # @!attribute [rw] new_starting_hash_key
1045
+ # A hash key value for the starting hash key of one of the child
1046
+ # shards created by the split. The hash key range for a given shard
1047
+ # constitutes a set of ordered contiguous positive integers. The value
1048
+ # for `NewStartingHashKey` must be in the range of hash keys being
1049
+ # mapped into the shard. The `NewStartingHashKey` hash key value and
1050
+ # all higher hash key values in hash key range are distributed to one
1051
+ # of the child shards. All the lower hash key values in the range are
1052
+ # distributed to the other child shard.
1053
+ # @return [String]
1054
+ #
1055
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/SplitShardInput AWS API Documentation
1056
+ #
1057
+ class SplitShardInput < Struct.new(
1058
+ :stream_name,
1059
+ :shard_to_split,
1060
+ :new_starting_hash_key)
1061
+ include Aws::Structure
1062
+ end
973
1063
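A common choice for `NewStartingHashKey` is the midpoint of the parent shard's hash key range, which splits the key space roughly in half. A sketch, assuming `client` is an `Aws::Kinesis::Client` and `shard` came from a `describe_stream` call on the placeholder stream `my-stream`:

    starting = shard.hash_key_range.starting_hash_key.to_i
    ending   = shard.hash_key_range.ending_hash_key.to_i
    midpoint = starting + (ending - starting) / 2 # Ruby handles the 128-bit integers natively

    client.split_shard(
      stream_name: 'my-stream',
      shard_to_split: shard.shard_id,
      new_starting_hash_key: midpoint.to_s # must fall inside the parent shard's range
    )

Keys at or above the midpoint land in one child shard and the lower keys in the other, giving each child about half of the parent's key space.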
 
974
- # Metadata assigned to the stream, consisting of a key-value pair.
975
- # @!attribute [rw] key
976
- # A unique identifier for the tag. Maximum length: 128 characters.
977
- # Valid characters: Unicode letters, digits, white space, \_ . / = + -
978
- # % @
979
- # @return [String]
980
- #
981
- # @!attribute [rw] value
982
- # An optional string, typically used to describe or define the tag.
983
- # Maximum length: 256 characters. Valid characters: Unicode letters,
984
- # digits, white space, \_ . / = + - % @
985
- # @return [String]
986
- class Tag < Struct.new(
987
- :key,
988
- :value)
989
- include Aws::Structure
990
- end
1064
+ # Represents the output for DescribeStream.
1065
+ #
1066
+ # @!attribute [rw] stream_name
1067
+ # The name of the stream being described.
1068
+ # @return [String]
1069
+ #
1070
+ # @!attribute [rw] stream_arn
1071
+ # The Amazon Resource Name (ARN) for the stream being described.
1072
+ # @return [String]
1073
+ #
1074
+ # @!attribute [rw] stream_status
1075
+ # The current status of the stream being described. The stream status
1076
+ # is one of the following states:
1077
+ #
1078
+ # * `CREATING` - The stream is being created. Amazon Kinesis
1079
+ # immediately returns and sets `StreamStatus` to `CREATING`.
1080
+ #
1081
+ # * `DELETING` - The stream is being deleted. The specified stream is
1082
+ # in the `DELETING` state until Amazon Kinesis completes the
1083
+ # deletion.
1084
+ #
1085
+ # * `ACTIVE` - The stream exists and is ready for read and write
1086
+ # operations or deletion. You should perform read and write
1087
+ # operations only on an `ACTIVE` stream.
1088
+ #
1089
+ # * `UPDATING` - Shards in the stream are being merged or split. Read
1090
+ # and write operations continue to work while the stream is in the
1091
+ # `UPDATING` state.
1092
+ # @return [String]
1093
+ #
1094
+ # @!attribute [rw] shards
1095
+ # The shards that comprise the stream.
1096
+ # @return [Array<Types::Shard>]
1097
+ #
1098
+ # @!attribute [rw] has_more_shards
1099
+ # If set to `true`, more shards in the stream are available to
1100
+ # describe.
1101
+ # @return [Boolean]
1102
+ #
1103
+ # @!attribute [rw] retention_period_hours
1104
+ # The current retention period, in hours.
1105
+ # @return [Integer]
1106
+ #
1107
+ # @!attribute [rw] stream_creation_timestamp
1108
+ # The approximate time that the stream was created.
1109
+ # @return [Time]
1110
+ #
1111
+ # @!attribute [rw] enhanced_monitoring
1112
+ # Represents the current enhanced monitoring settings of the stream.
1113
+ # @return [Array<Types::EnhancedMetrics>]
1114
+ #
1115
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StreamDescription AWS API Documentation
1116
+ #
1117
+ class StreamDescription < Struct.new(
1118
+ :stream_name,
1119
+ :stream_arn,
1120
+ :stream_status,
1121
+ :shards,
1122
+ :has_more_shards,
1123
+ :retention_period_hours,
1124
+ :stream_creation_timestamp,
1125
+ :enhanced_monitoring)
1126
+ include Aws::Structure
1127
+ end
991
1128
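Because one response may not include every shard, `HasMoreShards` is normally consumed in a loop, passing the last shard ID seen as the exclusive starting point of the next page. A minimal pagination sketch with a placeholder stream name, assuming `client` is an `Aws::Kinesis::Client`:

    shards = []
    params = { stream_name: 'my-stream' }

    loop do
      description = client.describe_stream(params).stream_description
      shards.concat(description.shards)
      break unless description.has_more_shards
      # Resume the listing after the last shard already collected.
      params[:exclusive_start_shard_id] = shards.last.shard_id
    end

    shards.map(&:shard_id)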
 
992
- # @note When making an API call, pass UpdateShardCountInput
993
- # data as a hash:
994
- #
995
- # {
996
- # stream_name: "StreamName", # required
997
- # target_shard_count: 1, # required
998
- # scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
999
- # }
1000
- # @!attribute [rw] stream_name
1001
- # The name of the stream.
1002
- # @return [String]
1003
- #
1004
- # @!attribute [rw] target_shard_count
1005
- # The new number of shards.
1006
- # @return [Integer]
1007
- #
1008
- # @!attribute [rw] scaling_type
1009
- # The scaling type. Uniform scaling creates shards of equal size.
1010
- # @return [String]
1011
- class UpdateShardCountInput < Struct.new(
1012
- :stream_name,
1013
- :target_shard_count,
1014
- :scaling_type)
1015
- include Aws::Structure
1016
- end
1129
+ # Metadata assigned to the stream, consisting of a key-value pair.
1130
+ #
1131
+ # @!attribute [rw] key
1132
+ # A unique identifier for the tag. Maximum length: 128 characters.
1133
+ # Valid characters: Unicode letters, digits, white space, \_ . / = + -
1134
+ # % @
1135
+ # @return [String]
1136
+ #
1137
+ # @!attribute [rw] value
1138
+ # An optional string, typically used to describe or define the tag.
1139
+ # Maximum length: 256 characters. Valid characters: Unicode letters,
1140
+ # digits, white space, \_ . / = + - % @
1141
+ # @return [String]
1142
+ #
1143
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/Tag AWS API Documentation
1144
+ #
1145
+ class Tag < Struct.new(
1146
+ :key,
1147
+ :value)
1148
+ include Aws::Structure
1149
+ end
1017
1150
 
1018
- # @!attribute [rw] stream_name
1019
- # The name of the stream.
1020
- # @return [String]
1021
- #
1022
- # @!attribute [rw] current_shard_count
1023
- # The current number of shards.
1024
- # @return [Integer]
1025
- #
1026
- # @!attribute [rw] target_shard_count
1027
- # The updated number of shards.
1028
- # @return [Integer]
1029
- class UpdateShardCountOutput < Struct.new(
1030
- :stream_name,
1031
- :current_shard_count,
1032
- :target_shard_count)
1033
- include Aws::Structure
1034
- end
1151
+ # @note When making an API call, you may pass UpdateShardCountInput
1152
+ # data as a hash:
1153
+ #
1154
+ # {
1155
+ # stream_name: "StreamName", # required
1156
+ # target_shard_count: 1, # required
1157
+ # scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
1158
+ # }
1159
+ #
1160
+ # @!attribute [rw] stream_name
1161
+ # The name of the stream.
1162
+ # @return [String]
1163
+ #
1164
+ # @!attribute [rw] target_shard_count
1165
+ # The new number of shards.
1166
+ # @return [Integer]
1167
+ #
1168
+ # @!attribute [rw] scaling_type
1169
+ # The scaling type. Uniform scaling creates shards of equal size.
1170
+ # @return [String]
1171
+ #
1172
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/UpdateShardCountInput AWS API Documentation
1173
+ #
1174
+ class UpdateShardCountInput < Struct.new(
1175
+ :stream_name,
1176
+ :target_shard_count,
1177
+ :scaling_type)
1178
+ include Aws::Structure
1179
+ end
1035
1180
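A call sketch for this input, with a placeholder stream name and target count; the fields read from the response correspond to the `UpdateShardCountOutput` entry documented just below.

    resp = client.update_shard_count(
      stream_name: 'my-stream',
      target_shard_count: 4,
      scaling_type: 'UNIFORM_SCALING' # the only scaling type accepted here
    )

    resp.current_shard_count # shard count before the resharding began
    resp.target_shard_count  # => 4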
 
1181
+ # @!attribute [rw] stream_name
1182
+ # The name of the stream.
1183
+ # @return [String]
1184
+ #
1185
+ # @!attribute [rw] current_shard_count
1186
+ # The current number of shards.
1187
+ # @return [Integer]
1188
+ #
1189
+ # @!attribute [rw] target_shard_count
1190
+ # The updated number of shards.
1191
+ # @return [Integer]
1192
+ #
1193
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/UpdateShardCountOutput AWS API Documentation
1194
+ #
1195
+ class UpdateShardCountOutput < Struct.new(
1196
+ :stream_name,
1197
+ :current_shard_count,
1198
+ :target_shard_count)
1199
+ include Aws::Structure
1036
1200
  end
1201
+
1037
1202
  end
1038
1203
  end