aws-sdk-kinesis 1.0.0.rc1

@@ -0,0 +1,7 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
@@ -0,0 +1,23 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module Kinesis
+     module Errors
+
+       extend Aws::Errors::DynamicErrors
+
+       # Raised when calling #load or #data on a resource class that can not be
+       # loaded. This can happen when:
+       #
+       # * A resource class has identifiers, but no data attributes.
+       # * Resource data is only available when making an API call that
+       #   enumerates all resources of that type.
+       class ResourceNotLoadable < RuntimeError; end
+     end
+   end
+ end
@@ -0,0 +1,25 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module Kinesis
+     class Resource
+
+       # @param options ({})
+       # @option options [Client] :client
+       def initialize(options = {})
+         @client = options[:client] || Client.new(options)
+       end
+
+       # @return [Client]
+       def client
+         @client
+       end
+
+     end
+   end
+ end
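
A minimal usage sketch for the resource class above (not part of the gem diff; the region and implicit credentials are illustrative assumptions):

    require "aws-sdk-kinesis"

    # The resource can build its own client from the options it receives...
    kinesis = Aws::Kinesis::Resource.new(region: "us-east-1")
    # ...or wrap a preconfigured client, as the initializer allows.
    client  = Aws::Kinesis::Client.new(region: "us-east-1")
    kinesis = Aws::Kinesis::Resource.new(client: client)
    kinesis.client # => the underlying Aws::Kinesis::Client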
@@ -0,0 +1,1038 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module Kinesis
+     module Types
+
+       # Represents the input for `AddTagsToStream`.
+       # @note When making an API call, pass AddTagsToStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         tags: { # required
+       #           "TagKey" => "TagValue",
+       #         },
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] tags
+       #   The set of key-value pairs to use to create the tags.
+       #   @return [Hash<String,String>]
+       class AddTagsToStreamInput < Struct.new(
+         :stream_name,
+         :tags)
+         include Aws::Structure
+       end
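
A short sketch of calling the operation this input shape documents (the stream name and tag values are illustrative; `client` is the `Aws::Kinesis::Client` from the earlier sketch):

    # The SDK serializes this hash into an AddTagsToStreamInput.
    client.add_tags_to_stream(
      stream_name: "my-stream",        # required
      tags: { "Project" => "ingest" }  # required, "TagKey" => "TagValue" pairs
    )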
+
+       # Represents the input for `CreateStream`.
+       # @note When making an API call, pass CreateStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_count: 1, # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   A name to identify the stream. The stream name is scoped to the AWS
+       #   account used by the application that creates the stream. It is also
+       #   scoped by region. That is, two streams in two different AWS accounts
+       #   can have the same name, and two streams in the same AWS account but
+       #   in two different regions can have the same name.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_count
+       #   The number of shards that the stream will use. The throughput of the
+       #   stream is a function of the number of shards; more shards are
+       #   required for greater provisioned throughput.
+       #   @return [Integer]
+       class CreateStreamInput < Struct.new(
+         :stream_name,
+         :shard_count)
+         include Aws::Structure
+       end
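
A sketch of creating a stream from the input above (the name and the shard count of 2 are example values; the `:stream_exists` waiter is assumed to ship with this gem's waiters file):

    client.create_stream(
      stream_name: "my-stream", # required
      shard_count: 2            # required
    )
    # CreateStream returns immediately; the stream moves from CREATING to ACTIVE.
    client.wait_until(:stream_exists, stream_name: "my-stream")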
+
+       # Represents the input for DecreaseStreamRetentionPeriod.
+       # @note When making an API call, pass DecreaseStreamRetentionPeriodInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         retention_period_hours: 1, # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream to modify.
+       #   @return [String]
+       #
+       # @!attribute [rw] retention_period_hours
+       #   The new retention period of the stream, in hours. Must be less than
+       #   the current retention period.
+       #   @return [Integer]
+       class DecreaseStreamRetentionPeriodInput < Struct.new(
+         :stream_name,
+         :retention_period_hours)
+         include Aws::Structure
+       end
+
+       # Represents the input for DeleteStream.
+       # @note When making an API call, pass DeleteStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream to delete.
+       #   @return [String]
+       class DeleteStreamInput < Struct.new(
+         :stream_name)
+         include Aws::Structure
+       end
+
+       # @api private
+       class DescribeLimitsInput < Aws::EmptyStructure; end
+
+       # @!attribute [rw] shard_limit
+       #   The maximum number of shards.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] open_shard_count
+       #   The number of open shards.
+       #   @return [Integer]
+       class DescribeLimitsOutput < Struct.new(
+         :shard_limit,
+         :open_shard_count)
+         include Aws::Structure
+       end
+
+       # Represents the input for `DescribeStream`.
+       # @note When making an API call, pass DescribeStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         limit: 1,
+       #         exclusive_start_shard_id: "ShardId",
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream to describe.
+       #   @return [String]
+       #
+       # @!attribute [rw] limit
+       #   The maximum number of shards to return in a single call. The default
+       #   value is 100. If you specify a value greater than 100, at most 100
+       #   shards are returned.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] exclusive_start_shard_id
+       #   The shard ID of the shard to start with.
+       #   @return [String]
+       class DescribeStreamInput < Struct.new(
+         :stream_name,
+         :limit,
+         :exclusive_start_shard_id)
+         include Aws::Structure
+       end
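
A pagination sketch for `DescribeStream`, walking all shards via `exclusive_start_shard_id` and the `has_more_shards` flag from the output documented next (stream name assumed from the earlier sketches):

    shards = []
    params = { stream_name: "my-stream", limit: 100 }
    loop do
      desc = client.describe_stream(params).stream_description
      shards.concat(desc.shards)
      break unless desc.has_more_shards
      # Resume after the last shard already seen.
      params[:exclusive_start_shard_id] = shards.last.shard_id
    end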
+
+       # Represents the output for `DescribeStream`.
+       # @!attribute [rw] stream_description
+       #   The current status of the stream, the stream ARN, an array of shard
+       #   objects that comprise the stream, and whether there are more shards
+       #   available.
+       #   @return [Types::StreamDescription]
+       class DescribeStreamOutput < Struct.new(
+         :stream_description)
+         include Aws::Structure
+       end
+
+       # Represents the input for DisableEnhancedMonitoring.
+       # @note When making an API call, pass DisableEnhancedMonitoringInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the Amazon Kinesis stream for which to disable enhanced
+       #   monitoring.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_level_metrics
+       #   List of shard-level metrics to disable.
+       #
+       #   The following are the valid shard-level metrics. The value "`ALL`"
+       #   disables every metric.
+       #
+       #   * `IncomingBytes`
+       #
+       #   * `IncomingRecords`
+       #
+       #   * `OutgoingBytes`
+       #
+       #   * `OutgoingRecords`
+       #
+       #   * `WriteProvisionedThroughputExceeded`
+       #
+       #   * `ReadProvisionedThroughputExceeded`
+       #
+       #   * `IteratorAgeMilliseconds`
+       #
+       #   * `ALL`
+       #
+       #   For more information, see [Monitoring the Amazon Kinesis Streams
+       #   Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
+       #   Developer Guide*.
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+       #   @return [Array<String>]
+       class DisableEnhancedMonitoringInput < Struct.new(
+         :stream_name,
+         :shard_level_metrics)
+         include Aws::Structure
+       end
+
+       # Represents the input for EnableEnhancedMonitoring.
+       # @note When making an API call, pass EnableEnhancedMonitoringInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_level_metrics: ["IncomingBytes"], # required, accepts IncomingBytes, IncomingRecords, OutgoingBytes, OutgoingRecords, WriteProvisionedThroughputExceeded, ReadProvisionedThroughputExceeded, IteratorAgeMilliseconds, ALL
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream for which to enable enhanced monitoring.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_level_metrics
+       #   List of shard-level metrics to enable.
+       #
+       #   The following are the valid shard-level metrics. The value "`ALL`"
+       #   enables every metric.
+       #
+       #   * `IncomingBytes`
+       #
+       #   * `IncomingRecords`
+       #
+       #   * `OutgoingBytes`
+       #
+       #   * `OutgoingRecords`
+       #
+       #   * `WriteProvisionedThroughputExceeded`
+       #
+       #   * `ReadProvisionedThroughputExceeded`
+       #
+       #   * `IteratorAgeMilliseconds`
+       #
+       #   * `ALL`
+       #
+       #   For more information, see [Monitoring the Amazon Kinesis Streams
+       #   Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
+       #   Developer Guide*.
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+       #   @return [Array<String>]
+       class EnableEnhancedMonitoringInput < Struct.new(
+         :stream_name,
+         :shard_level_metrics)
+         include Aws::Structure
+       end
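
A sketch of enabling shard-level CloudWatch metrics with the input above (the metric selection is an example; pass `["ALL"]` to enable everything):

    resp = client.enable_enhanced_monitoring(
      stream_name: "my-stream",
      shard_level_metrics: ["IncomingBytes", "IteratorAgeMilliseconds"]
    )
    resp.current_shard_level_metrics # metrics enhanced before the operation
    resp.desired_shard_level_metrics # metrics enhanced after the operation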
+
+       # Represents enhanced metrics types.
+       # @!attribute [rw] shard_level_metrics
+       #   List of shard-level metrics.
+       #
+       #   The following are the valid shard-level metrics. The value "`ALL`"
+       #   enhances every metric.
+       #
+       #   * `IncomingBytes`
+       #
+       #   * `IncomingRecords`
+       #
+       #   * `OutgoingBytes`
+       #
+       #   * `OutgoingRecords`
+       #
+       #   * `WriteProvisionedThroughputExceeded`
+       #
+       #   * `ReadProvisionedThroughputExceeded`
+       #
+       #   * `IteratorAgeMilliseconds`
+       #
+       #   * `ALL`
+       #
+       #   For more information, see [Monitoring the Amazon Kinesis Streams
+       #   Service with Amazon CloudWatch][1] in the *Amazon Kinesis Streams
+       #   Developer Guide*.
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+       #   @return [Array<String>]
+       class EnhancedMetrics < Struct.new(
+         :shard_level_metrics)
+         include Aws::Structure
+       end
+
+       # Represents the output for EnableEnhancedMonitoring and
+       # DisableEnhancedMonitoring.
+       # @!attribute [rw] stream_name
+       #   The name of the Amazon Kinesis stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] current_shard_level_metrics
+       #   Represents the current state of the metrics that are in the enhanced
+       #   state before the operation.
+       #   @return [Array<String>]
+       #
+       # @!attribute [rw] desired_shard_level_metrics
+       #   Represents the list of all the metrics that would be in the enhanced
+       #   state after the operation.
+       #   @return [Array<String>]
+       class EnhancedMonitoringOutput < Struct.new(
+         :stream_name,
+         :current_shard_level_metrics,
+         :desired_shard_level_metrics)
+         include Aws::Structure
+       end
+
+       # Represents the input for GetRecords.
+       # @note When making an API call, pass GetRecordsInput
+       #   data as a hash:
+       #
+       #       {
+       #         shard_iterator: "ShardIterator", # required
+       #         limit: 1,
+       #       }
+       # @!attribute [rw] shard_iterator
+       #   The position in the shard from which you want to start sequentially
+       #   reading data records. A shard iterator specifies this position using
+       #   the sequence number of a data record in the shard.
+       #   @return [String]
+       #
+       # @!attribute [rw] limit
+       #   The maximum number of records to return. Specify a value of up to
+       #   10,000. If you specify a value that is greater than 10,000,
+       #   GetRecords throws `InvalidArgumentException`.
+       #   @return [Integer]
+       class GetRecordsInput < Struct.new(
+         :shard_iterator,
+         :limit)
+         include Aws::Structure
+       end
+
+       # Represents the output for GetRecords.
+       # @!attribute [rw] records
+       #   The data records retrieved from the shard.
+       #   @return [Array<Types::Record>]
+       #
+       # @!attribute [rw] next_shard_iterator
+       #   The next position in the shard from which to start sequentially
+       #   reading data records. If set to `null`, the shard has been closed
+       #   and the requested iterator will not return any more data.
+       #   @return [String]
+       #
+       # @!attribute [rw] millis_behind_latest
+       #   The number of milliseconds the GetRecords response is from the tip
+       #   of the stream, indicating how far behind current time the consumer
+       #   is. A value of zero indicates record processing is caught up, and
+       #   there are no new records to process at this moment.
+       #   @return [Integer]
+       class GetRecordsOutput < Struct.new(
+         :records,
+         :next_shard_iterator,
+         :millis_behind_latest)
+         include Aws::Structure
+       end
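
A consumption-loop sketch built on the two GetRecords shapes above; it assumes `iterator` came from `GetShardIterator` (documented further below) and `process` stands in for your own record handler:

    while iterator
      resp = client.get_records(shard_iterator: iterator, limit: 1000)
      resp.records.each { |r| process(r.partition_key, r.data) } # process is your code
      iterator = resp.next_shard_iterator # nil once the shard is closed
      sleep(1) if resp.millis_behind_latest.zero? # caught up; avoid hot-polling
    end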
+
+       # Represents the input for `GetShardIterator`.
+       # @note When making an API call, pass GetShardIteratorInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_id: "ShardId", # required
+       #         shard_iterator_type: "AT_SEQUENCE_NUMBER", # required, accepts AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER, TRIM_HORIZON, LATEST, AT_TIMESTAMP
+       #         starting_sequence_number: "SequenceNumber",
+       #         timestamp: Time.now,
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the Amazon Kinesis stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_id
+       #   The shard ID of the Amazon Kinesis shard to get the iterator for.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_iterator_type
+       #   Determines how the shard iterator is used to start reading data
+       #   records from the shard.
+       #
+       #   The following are the valid Amazon Kinesis shard iterator types:
+       #
+       #   * AT\_SEQUENCE\_NUMBER - Start reading from the position denoted by
+       #     a specific sequence number, provided in the value
+       #     `StartingSequenceNumber`.
+       #
+       #   * AFTER\_SEQUENCE\_NUMBER - Start reading right after the position
+       #     denoted by a specific sequence number, provided in the value
+       #     `StartingSequenceNumber`.
+       #
+       #   * AT\_TIMESTAMP - Start reading from the position denoted by a
+       #     specific timestamp, provided in the value `Timestamp`.
+       #
+       #   * TRIM\_HORIZON - Start reading at the last untrimmed record in the
+       #     shard in the system, which is the oldest data record in the shard.
+       #
+       #   * LATEST - Start reading just after the most recent record in the
+       #     shard, so that you always read the most recent data in the shard.
+       #   @return [String]
+       #
+       # @!attribute [rw] starting_sequence_number
+       #   The sequence number of the data record in the shard from which to
+       #   start reading. Used with shard iterator type AT\_SEQUENCE\_NUMBER
+       #   and AFTER\_SEQUENCE\_NUMBER.
+       #   @return [String]
+       #
+       # @!attribute [rw] timestamp
+       #   The timestamp of the data record from which to start reading. Used
+       #   with shard iterator type AT\_TIMESTAMP. A timestamp is the Unix
+       #   epoch date with precision in milliseconds. For example,
+       #   `2016-04-04T19:58:46.480-00:00` or `1459799926.480`. If a record
+       #   with this exact timestamp does not exist, the iterator returned is
+       #   for the next (later) record. If the timestamp is older than the
+       #   current trim horizon, the iterator returned is for the oldest
+       #   untrimmed data record (TRIM\_HORIZON).
+       #   @return [Time]
+       class GetShardIteratorInput < Struct.new(
+         :stream_name,
+         :shard_id,
+         :shard_iterator_type,
+         :starting_sequence_number,
+         :timestamp)
+         include Aws::Structure
+       end
+
+       # Represents the output for `GetShardIterator`.
+       # @!attribute [rw] shard_iterator
+       #   The position in the shard from which to start reading data records
+       #   sequentially. A shard iterator specifies this position using the
+       #   sequence number of a data record in a shard.
+       #   @return [String]
+       class GetShardIteratorOutput < Struct.new(
+         :shard_iterator)
+         include Aws::Structure
+       end
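
A sketch of obtaining the iterator consumed by the GetRecords loop earlier (the shard ID is an example value; a real one comes from `DescribeStream`):

    iterator = client.get_shard_iterator(
      stream_name: "my-stream",
      shard_id: "shardId-000000000000",
      shard_iterator_type: "TRIM_HORIZON" # start at the oldest untrimmed record
    ).shard_iterator

    # Or start from a point in time instead:
    client.get_shard_iterator(
      stream_name: "my-stream",
      shard_id: "shardId-000000000000",
      shard_iterator_type: "AT_TIMESTAMP",
      timestamp: Time.now - 3600 # one hour ago
    )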
+
+       # The range of possible hash key values for the shard, which is a set of
+       # ordered contiguous positive integers.
+       # @!attribute [rw] starting_hash_key
+       #   The starting hash key of the hash key range.
+       #   @return [String]
+       #
+       # @!attribute [rw] ending_hash_key
+       #   The ending hash key of the hash key range.
+       #   @return [String]
+       class HashKeyRange < Struct.new(
+         :starting_hash_key,
+         :ending_hash_key)
+         include Aws::Structure
+       end
+
+       # Represents the input for IncreaseStreamRetentionPeriod.
+       # @note When making an API call, pass IncreaseStreamRetentionPeriodInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         retention_period_hours: 1, # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream to modify.
+       #   @return [String]
+       #
+       # @!attribute [rw] retention_period_hours
+       #   The new retention period of the stream, in hours. Must be more than
+       #   the current retention period.
+       #   @return [Integer]
+       class IncreaseStreamRetentionPeriodInput < Struct.new(
+         :stream_name,
+         :retention_period_hours)
+         include Aws::Structure
+       end
+
+       # Represents the input for `ListStreams`.
+       # @note When making an API call, pass ListStreamsInput
+       #   data as a hash:
+       #
+       #       {
+       #         limit: 1,
+       #         exclusive_start_stream_name: "StreamName",
+       #       }
+       # @!attribute [rw] limit
+       #   The maximum number of streams to list.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] exclusive_start_stream_name
+       #   The name of the stream to start the list with.
+       #   @return [String]
+       class ListStreamsInput < Struct.new(
+         :limit,
+         :exclusive_start_stream_name)
+         include Aws::Structure
+       end
+
+       # Represents the output for `ListStreams`.
+       # @!attribute [rw] stream_names
+       #   The names of the streams that are associated with the AWS account
+       #   making the `ListStreams` request.
+       #   @return [Array<String>]
+       #
+       # @!attribute [rw] has_more_streams
+       #   If set to `true`, there are more streams available to list.
+       #   @return [Boolean]
+       class ListStreamsOutput < Struct.new(
+         :stream_names,
+         :has_more_streams)
+         include Aws::Structure
+       end
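
A pagination sketch combining the two `ListStreams` shapes above:

    names = []
    params = { limit: 100 }
    loop do
      resp = client.list_streams(params)
      names.concat(resp.stream_names)
      break unless resp.has_more_streams
      # Resume after the last stream name already collected.
      params[:exclusive_start_stream_name] = names.last
    end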
+
+       # Represents the input for `ListTagsForStream`.
+       # @note When making an API call, pass ListTagsForStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         exclusive_start_tag_key: "TagKey",
+       #         limit: 1,
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] exclusive_start_tag_key
+       #   The key to use as the starting point for the list of tags. If this
+       #   parameter is set, `ListTagsForStream` gets all tags that occur after
+       #   `ExclusiveStartTagKey`.
+       #   @return [String]
+       #
+       # @!attribute [rw] limit
+       #   The number of tags to return. If this number is less than the total
+       #   number of tags associated with the stream, `HasMoreTags` is set to
+       #   `true`. To list additional tags, set `ExclusiveStartTagKey` to the
+       #   last key in the response.
+       #   @return [Integer]
+       class ListTagsForStreamInput < Struct.new(
+         :stream_name,
+         :exclusive_start_tag_key,
+         :limit)
+         include Aws::Structure
+       end
+
+       # Represents the output for `ListTagsForStream`.
+       # @!attribute [rw] tags
+       #   A list of tags associated with `StreamName`, starting with the first
+       #   tag after `ExclusiveStartTagKey` and up to the specified `Limit`.
+       #   @return [Array<Types::Tag>]
+       #
+       # @!attribute [rw] has_more_tags
+       #   If set to `true`, more tags are available. To request additional
+       #   tags, set `ExclusiveStartTagKey` to the key of the last tag
+       #   returned.
+       #   @return [Boolean]
+       class ListTagsForStreamOutput < Struct.new(
+         :tags,
+         :has_more_tags)
+         include Aws::Structure
+       end
+
+       # Represents the input for `MergeShards`.
+       # @note When making an API call, pass MergeShardsInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_to_merge: "ShardId", # required
+       #         adjacent_shard_to_merge: "ShardId", # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream for the merge.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_to_merge
+       #   The shard ID of the shard to combine with the adjacent shard for the
+       #   merge.
+       #   @return [String]
+       #
+       # @!attribute [rw] adjacent_shard_to_merge
+       #   The shard ID of the adjacent shard for the merge.
+       #   @return [String]
+       class MergeShardsInput < Struct.new(
+         :stream_name,
+         :shard_to_merge,
+         :adjacent_shard_to_merge)
+         include Aws::Structure
+       end
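
A sketch of merging two shards with the input above (the shard IDs are illustrative; both shards must be open and their hash key ranges adjacent):

    client.merge_shards(
      stream_name: "my-stream",
      shard_to_merge: "shardId-000000000000",
      adjacent_shard_to_merge: "shardId-000000000001"
    )
    # The stream status is UPDATING while the merge is in progress.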
+
+       # Represents the input for `PutRecord`.
+       # @note When making an API call, pass PutRecordInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         data: "data", # required
+       #         partition_key: "PartitionKey", # required
+       #         explicit_hash_key: "HashKey",
+       #         sequence_number_for_ordering: "SequenceNumber",
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream to put the data record into.
+       #   @return [String]
+       #
+       # @!attribute [rw] data
+       #   The data blob to put into the record, which is base64-encoded when
+       #   the blob is serialized. When the data blob (the payload before
+       #   base64-encoding) is added to the partition key size, the total size
+       #   must not exceed the maximum record size (1 MB).
+       #   @return [String]
+       #
+       # @!attribute [rw] partition_key
+       #   Determines which shard in the stream the data record is assigned to.
+       #   Partition keys are Unicode strings with a maximum length limit of
+       #   256 characters for each key. Amazon Kinesis uses the partition key
+       #   as input to a hash function that maps the partition key and
+       #   associated data to a specific shard. Specifically, an MD5 hash
+       #   function is used to map partition keys to 128-bit integer values and
+       #   to map associated data records to shards. As a result of this
+       #   hashing mechanism, all data records with the same partition key map
+       #   to the same shard within the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] explicit_hash_key
+       #   The hash value used to explicitly determine the shard the data
+       #   record is assigned to by overriding the partition key hash.
+       #   @return [String]
+       #
+       # @!attribute [rw] sequence_number_for_ordering
+       #   Guarantees strictly increasing sequence numbers, for puts from the
+       #   same client and to the same partition key. Usage: set the
+       #   `SequenceNumberForOrdering` of record *n* to the sequence number of
+       #   record *n-1* (as returned in the result when putting record *n-1*).
+       #   If this parameter is not set, records will be coarsely ordered based
+       #   on arrival time.
+       #   @return [String]
+       class PutRecordInput < Struct.new(
+         :stream_name,
+         :data,
+         :partition_key,
+         :explicit_hash_key,
+         :sequence_number_for_ordering)
+         include Aws::Structure
+       end
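
A sketch of strictly ordered puts to one partition key, chaining `sequence_number_for_ordering` as the attribute documentation above describes (the payloads and key are examples):

    last_seq = nil
    %w[event-1 event-2 event-3].each do |payload|
      params = {
        stream_name: "my-stream",
        data: payload,            # raw bytes; base64-encoded on the wire
        partition_key: "user-42"  # same key, so same shard
      }
      # Set the ordering token to the sequence number of the previous record.
      params[:sequence_number_for_ordering] = last_seq if last_seq
      last_seq = client.put_record(params).sequence_number
    end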
+
+       # Represents the output for `PutRecord`.
+       # @!attribute [rw] shard_id
+       #   The shard ID of the shard where the data record was placed.
+       #   @return [String]
+       #
+       # @!attribute [rw] sequence_number
+       #   The sequence number identifier that was assigned to the put data
+       #   record. The sequence number for the record is unique across all
+       #   records in the stream. A sequence number is the identifier
+       #   associated with every record put into the stream.
+       #   @return [String]
+       class PutRecordOutput < Struct.new(
+         :shard_id,
+         :sequence_number)
+         include Aws::Structure
+       end
+
+       # A `PutRecords` request.
+       # @note When making an API call, pass PutRecordsInput
+       #   data as a hash:
+       #
+       #       {
+       #         records: [ # required
+       #           {
+       #             data: "data", # required
+       #             explicit_hash_key: "HashKey",
+       #             partition_key: "PartitionKey", # required
+       #           },
+       #         ],
+       #         stream_name: "StreamName", # required
+       #       }
+       # @!attribute [rw] records
+       #   The records associated with the request.
+       #   @return [Array<Types::PutRecordsRequestEntry>]
+       #
+       # @!attribute [rw] stream_name
+       #   The stream name associated with the request.
+       #   @return [String]
+       class PutRecordsInput < Struct.new(
+         :records,
+         :stream_name)
+         include Aws::Structure
+       end
+
+       # `PutRecords` results.
+       # @!attribute [rw] failed_record_count
+       #   The number of unsuccessfully processed records in a `PutRecords`
+       #   request.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] records
+       #   An array of successfully and unsuccessfully processed record
+       #   results, correlated with the request by natural ordering. A record
+       #   that is successfully added to a stream includes `SequenceNumber` and
+       #   `ShardId` in the result. A record that fails to be added to a stream
+       #   includes `ErrorCode` and `ErrorMessage` in the result.
+       #   @return [Array<Types::PutRecordsResultEntry>]
+       class PutRecordsOutput < Struct.new(
+         :failed_record_count,
+         :records)
+         include Aws::Structure
+       end
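
A batching sketch that retries only the failed entries, using the positional correlation between request entries and results described above (the stream name and payloads are examples):

    entries = ("a".."e").map { |s| { data: s, partition_key: s } }
    until entries.empty?
      resp = client.put_records(stream_name: "my-stream", records: entries)
      break if resp.failed_record_count.zero?
      # Keep only the entries whose results carry an error_code, then retry.
      entries = entries.zip(resp.records)
                       .select { |_entry, result| result.error_code }
                       .map(&:first)
      sleep 1 # back off before retrying throttled records
    end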
+
+       # Represents a single record in a `PutRecords` request.
+       # @note When making an API call, pass PutRecordsRequestEntry
+       #   data as a hash:
+       #
+       #       {
+       #         data: "data", # required
+       #         explicit_hash_key: "HashKey",
+       #         partition_key: "PartitionKey", # required
+       #       }
+       # @!attribute [rw] data
+       #   The data blob to put into the record, which is base64-encoded when
+       #   the blob is serialized. When the data blob (the payload before
+       #   base64-encoding) is added to the partition key size, the total size
+       #   must not exceed the maximum record size (1 MB).
+       #   @return [String]
+       #
+       # @!attribute [rw] explicit_hash_key
+       #   The hash value used to determine explicitly the shard that the data
+       #   record is assigned to by overriding the partition key hash.
+       #   @return [String]
+       #
+       # @!attribute [rw] partition_key
+       #   Determines which shard in the stream the data record is assigned to.
+       #   Partition keys are Unicode strings with a maximum length limit of
+       #   256 characters for each key. Amazon Kinesis uses the partition key
+       #   as input to a hash function that maps the partition key and
+       #   associated data to a specific shard. Specifically, an MD5 hash
+       #   function is used to map partition keys to 128-bit integer values and
+       #   to map associated data records to shards. As a result of this
+       #   hashing mechanism, all data records with the same partition key map
+       #   to the same shard within the stream.
+       #   @return [String]
+       class PutRecordsRequestEntry < Struct.new(
+         :data,
+         :explicit_hash_key,
+         :partition_key)
+         include Aws::Structure
+       end
+
+       # Represents the result of an individual record from a `PutRecords`
+       # request. A record that is successfully added to a stream includes
+       # `SequenceNumber` and `ShardId` in the result. A record that fails to
+       # be added to the stream includes `ErrorCode` and `ErrorMessage` in the
+       # result.
+       # @!attribute [rw] sequence_number
+       #   The sequence number for an individual record result.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_id
+       #   The shard ID for an individual record result.
+       #   @return [String]
+       #
+       # @!attribute [rw] error_code
+       #   The error code for an individual record result. `ErrorCodes` can be
+       #   either `ProvisionedThroughputExceededException` or
+       #   `InternalFailure`.
+       #   @return [String]
+       #
+       # @!attribute [rw] error_message
+       #   The error message for an individual record result. An `ErrorCode`
+       #   value of `ProvisionedThroughputExceededException` has an error
+       #   message that includes the account ID, stream name, and shard ID. An
+       #   `ErrorCode` value of `InternalFailure` has the error message
+       #   `"Internal Service Failure"`.
+       #   @return [String]
+       class PutRecordsResultEntry < Struct.new(
+         :sequence_number,
+         :shard_id,
+         :error_code,
+         :error_message)
+         include Aws::Structure
+       end
+
+       # The unit of data of the Amazon Kinesis stream, which is composed of a
+       # sequence number, a partition key, and a data blob.
+       # @!attribute [rw] sequence_number
+       #   The unique identifier of the record in the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] approximate_arrival_timestamp
+       #   The approximate time that the record was inserted into the stream.
+       #   @return [Time]
+       #
+       # @!attribute [rw] data
+       #   The data blob. The data in the blob is both opaque and immutable to
+       #   the Amazon Kinesis service, which does not inspect, interpret, or
+       #   change the data in the blob in any way. When the data blob (the
+       #   payload before base64-encoding) is added to the partition key size,
+       #   the total size must not exceed the maximum record size (1 MB).
+       #   @return [String]
+       #
+       # @!attribute [rw] partition_key
+       #   Identifies which shard in the stream the data record is assigned to.
+       #   @return [String]
+       class Record < Struct.new(
+         :sequence_number,
+         :approximate_arrival_timestamp,
+         :data,
+         :partition_key)
+         include Aws::Structure
+       end
+
+       # Represents the input for `RemoveTagsFromStream`.
+       # @note When making an API call, pass RemoveTagsFromStreamInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         tag_keys: ["TagKey"], # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] tag_keys
+       #   A list of tag keys. Each corresponding tag is removed from the
+       #   stream.
+       #   @return [Array<String>]
+       class RemoveTagsFromStreamInput < Struct.new(
+         :stream_name,
+         :tag_keys)
+         include Aws::Structure
+       end
+
+       # The range of possible sequence numbers for the shard.
+       # @!attribute [rw] starting_sequence_number
+       #   The starting sequence number for the range.
+       #   @return [String]
+       #
+       # @!attribute [rw] ending_sequence_number
+       #   The ending sequence number for the range. Shards that are in the
+       #   OPEN state have an ending sequence number of `null`.
+       #   @return [String]
+       class SequenceNumberRange < Struct.new(
+         :starting_sequence_number,
+         :ending_sequence_number)
+         include Aws::Structure
+       end
+
+       # A uniquely identified group of data records in an Amazon Kinesis
+       # stream.
+       # @!attribute [rw] shard_id
+       #   The unique identifier of the shard within the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] parent_shard_id
+       #   The shard ID of the shard's parent.
+       #   @return [String]
+       #
+       # @!attribute [rw] adjacent_parent_shard_id
+       #   The shard ID of the shard adjacent to the shard's parent.
+       #   @return [String]
+       #
+       # @!attribute [rw] hash_key_range
+       #   The range of possible hash key values for the shard, which is a set
+       #   of ordered contiguous positive integers.
+       #   @return [Types::HashKeyRange]
+       #
+       # @!attribute [rw] sequence_number_range
+       #   The range of possible sequence numbers for the shard.
+       #   @return [Types::SequenceNumberRange]
+       class Shard < Struct.new(
+         :shard_id,
+         :parent_shard_id,
+         :adjacent_parent_shard_id,
+         :hash_key_range,
+         :sequence_number_range)
+         include Aws::Structure
+       end
+
+       # Represents the input for `SplitShard`.
+       # @note When making an API call, pass SplitShardInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         shard_to_split: "ShardId", # required
+       #         new_starting_hash_key: "HashKey", # required
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream for the shard split.
+       #   @return [String]
+       #
+       # @!attribute [rw] shard_to_split
+       #   The shard ID of the shard to split.
+       #   @return [String]
+       #
+       # @!attribute [rw] new_starting_hash_key
+       #   A hash key value for the starting hash key of one of the child
+       #   shards created by the split. The hash key range for a given shard
+       #   constitutes a set of ordered contiguous positive integers. The value
+       #   for `NewStartingHashKey` must be in the range of hash keys being
+       #   mapped into the shard. The `NewStartingHashKey` hash key value and
+       #   all higher hash key values in hash key range are distributed to one
+       #   of the child shards. All the lower hash key values in the range are
+       #   distributed to the other child shard.
+       #   @return [String]
+       class SplitShardInput < Struct.new(
+         :stream_name,
+         :shard_to_split,
+         :new_starting_hash_key)
+         include Aws::Structure
+       end
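
An even-split sketch: choose the midpoint of the parent shard's hash key range (see `Shard#hash_key_range` above) as `new_starting_hash_key`; the shard lookup and stream name are illustrative:

    shard = client.describe_stream(stream_name: "my-stream")
                  .stream_description.shards.first
    lo = shard.hash_key_range.starting_hash_key.to_i
    hi = shard.hash_key_range.ending_hash_key.to_i
    client.split_shard(
      stream_name: "my-stream",
      shard_to_split: shard.shard_id,
      new_starting_hash_key: ((lo + hi) / 2).to_s # midpoint of the range
    )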
+
+       # Represents the output for DescribeStream.
+       # @!attribute [rw] stream_name
+       #   The name of the stream being described.
+       #   @return [String]
+       #
+       # @!attribute [rw] stream_arn
+       #   The Amazon Resource Name (ARN) for the stream being described.
+       #   @return [String]
+       #
+       # @!attribute [rw] stream_status
+       #   The current status of the stream being described. The stream status
+       #   is one of the following states:
+       #
+       #   * `CREATING` - The stream is being created. Amazon Kinesis
+       #     immediately returns and sets `StreamStatus` to `CREATING`.
+       #
+       #   * `DELETING` - The stream is being deleted. The specified stream is
+       #     in the `DELETING` state until Amazon Kinesis completes the
+       #     deletion.
+       #
+       #   * `ACTIVE` - The stream exists and is ready for read and write
+       #     operations or deletion. You should perform read and write
+       #     operations only on an `ACTIVE` stream.
+       #
+       #   * `UPDATING` - Shards in the stream are being merged or split. Read
+       #     and write operations continue to work while the stream is in the
+       #     `UPDATING` state.
+       #   @return [String]
+       #
+       # @!attribute [rw] shards
+       #   The shards that comprise the stream.
+       #   @return [Array<Types::Shard>]
+       #
+       # @!attribute [rw] has_more_shards
+       #   If set to `true`, more shards in the stream are available to
+       #   describe.
+       #   @return [Boolean]
+       #
+       # @!attribute [rw] retention_period_hours
+       #   The current retention period, in hours.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] stream_creation_timestamp
+       #   The approximate time that the stream was created.
+       #   @return [Time]
+       #
+       # @!attribute [rw] enhanced_monitoring
+       #   Represents the current enhanced monitoring settings of the stream.
+       #   @return [Array<Types::EnhancedMetrics>]
+       class StreamDescription < Struct.new(
+         :stream_name,
+         :stream_arn,
+         :stream_status,
+         :shards,
+         :has_more_shards,
+         :retention_period_hours,
+         :stream_creation_timestamp,
+         :enhanced_monitoring)
+         include Aws::Structure
+       end
+
+       # Metadata assigned to the stream, consisting of a key-value pair.
+       # @!attribute [rw] key
+       #   A unique identifier for the tag. Maximum length: 128 characters.
+       #   Valid characters: Unicode letters, digits, white space, \_ . / = + -
+       #   % @
+       #   @return [String]
+       #
+       # @!attribute [rw] value
+       #   An optional string, typically used to describe or define the tag.
+       #   Maximum length: 256 characters. Valid characters: Unicode letters,
+       #   digits, white space, \_ . / = + - % @
+       #   @return [String]
+       class Tag < Struct.new(
+         :key,
+         :value)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass UpdateShardCountInput
+       #   data as a hash:
+       #
+       #       {
+       #         stream_name: "StreamName", # required
+       #         target_shard_count: 1, # required
+       #         scaling_type: "UNIFORM_SCALING", # required, accepts UNIFORM_SCALING
+       #       }
+       # @!attribute [rw] stream_name
+       #   The name of the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] target_shard_count
+       #   The new number of shards.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] scaling_type
+       #   The scaling type. Uniform scaling creates shards of equal size.
+       #   @return [String]
+       class UpdateShardCountInput < Struct.new(
+         :stream_name,
+         :target_shard_count,
+         :scaling_type)
+         include Aws::Structure
+       end
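
A resharding sketch for the input above (the target of 4 shards is an example; `UNIFORM_SCALING` is the only value this API accepts):

    resp = client.update_shard_count(
      stream_name: "my-stream",
      target_shard_count: 4,
      scaling_type: "UNIFORM_SCALING"
    )
    resp.current_shard_count # shard count before the update
    resp.target_shard_count  # shard count being scaled to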
+
+       # @!attribute [rw] stream_name
+       #   The name of the stream.
+       #   @return [String]
+       #
+       # @!attribute [rw] current_shard_count
+       #   The current number of shards.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] target_shard_count
+       #   The updated number of shards.
+       #   @return [Integer]
+       class UpdateShardCountOutput < Struct.new(
+         :stream_name,
+         :current_shard_count,
+         :target_shard_count)
+         include Aws::Structure
+       end
+
+     end
+   end
+ end