@aws-sdk/client-kinesis 3.231.0 → 3.232.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/dist-cjs/commands/AddTagsToStreamCommand.js +2 -0
  2. package/dist-cjs/commands/DecreaseStreamRetentionPeriodCommand.js +2 -0
  3. package/dist-cjs/commands/DeleteStreamCommand.js +2 -0
  4. package/dist-cjs/commands/DeregisterStreamConsumerCommand.js +3 -0
  5. package/dist-cjs/commands/DescribeStreamCommand.js +2 -0
  6. package/dist-cjs/commands/DescribeStreamConsumerCommand.js +3 -0
  7. package/dist-cjs/commands/DescribeStreamSummaryCommand.js +2 -0
  8. package/dist-cjs/commands/DisableEnhancedMonitoringCommand.js +2 -0
  9. package/dist-cjs/commands/EnableEnhancedMonitoringCommand.js +2 -0
  10. package/dist-cjs/commands/GetRecordsCommand.js +2 -0
  11. package/dist-cjs/commands/GetShardIteratorCommand.js +2 -0
  12. package/dist-cjs/commands/IncreaseStreamRetentionPeriodCommand.js +2 -0
  13. package/dist-cjs/commands/ListShardsCommand.js +2 -0
  14. package/dist-cjs/commands/ListStreamConsumersCommand.js +2 -0
  15. package/dist-cjs/commands/ListTagsForStreamCommand.js +2 -0
  16. package/dist-cjs/commands/MergeShardsCommand.js +2 -0
  17. package/dist-cjs/commands/PutRecordCommand.js +2 -0
  18. package/dist-cjs/commands/PutRecordsCommand.js +2 -0
  19. package/dist-cjs/commands/RegisterStreamConsumerCommand.js +2 -0
  20. package/dist-cjs/commands/RemoveTagsFromStreamCommand.js +2 -0
  21. package/dist-cjs/commands/SplitShardCommand.js +2 -0
  22. package/dist-cjs/commands/StartStreamEncryptionCommand.js +2 -0
  23. package/dist-cjs/commands/StopStreamEncryptionCommand.js +2 -0
  24. package/dist-cjs/commands/SubscribeToShardCommand.js +2 -0
  25. package/dist-cjs/commands/UpdateShardCountCommand.js +2 -0
  26. package/dist-cjs/commands/UpdateStreamModeCommand.js +2 -0
  27. package/dist-cjs/endpoint/ruleset.js +1069 -5
  28. package/dist-cjs/models/models_0.js +19 -2
  29. package/dist-cjs/pagination/ListStreamsPaginator.js +36 -0
  30. package/dist-cjs/pagination/index.js +1 -0
  31. package/dist-cjs/protocols/Aws_json1_1.js +143 -0
  32. package/dist-es/commands/AddTagsToStreamCommand.js +2 -0
  33. package/dist-es/commands/DecreaseStreamRetentionPeriodCommand.js +2 -0
  34. package/dist-es/commands/DeleteStreamCommand.js +2 -0
  35. package/dist-es/commands/DeregisterStreamConsumerCommand.js +3 -0
  36. package/dist-es/commands/DescribeStreamCommand.js +2 -0
  37. package/dist-es/commands/DescribeStreamConsumerCommand.js +3 -0
  38. package/dist-es/commands/DescribeStreamSummaryCommand.js +2 -0
  39. package/dist-es/commands/DisableEnhancedMonitoringCommand.js +2 -0
  40. package/dist-es/commands/EnableEnhancedMonitoringCommand.js +2 -0
  41. package/dist-es/commands/GetRecordsCommand.js +2 -0
  42. package/dist-es/commands/GetShardIteratorCommand.js +2 -0
  43. package/dist-es/commands/IncreaseStreamRetentionPeriodCommand.js +2 -0
  44. package/dist-es/commands/ListShardsCommand.js +2 -0
  45. package/dist-es/commands/ListStreamConsumersCommand.js +2 -0
  46. package/dist-es/commands/ListTagsForStreamCommand.js +2 -0
  47. package/dist-es/commands/MergeShardsCommand.js +2 -0
  48. package/dist-es/commands/PutRecordCommand.js +2 -0
  49. package/dist-es/commands/PutRecordsCommand.js +2 -0
  50. package/dist-es/commands/RegisterStreamConsumerCommand.js +2 -0
  51. package/dist-es/commands/RemoveTagsFromStreamCommand.js +2 -0
  52. package/dist-es/commands/SplitShardCommand.js +2 -0
  53. package/dist-es/commands/StartStreamEncryptionCommand.js +2 -0
  54. package/dist-es/commands/StopStreamEncryptionCommand.js +2 -0
  55. package/dist-es/commands/SubscribeToShardCommand.js +2 -0
  56. package/dist-es/commands/UpdateShardCountCommand.js +2 -0
  57. package/dist-es/commands/UpdateStreamModeCommand.js +2 -0
  58. package/dist-es/endpoint/ruleset.js +1069 -5
  59. package/dist-es/models/models_0.js +15 -0
  60. package/dist-es/pagination/ListStreamsPaginator.js +32 -0
  61. package/dist-es/pagination/index.js +1 -0
  62. package/dist-es/protocols/Aws_json1_1.js +144 -1
  63. package/dist-types/Kinesis.d.ts +213 -138
  64. package/dist-types/KinesisClient.d.ts +1 -1
  65. package/dist-types/commands/AddTagsToStreamCommand.d.ts +6 -2
  66. package/dist-types/commands/CreateStreamCommand.d.ts +19 -16
  67. package/dist-types/commands/DecreaseStreamRetentionPeriodCommand.d.ts +5 -1
  68. package/dist-types/commands/DeleteStreamCommand.d.ts +9 -6
  69. package/dist-types/commands/DeregisterStreamConsumerCommand.d.ts +1 -1
  70. package/dist-types/commands/DescribeLimitsCommand.d.ts +2 -2
  71. package/dist-types/commands/DescribeStreamCommand.d.ts +10 -7
  72. package/dist-types/commands/DescribeStreamConsumerCommand.d.ts +1 -1
  73. package/dist-types/commands/DescribeStreamSummaryCommand.d.ts +6 -2
  74. package/dist-types/commands/DisableEnhancedMonitoringCommand.d.ts +4 -0
  75. package/dist-types/commands/EnableEnhancedMonitoringCommand.d.ts +4 -0
  76. package/dist-types/commands/GetRecordsCommand.d.ts +11 -16
  77. package/dist-types/commands/GetShardIteratorCommand.d.ts +10 -6
  78. package/dist-types/commands/IncreaseStreamRetentionPeriodCommand.d.ts +5 -1
  79. package/dist-types/commands/ListShardsCommand.d.ts +7 -3
  80. package/dist-types/commands/ListStreamConsumersCommand.d.ts +1 -1
  81. package/dist-types/commands/ListStreamsCommand.d.ts +3 -3
  82. package/dist-types/commands/ListTagsForStreamCommand.d.ts +4 -0
  83. package/dist-types/commands/MergeShardsCommand.d.ts +13 -8
  84. package/dist-types/commands/PutRecordCommand.d.ts +14 -10
  85. package/dist-types/commands/PutRecordsCommand.d.ts +16 -12
  86. package/dist-types/commands/RegisterStreamConsumerCommand.d.ts +3 -3
  87. package/dist-types/commands/RemoveTagsFromStreamCommand.d.ts +6 -2
  88. package/dist-types/commands/SplitShardCommand.d.ts +15 -10
  89. package/dist-types/commands/StartStreamEncryptionCommand.d.ts +7 -3
  90. package/dist-types/commands/StopStreamEncryptionCommand.d.ts +7 -3
  91. package/dist-types/commands/SubscribeToShardCommand.d.ts +4 -4
  92. package/dist-types/commands/UpdateShardCountCommand.d.ts +19 -14
  93. package/dist-types/endpoint/EndpointParameters.d.ts +4 -1
  94. package/dist-types/models/models_0.d.ts +351 -214
  95. package/dist-types/pagination/ListStreamsPaginator.d.ts +4 -0
  96. package/dist-types/pagination/index.d.ts +1 -0
  97. package/dist-types/ts3.4/endpoint/EndpointParameters.d.ts +4 -1
  98. package/dist-types/ts3.4/models/models_0.d.ts +60 -18
  99. package/dist-types/ts3.4/pagination/ListStreamsPaginator.d.ts +11 -0
  100. package/dist-types/ts3.4/pagination/index.d.ts +1 -0
  101. package/package.json +1 -1
@@ -11,45 +11,49 @@ export interface PutRecordsCommandOutput extends PutRecordsOutput, __MetadataBea
11
11
  * <p>Writes multiple data records into a Kinesis data stream in a single call (also
12
12
  * referred to as a <code>PutRecords</code> request). Use this operation to send data into
13
13
  * the stream for data ingestion and processing. </p>
14
- * <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in the
14
+ * <note>
15
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
16
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
17
+ * </note>
18
+ * <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in the
15
19
  * request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
16
20
  * including partition keys. Each shard can support writes up to 1,000 records per second,
17
21
  * up to a maximum data write total of 1 MiB per second.</p>
18
- * <p>You must specify the name of the stream that captures, stores, and transports the
22
+ * <p>You must specify the name of the stream that captures, stores, and transports the
19
23
  * data; and an array of request <code>Records</code>, with each record in the array
20
24
  * requiring a partition key and data blob. The record size limit applies to the total size
21
25
  * of the partition key and data blob.</p>
22
- * <p>The data blob can be any type of data; for example, a segment from a log file,
26
+ * <p>The data blob can be any type of data; for example, a segment from a log file,
23
27
  * geographic/location data, website clickstream data, and so on.</p>
24
- * <p>The partition key is used by Kinesis Data Streams as input to a hash function that
28
+ * <p>The partition key is used by Kinesis Data Streams as input to a hash function that
25
29
  * maps the partition key and associated data to a specific shard. An MD5 hash function is
26
30
  * used to map partition keys to 128-bit integer values and to map associated data records
27
31
  * to shards. As a result of this hashing mechanism, all data records with the same
28
32
  * partition key map to the same shard within the stream. For more information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream">Adding Data to a Stream</a> in the <i>Amazon Kinesis Data Streams
29
33
  * Developer Guide</i>.</p>
30
- * <p>Each record in the <code>Records</code> array may include an optional parameter,
34
+ * <p>Each record in the <code>Records</code> array may include an optional parameter,
31
35
  * <code>ExplicitHashKey</code>, which overrides the partition key to shard mapping.
32
36
  * This parameter allows a data producer to determine explicitly the shard where the record
33
37
  * is stored. For more information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis
34
38
  * Data Streams Developer Guide</i>.</p>
35
- * <p>The <code>PutRecords</code> response includes an array of response
39
+ * <p>The <code>PutRecords</code> response includes an array of response
36
40
  * <code>Records</code>. Each record in the response array directly correlates with a
37
41
  * record in the request array using natural ordering, from the top to the bottom of the
38
42
  * request and response. The response <code>Records</code> array always includes the same
39
43
  * number of records as the request array.</p>
40
- * <p>The response <code>Records</code> array includes both successfully and unsuccessfully
44
+ * <p>The response <code>Records</code> array includes both successfully and unsuccessfully
41
45
  * processed records. Kinesis Data Streams attempts to process all records in each
42
46
  * <code>PutRecords</code> request. A single record failure does not stop the
43
47
  * processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering
44
48
  * of records. If you need to read records in the same order they are written to the
45
49
  * stream, use <a>PutRecord</a> instead of <code>PutRecords</code>, and write to
46
50
  * the same shard.</p>
47
- * <p>A successfully processed record includes <code>ShardId</code> and
51
+ * <p>A successfully processed record includes <code>ShardId</code> and
48
52
  * <code>SequenceNumber</code> values. The <code>ShardId</code> parameter identifies
49
53
  * the shard in the stream where the record is stored. The <code>SequenceNumber</code>
50
54
  * parameter is an identifier assigned to the put record, unique to all records in the
51
55
  * stream.</p>
52
- * <p>An unsuccessfully processed record includes <code>ErrorCode</code> and
56
+ * <p>An unsuccessfully processed record includes <code>ErrorCode</code> and
53
57
  * <code>ErrorMessage</code> values. <code>ErrorCode</code> reflects the type of error
54
58
  * and can be one of the following values:
55
59
  * <code>ProvisionedThroughputExceededException</code> or <code>InternalFailure</code>.
@@ -58,11 +62,11 @@ export interface PutRecordsCommandOutput extends PutRecordsOutput, __MetadataBea
58
62
  * ID, stream name, and shard ID of the record that was throttled. For more information
59
63
  * about partially successful responses, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis
60
64
  * Data Streams Developer Guide</i>.</p>
61
- * <important>
65
+ * <important>
62
66
  * <p>After you write a record to a stream, you cannot modify that record or its order
63
67
  * within the stream.</p>
64
- * </important>
65
- * <p>By default, data records are accessible for 24 hours from the time that they are added
68
+ * </important>
69
+ * <p>By default, data records are accessible for 24 hours from the time that they are added
66
70
  * to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>
67
71
  * @example
68
72
  * Use a bare-bones client and the command you need to make an API call.
@@ -13,11 +13,11 @@ export interface RegisterStreamConsumerCommandOutput extends RegisterStreamConsu
13
13
  * from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every
14
14
  * shard you subscribe to. This rate is unaffected by the total number of consumers that
15
15
  * read from the same stream.</p>
16
- * <p>You can register up to 20 consumers per stream. A given consumer can only be
16
+ * <p>You can register up to 20 consumers per stream. A given consumer can only be
17
17
  * registered with one stream at a time.</p>
18
- * <p>For an example of how to use this operations, see <a href="/streams/latest/dev/building-enhanced-consumers-api.html">Enhanced Fan-Out
18
+ * <p>For an example of how to use this operation, see <a href="/streams/latest/dev/building-enhanced-consumers-api.html">Enhanced Fan-Out
19
19
  * Using the Kinesis Data Streams API</a>.</p>
20
- * <p>The use of this operation has a limit of five transactions per second per account.
20
+ * <p>The use of this operation has a limit of five transactions per second per account.
21
21
  * Also, only 5 consumers can be created simultaneously. In other words, you cannot have
22
22
  * more than 5 consumers in a <code>CREATING</code> status at the same time. Registering a
23
23
  * 6th consumer while there are 5 in a <code>CREATING</code> status results in a
@@ -10,8 +10,12 @@ export interface RemoveTagsFromStreamCommandOutput extends __MetadataBearer {
10
10
  /**
11
11
  * <p>Removes tags from the specified Kinesis data stream. Removed tags are deleted and
12
12
  * cannot be recovered after this operation successfully completes.</p>
13
- * <p>If you specify a tag that does not exist, it is ignored.</p>
14
- * <p>
13
+ * <note>
14
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
15
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
16
+ * </note>
17
+ * <p>If you specify a tag that does not exist, it is ignored.</p>
18
+ * <p>
15
19
  * <a>RemoveTagsFromStream</a> has a limit of five transactions per second per
16
20
  * account.</p>
17
21
  * @example
@@ -11,42 +11,47 @@ export interface SplitShardCommandOutput extends __MetadataBearer {
11
11
  * <p>Splits a shard into two new shards in the Kinesis data stream, to increase the
12
12
  * stream's capacity to ingest and transport data. <code>SplitShard</code> is called when
13
13
  * there is a need to increase the overall capacity of a stream because of an expected
14
- * increase in the volume of data records being ingested. </p>
15
- * <p>You can also use <code>SplitShard</code> when a shard appears to be approaching its
14
+ * increase in the volume of data records being ingested. This API is only supported for
15
+ * the data streams with the provisioned capacity mode.</p>
16
+ * <note>
17
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
18
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
19
+ * </note>
20
+ * <p>You can also use <code>SplitShard</code> when a shard appears to be approaching its
16
21
  * maximum utilization; for example, the producers sending data into the specific shard are
17
22
  * suddenly sending more than previously anticipated. You can also call
18
23
  * <code>SplitShard</code> to increase stream capacity, so that more Kinesis Data
19
24
  * Streams applications can simultaneously read data from the stream for real-time
20
25
  * processing. </p>
21
- * <p>You must specify the shard to be split and the new hash key, which is the position in
26
+ * <p>You must specify the shard to be split and the new hash key, which is the position in
22
27
  * the shard where the shard gets split in two. In many cases, the new hash key might be
23
28
  * the average of the beginning and ending hash key, but it can be any hash key value in
24
29
  * the range being mapped into the shard. For more information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html">Split a
25
30
  * Shard</a> in the <i>Amazon Kinesis Data Streams Developer
26
31
  * Guide</i>.</p>
27
- * <p>You can use <a>DescribeStreamSummary</a> and the <a>ListShards</a> APIs to determine the shard ID and hash key values for the <code>ShardToSplit</code>
32
+ * <p>You can use <a>DescribeStreamSummary</a> and the <a>ListShards</a> APIs to determine the shard ID and hash key values for the <code>ShardToSplit</code>
28
33
  * and <code>NewStartingHashKey</code> parameters that are specified in the
29
34
  * <code>SplitShard</code> request.</p>
30
- * <p>
35
+ * <p>
31
36
  * <code>SplitShard</code> is an asynchronous operation. Upon receiving a
32
37
  * <code>SplitShard</code> request, Kinesis Data Streams immediately returns a response
33
38
  * and sets the stream status to <code>UPDATING</code>. After the operation is completed,
34
39
  * Kinesis Data Streams sets the stream status to <code>ACTIVE</code>. Read and write
35
40
  * operations continue to work while the stream is in the <code>UPDATING</code> state. </p>
36
- * <p>You can use <a>DescribeStreamSummary</a> to check the status of the stream,
41
+ * <p>You can use <a>DescribeStreamSummary</a> to check the status of the stream,
37
42
  * which is returned in <code>StreamStatus</code>. If the stream is in the
38
43
  * <code>ACTIVE</code> state, you can call <code>SplitShard</code>.
39
44
  * </p>
40
- * <p>If the specified stream does not exist, <a>DescribeStreamSummary</a>
45
+ * <p>If the specified stream does not exist, <a>DescribeStreamSummary</a>
41
46
  * returns a <code>ResourceNotFoundException</code>. If you try to create more shards than
42
47
  * are authorized for your account, you receive a <code>LimitExceededException</code>. </p>
43
- * <p>For the default shard limit for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Kinesis
48
+ * <p>For the default shard limit for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Kinesis
44
49
  * Data Streams Limits</a> in the <i>Amazon Kinesis Data Streams Developer
45
50
  * Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact Amazon Web Services
46
51
  * Support</a>.</p>
47
- * <p>If you try to operate on too many streams simultaneously using <a>CreateStream</a>, <a>DeleteStream</a>, <a>MergeShards</a>, and/or <a>SplitShard</a>, you receive a
52
+ * <p>If you try to operate on too many streams simultaneously using <a>CreateStream</a>, <a>DeleteStream</a>, <a>MergeShards</a>, and/or <a>SplitShard</a>, you receive a
48
53
  * <code>LimitExceededException</code>. </p>
49
- * <p>
54
+ * <p>
50
55
  * <code>SplitShard</code> has a limit of five transactions per second per account.</p>
51
56
  * @example
52
57
  * Use a bare-bones client and the command you need to make an API call.
@@ -10,7 +10,7 @@ export interface StartStreamEncryptionCommandOutput extends __MetadataBearer {
10
10
  /**
11
11
  * <p>Enables or updates server-side encryption using an Amazon Web Services KMS key for a
12
12
  * specified stream. </p>
13
- * <p>Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis
13
+ * <p>Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis
14
14
  * Data Streams returns immediately and sets the status of the stream to
15
15
  * <code>UPDATING</code>. After the update is complete, Kinesis Data Streams sets the
16
16
  * status of the stream back to <code>ACTIVE</code>. Updating or applying encryption
@@ -18,12 +18,16 @@ export interface StartStreamEncryptionCommandOutput extends __MetadataBearer {
18
18
  * read and write data to your stream while its status is <code>UPDATING</code>. Once the
19
19
  * status of the stream is <code>ACTIVE</code>, encryption begins for records written to
20
20
  * the stream. </p>
21
- * <p>API Limits: You can successfully apply a new Amazon Web Services KMS key for
21
+ * <p>API Limits: You can successfully apply a new Amazon Web Services KMS key for
22
22
  * server-side encryption 25 times in a rolling 24-hour period.</p>
23
- * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
23
+ * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
24
24
  * before all records written to the stream are encrypted. After you enable encryption, you
25
25
  * can verify that encryption is applied by inspecting the API response from
26
26
  * <code>PutRecord</code> or <code>PutRecords</code>.</p>
27
+ * <note>
28
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
29
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
30
+ * </note>
27
31
  * @example
28
32
  * Use a bare-bones client and the command you need to make an API call.
29
33
  * ```javascript
@@ -9,7 +9,11 @@ export interface StopStreamEncryptionCommandOutput extends __MetadataBearer {
9
9
  }
10
10
  /**
11
11
  * <p>Disables server-side encryption for a specified stream. </p>
12
- * <p>Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis
12
+ * <note>
13
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
14
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
15
+ * </note>
16
+ * <p>Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis
13
17
  * Data Streams returns immediately and sets the status of the stream to
14
18
  * <code>UPDATING</code>. After the update is complete, Kinesis Data Streams sets the
15
19
  * status of the stream back to <code>ACTIVE</code>. Stopping encryption normally takes a
@@ -17,9 +21,9 @@ export interface StopStreamEncryptionCommandOutput extends __MetadataBearer {
17
21
  * data to your stream while its status is <code>UPDATING</code>. Once the status of the
18
22
  * stream is <code>ACTIVE</code>, records written to the stream are no longer encrypted by
19
23
  * Kinesis Data Streams. </p>
20
- * <p>API Limits: You can successfully disable server-side encryption 25 times in a rolling
24
+ * <p>API Limits: You can successfully disable server-side encryption 25 times in a rolling
21
25
  * 24-hour period. </p>
22
- * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
26
+ * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
23
27
  * before all records written to the stream are no longer subject to encryption. After you
24
28
  * disabled encryption, you can verify that encryption is not applied by inspecting the API
25
29
  * response from <code>PutRecord</code> or <code>PutRecords</code>.</p>
@@ -14,22 +14,22 @@ export interface SubscribeToShardCommandOutput extends SubscribeToShardOutput, _
14
14
  * Kinesis Data Streams pushes records from the shard to the consumer over this connection.
15
15
  * Before you call this operation, call <a>RegisterStreamConsumer</a> to
16
16
  * register the consumer with Kinesis Data Streams.</p>
17
- * <p>When the <code>SubscribeToShard</code> call succeeds, your consumer starts receiving
17
+ * <p>When the <code>SubscribeToShard</code> call succeeds, your consumer starts receiving
18
18
  * events of type <a>SubscribeToShardEvent</a> over the HTTP/2 connection for up
19
19
  * to 5 minutes, after which time you need to call <code>SubscribeToShard</code> again to
20
20
  * renew the subscription if you want to continue to receive records.</p>
21
- * <p>You can make one call to <code>SubscribeToShard</code> per second per registered
21
+ * <p>You can make one call to <code>SubscribeToShard</code> per second per registered
22
22
  * consumer per shard. For example, if you have a 4000 shard stream and two registered
23
23
  * stream consumers, you can make one <code>SubscribeToShard</code> request per second for
24
24
  * each combination of shard and registered consumer, allowing you to subscribe both
25
25
  * consumers to all 4000 shards in one second. </p>
26
- * <p>If you call <code>SubscribeToShard</code> again with the same <code>ConsumerARN</code>
26
+ * <p>If you call <code>SubscribeToShard</code> again with the same <code>ConsumerARN</code>
27
27
  * and <code>ShardId</code> within 5 seconds of a successful call, you'll get a
28
28
  * <code>ResourceInUseException</code>. If you call <code>SubscribeToShard</code> 5
29
29
  * seconds or more after a successful call, the second call takes over the subscription and
30
30
  * the previous connection expires or fails with a
31
31
  * <code>ResourceInUseException</code>.</p>
32
- * <p>For an example of how to use this operations, see <a href="/streams/latest/dev/building-enhanced-consumers-api.html">Enhanced Fan-Out
32
+ * <p>For an example of how to use this operation, see <a href="/streams/latest/dev/building-enhanced-consumers-api.html">Enhanced Fan-Out
33
33
  * Using the Kinesis Data Streams API</a>.</p>
34
34
  * @example
35
35
  * Use a bare-bones client and the command you need to make an API call.
@@ -8,46 +8,51 @@ export interface UpdateShardCountCommandInput extends UpdateShardCountInput {
8
8
  export interface UpdateShardCountCommandOutput extends UpdateShardCountOutput, __MetadataBearer {
9
9
  }
10
10
  /**
11
- * <p>Updates the shard count of the specified stream to the specified number of
12
- * shards.</p>
13
- * <p>Updating the shard count is an asynchronous operation. Upon receiving the request,
11
+ * <p>Updates the shard count of the specified stream to the specified number of shards.
12
+ * This API is only supported for the data streams with the provisioned capacity
13
+ * mode.</p>
14
+ * <note>
15
+ * <p>When invoking this API, it is recommended you use the <code>StreamARN</code> input
16
+ * parameter rather than the <code>StreamName</code> input parameter.</p>
17
+ * </note>
18
+ * <p>Updating the shard count is an asynchronous operation. Upon receiving the request,
14
19
  * Kinesis Data Streams returns immediately and sets the status of the stream to
15
20
  * <code>UPDATING</code>. After the update is complete, Kinesis Data Streams sets the
16
21
  * status of the stream back to <code>ACTIVE</code>. Depending on the size of the stream,
17
22
  * the scaling action could take a few minutes to complete. You can continue to read and
18
23
  * write data to your stream while its status is <code>UPDATING</code>.</p>
19
- * <p>To update the shard count, Kinesis Data Streams performs splits or merges on
24
+ * <p>To update the shard count, Kinesis Data Streams performs splits or merges on
20
25
  * individual shards. This can cause short-lived shards to be created, in addition to the
21
26
  * final shards. These short-lived shards count towards your total shard limit for your
22
27
  * account in the Region.</p>
23
- * <p>When using this operation, we recommend that you specify a target shard count that is
28
+ * <p>When using this operation, we recommend that you specify a target shard count that is
24
29
  * a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your
25
30
  * shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling
26
31
  * action might take longer to complete. </p>
27
- * <p>This operation has the following default limits. By default, you cannot do the
32
+ * <p>This operation has the following default limits. By default, you cannot do the
28
33
  * following:</p>
29
- * <ul>
34
+ * <ul>
30
35
  * <li>
31
- * <p>Scale more than ten times per rolling 24-hour period per stream</p>
36
+ * <p>Scale more than ten times per rolling 24-hour period per stream</p>
32
37
  * </li>
33
38
  * <li>
34
- * <p>Scale up to more than double your current shard count for a stream</p>
39
+ * <p>Scale up to more than double your current shard count for a stream</p>
35
40
  * </li>
36
41
  * <li>
37
- * <p>Scale down below half your current shard count for a stream</p>
42
+ * <p>Scale down below half your current shard count for a stream</p>
38
43
  * </li>
39
44
  * <li>
40
- * <p>Scale up to more than 10000 shards in a stream</p>
45
+ * <p>Scale up to more than 10000 shards in a stream</p>
41
46
  * </li>
42
47
  * <li>
43
- * <p>Scale a stream with more than 10000 shards down unless the result is less than
48
+ * <p>Scale a stream with more than 10000 shards down unless the result is less than
44
49
  * 10000 shards</p>
45
50
  * </li>
46
51
  * <li>
47
- * <p>Scale up to more than the shard limit for your account</p>
52
+ * <p>Scale up to more than the shard limit for your account</p>
48
53
  * </li>
49
54
  * </ul>
50
- * <p>For the default limits for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Streams
55
+ * <p>For the default limits for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Streams
51
56
  * Limits</a> in the <i>Amazon Kinesis Data Streams Developer
52
57
  * Guide</i>. To request an increase in the call rate limit, the shard limit for
53
58
  * this API, or your overall shard limit, use the <a href="https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis">limits form</a>.</p>
@@ -12,8 +12,11 @@ export declare const resolveClientEndpointParameters: <T>(options: T & ClientInp
12
12
  defaultSigningName: string;
13
13
  };
14
14
  export interface EndpointParameters extends __EndpointParameters {
15
- Region?: string;
15
+ Region: string;
16
16
  UseDualStack?: boolean;
17
17
  UseFIPS?: boolean;
18
18
  Endpoint?: string;
19
+ StreamARN?: string;
20
+ OperationType?: string;
21
+ ConsumerARN?: string;
19
22
  }