@aws-sdk/client-kinesis 3.40.0 → 3.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +38 -0
  2. package/README.md +2 -2
  3. package/dist-cjs/Kinesis.js +15 -0
  4. package/dist-cjs/commands/UpdateStreamModeCommand.js +36 -0
  5. package/dist-cjs/commands/index.js +1 -0
  6. package/dist-cjs/endpoints.js +8 -0
  7. package/dist-cjs/models/models_0.js +25 -2
  8. package/dist-cjs/protocols/Aws_json1_1.js +151 -2
  9. package/dist-es/Kinesis.js +15 -0
  10. package/dist-es/commands/UpdateStreamModeCommand.js +39 -0
  11. package/dist-es/commands/index.js +1 -0
  12. package/dist-es/endpoints.js +8 -0
  13. package/dist-es/models/models_0.js +17 -0
  14. package/dist-es/protocols/Aws_json1_1.js +189 -35
  15. package/dist-types/Kinesis.d.ts +165 -143
  16. package/dist-types/KinesisClient.d.ts +5 -4
  17. package/dist-types/commands/AddTagsToStreamCommand.d.ts +3 -5
  18. package/dist-types/commands/CreateStreamCommand.d.ts +20 -20
  19. package/dist-types/commands/DecreaseStreamRetentionPeriodCommand.d.ts +6 -6
  20. package/dist-types/commands/DeleteStreamCommand.d.ts +6 -5
  21. package/dist-types/commands/DeregisterStreamConsumerCommand.d.ts +1 -1
  22. package/dist-types/commands/DescribeLimitsCommand.d.ts +1 -1
  23. package/dist-types/commands/DescribeStreamCommand.d.ts +6 -2
  24. package/dist-types/commands/DescribeStreamConsumerCommand.d.ts +1 -1
  25. package/dist-types/commands/DescribeStreamSummaryCommand.d.ts +6 -6
  26. package/dist-types/commands/DisableEnhancedMonitoringCommand.d.ts +1 -1
  27. package/dist-types/commands/EnableEnhancedMonitoringCommand.d.ts +1 -1
  28. package/dist-types/commands/GetRecordsCommand.d.ts +30 -20
  29. package/dist-types/commands/GetShardIteratorCommand.d.ts +8 -8
  30. package/dist-types/commands/IncreaseStreamRetentionPeriodCommand.d.ts +4 -4
  31. package/dist-types/commands/ListShardsCommand.d.ts +5 -3
  32. package/dist-types/commands/ListStreamConsumersCommand.d.ts +1 -1
  33. package/dist-types/commands/ListStreamsCommand.d.ts +2 -2
  34. package/dist-types/commands/ListTagsForStreamCommand.d.ts +1 -1
  35. package/dist-types/commands/MergeShardsCommand.d.ts +6 -6
  36. package/dist-types/commands/PutRecordCommand.d.ts +9 -9
  37. package/dist-types/commands/PutRecordsCommand.d.ts +8 -8
  38. package/dist-types/commands/RegisterStreamConsumerCommand.d.ts +1 -1
  39. package/dist-types/commands/RemoveTagsFromStreamCommand.d.ts +1 -1
  40. package/dist-types/commands/SplitShardCommand.d.ts +18 -20
  41. package/dist-types/commands/StartStreamEncryptionCommand.d.ts +11 -11
  42. package/dist-types/commands/StopStreamEncryptionCommand.d.ts +9 -10
  43. package/dist-types/commands/SubscribeToShardCommand.d.ts +4 -3
  44. package/dist-types/commands/UpdateShardCountCommand.d.ts +11 -12
  45. package/dist-types/commands/UpdateStreamModeCommand.d.ts +38 -0
  46. package/dist-types/commands/index.d.ts +1 -0
  47. package/dist-types/models/models_0.d.ts +333 -152
  48. package/dist-types/protocols/Aws_json1_1.d.ts +3 -0
  49. package/dist-types/ts3.4/Kinesis.d.ts +5 -0
  50. package/dist-types/ts3.4/KinesisClient.d.ts +3 -2
  51. package/dist-types/ts3.4/commands/UpdateStreamModeCommand.d.ts +17 -0
  52. package/dist-types/ts3.4/commands/index.d.ts +1 -0
  53. package/dist-types/ts3.4/models/models_0.d.ts +54 -1
  54. package/dist-types/ts3.4/protocols/Aws_json1_1.d.ts +3 -0
  55. package/package.json +4 -4
@@ -27,18 +27,17 @@ import { StartStreamEncryptionCommandInput, StartStreamEncryptionCommandOutput }
27
27
  import { StopStreamEncryptionCommandInput, StopStreamEncryptionCommandOutput } from "./commands/StopStreamEncryptionCommand";
28
28
  import { SubscribeToShardCommandInput, SubscribeToShardCommandOutput } from "./commands/SubscribeToShardCommand";
29
29
  import { UpdateShardCountCommandInput, UpdateShardCountCommandOutput } from "./commands/UpdateShardCountCommand";
30
+ import { UpdateStreamModeCommandInput, UpdateStreamModeCommandOutput } from "./commands/UpdateStreamModeCommand";
30
31
  import { KinesisClient } from "./KinesisClient";
31
32
  /**
32
33
  * <fullname>Amazon Kinesis Data Streams Service API Reference</fullname>
33
- * <p>Amazon Kinesis Data Streams is a managed service that scales elastically for
34
- * real-time processing of streaming big data.</p>
34
+ * <p>Amazon Kinesis Data Streams is a managed service that scales elastically for real-time
35
+ * processing of streaming big data.</p>
35
36
  */
36
37
  export declare class Kinesis extends KinesisClient {
37
38
  /**
38
- * <p>Adds or updates tags for the specified Kinesis data stream. Each time you invoke
39
- * this operation, you can specify up to 10 tags. If you want to add more than 10 tags to
40
- * your stream, you can invoke this operation multiple times. In total, each stream can
41
- * have up to 50 tags.</p>
39
+ * <p>Adds or updates tags for the specified Kinesis data stream. You can assign up to 50
40
+ * tags to a data stream.</p>
42
41
  * <p>If tags have already been assigned to the stream, <code>AddTagsToStream</code>
43
42
  * overwrites any existing tags that correspond to the specified tag keys.</p>
44
43
  * <p>
@@ -49,19 +48,19 @@ export declare class Kinesis extends KinesisClient {
49
48
  addTagsToStream(args: AddTagsToStreamCommandInput, cb: (err: any, data?: AddTagsToStreamCommandOutput) => void): void;
50
49
  addTagsToStream(args: AddTagsToStreamCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: AddTagsToStreamCommandOutput) => void): void;
51
50
  /**
52
- * <p>Creates a Kinesis data stream. A stream captures and transports data records that
53
- * are continuously emitted from different data sources or <i>producers</i>.
51
+ * <p>Creates a Kinesis data stream. A stream captures and transports data records that are
52
+ * continuously emitted from different data sources or <i>producers</i>.
54
53
  * Scale-out within a stream is explicitly supported by means of shards, which are uniquely
55
54
  * identified groups of data records in a stream.</p>
56
- * <p>You specify and control the number of shards that a stream is composed of. Each
57
- * shard can support reads up to five transactions per second, up to a maximum data read
58
- * total of 2 MiB per second. Each shard can support writes up to 1,000 records per second,
59
- * up to a maximum data write total of 1 MiB per second. If the amount of data input
60
- * increases or decreases, you can add or remove shards.</p>
61
- * <p>The stream name identifies the stream. The name is scoped to the AWS account used
62
- * by the application. It is also scoped by AWS Region. That is, two streams in two
63
- * different accounts can have the same name, and two streams in the same account, but in
64
- * two different Regions, can have the same name.</p>
55
+ * <p>You specify and control the number of shards that a stream is composed of. Each shard
56
+ * can support reads up to five transactions per second, up to a maximum data read total of
57
+ * 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a
58
+ * maximum data write total of 1 MiB per second. If the amount of data input increases or
59
+ * decreases, you can add or remove shards.</p>
60
+ * <p>The stream name identifies the stream. The name is scoped to the Amazon Web Services
61
+ * account used by the application. It is also scoped by Amazon Web Services Region. That
62
+ * is, two streams in two different accounts can have the same name, and two streams in the
63
+ * same account, but in two different Regions, can have the same name.</p>
65
64
  * <p>
66
65
  * <code>CreateStream</code> is an asynchronous operation. Upon receiving a
67
66
  * <code>CreateStream</code> request, Kinesis Data Streams immediately returns and sets
@@ -73,20 +72,20 @@ export declare class Kinesis extends KinesisClient {
73
72
  * <ul>
74
73
  * <li>
75
74
  *
76
- * <p>Have more than five streams in the <code>CREATING</code> state at any point
77
- * in time.</p>
75
+ * <p>Have more than five streams in the <code>CREATING</code> state at any point in
76
+ * time.</p>
78
77
  * </li>
79
78
  * <li>
80
79
  *
81
80
  * <p>Create more shards than are authorized for your account.</p>
82
81
  * </li>
83
82
  * </ul>
84
- * <p>For the default shard limit for an AWS account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Amazon Kinesis Data Streams
85
- * Limits</a> in the <i>Amazon Kinesis Data Streams Developer
86
- * Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact AWS
87
- * Support</a>.</p>
88
- * <p>You can use <code>DescribeStream</code> to check the stream status, which is
89
- * returned in <code>StreamStatus</code>.</p>
83
+ * <p>For the default shard limit for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Amazon
84
+ * Kinesis Data Streams Limits</a> in the <i>Amazon Kinesis Data Streams
85
+ * Developer Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact Amazon Web Services
86
+ * Support</a>.</p>
87
+ * <p>You can use <a>DescribeStreamSummary</a> to check the stream status, which
88
+ * is returned in <code>StreamStatus</code>.</p>
90
89
  * <p>
91
90
  * <a>CreateStream</a> has a limit of five transactions per second per
92
91
  * account.</p>
@@ -95,12 +94,12 @@ export declare class Kinesis extends KinesisClient {
95
94
  createStream(args: CreateStreamCommandInput, cb: (err: any, data?: CreateStreamCommandOutput) => void): void;
96
95
  createStream(args: CreateStreamCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: CreateStreamCommandOutput) => void): void;
97
96
  /**
98
- * <p>Decreases the Kinesis data stream's retention period, which is the length of time
99
- * data records are accessible after they are added to the stream. The minimum value of a
97
+ * <p>Decreases the Kinesis data stream's retention period, which is the length of time data
98
+ * records are accessible after they are added to the stream. The minimum value of a
100
99
  * stream's retention period is 24 hours.</p>
101
- * <p>This operation may result in lost data. For example, if the stream's retention
102
- * period is 48 hours and is decreased to 24 hours, any data already in the stream that is
103
- * older than 24 hours is inaccessible.</p>
100
+ * <p>This operation may result in lost data. For example, if the stream's retention period
101
+ * is 48 hours and is decreased to 24 hours, any data already in the stream that is older
102
+ * than 24 hours is inaccessible.</p>
104
103
  */
105
104
  decreaseStreamRetentionPeriod(args: DecreaseStreamRetentionPeriodCommandInput, options?: __HttpHandlerOptions): Promise<DecreaseStreamRetentionPeriodCommandOutput>;
106
105
  decreaseStreamRetentionPeriod(args: DecreaseStreamRetentionPeriodCommandInput, cb: (err: any, data?: DecreaseStreamRetentionPeriodCommandOutput) => void): void;
@@ -118,10 +117,11 @@ export declare class Kinesis extends KinesisClient {
118
117
  * <b>Note:</b> Kinesis Data Streams might continue to accept
119
118
  * data read and write operations, such as <a>PutRecord</a>, <a>PutRecords</a>, and <a>GetRecords</a>, on a stream in the
120
119
  * <code>DELETING</code> state until the stream deletion is complete.</p>
121
- * <p>When you delete a stream, any shards in that stream are also deleted, and any tags
122
- * are dissociated from the stream.</p>
123
- * <p>You can use the <a>DescribeStream</a> operation to check the state of
124
- * the stream, which is returned in <code>StreamStatus</code>.</p>
120
+ * <p>When you delete a stream, any shards in that stream are also deleted, and any tags are
121
+ * dissociated from the stream.</p>
122
+ * <p>You can use the <a>DescribeStreamSummary</a> operation to check the state
123
+ * of the stream, which is returned in <code>StreamStatus</code>.</p>
124
+ *
125
125
  * <p>
126
126
  * <a>DeleteStream</a> has a limit of five transactions per second per
127
127
  * account.</p>
@@ -153,7 +153,11 @@ export declare class Kinesis extends KinesisClient {
153
153
  describeLimits(args: DescribeLimitsCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: DescribeLimitsCommandOutput) => void): void;
154
154
  /**
155
155
  * <p>Describes the specified Kinesis data stream.</p>
156
- *
156
+ * <note>
157
+ * <p>This API has been revised. It's highly recommended that you use the <a>DescribeStreamSummary</a> API to get a summarized description of the
158
+ * specified Kinesis data stream and the <a>ListShards</a> API to list the
159
+ * shards in a specified data stream and obtain information about each shard. </p>
160
+ * </note>
157
161
  * <p>The information returned includes the stream name, Amazon Resource Name (ARN),
158
162
  * creation time, enhanced metric configuration, and shard map. The shard map is an array
159
163
  * of shard objects. For each shard object, there is the hash key and sequence number
@@ -189,12 +193,12 @@ export declare class Kinesis extends KinesisClient {
189
193
  /**
190
194
  * <p>Provides a summarized description of the specified Kinesis data stream without the
191
195
  * shard list.</p>
192
- * <p>The information returned includes the stream name, Amazon Resource Name (ARN),
193
- * status, record retention period, approximate creation time, monitoring, encryption
194
- * details, and open shard count. </p>
196
+ * <p>The information returned includes the stream name, Amazon Resource Name (ARN), status,
197
+ * record retention period, approximate creation time, monitoring, encryption details, and
198
+ * open shard count. </p>
195
199
  * <p>
196
- * <a>DescribeStreamSummary</a> has a limit of 20 transactions per second
197
- * per account.</p>
200
+ * <a>DescribeStreamSummary</a> has a limit of 20 transactions per second per
201
+ * account.</p>
198
202
  */
199
203
  describeStreamSummary(args: DescribeStreamSummaryCommandInput, options?: __HttpHandlerOptions): Promise<DescribeStreamSummaryCommandOutput>;
200
204
  describeStreamSummary(args: DescribeStreamSummaryCommandInput, cb: (err: any, data?: DescribeStreamSummaryCommandOutput) => void): void;
@@ -230,47 +234,57 @@ export declare class Kinesis extends KinesisClient {
230
234
  * You can terminate the loop when the shard is closed, or when the shard iterator reaches
231
235
  * the record with the sequence number or other attribute that marks it as the last record
232
236
  * to process.</p>
233
- * <p>Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB
234
- * per second. You can ensure that your calls don't exceed the maximum supported size or
237
+ * <p>Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per
238
+ * second. You can ensure that your calls don't exceed the maximum supported size or
235
239
  * throughput by using the <code>Limit</code> parameter to specify the maximum number of
236
240
  * records that <a>GetRecords</a> can return. Consider your average record size
237
241
  * when determining this limit. The maximum number of records that can be returned per call
238
242
  * is 10,000.</p>
239
243
  *
240
- * <p>The size of the data returned by <a>GetRecords</a> varies depending on
241
- * the utilization of the shard. The maximum size of data that <a>GetRecords</a>
242
- * can return is 10 MiB. If a call returns this amount of data, subsequent calls made
243
- * within the next 5 seconds throw <code>ProvisionedThroughputExceededException</code>. If
244
- * there is insufficient provisioned throughput on the stream, subsequent calls made within
245
- * the next 1 second throw <code>ProvisionedThroughputExceededException</code>. <a>GetRecords</a> doesn't return any data when it throws an exception. For this
246
- * reason, we recommend that you wait 1 second between calls to <a>GetRecords</a>. However, it's possible that the application will get exceptions for longer than 1
247
- * second.</p>
244
+ * <p>The size of the data returned by <a>GetRecords</a> varies depending on the
245
+ * utilization of the shard. It is recommended that consumer applications retrieve records
246
+ * via the <code>GetRecords</code> command using the 5 TPS limit to remain caught up.
247
+ * Retrieving records less frequently can lead to consumer applications falling behind. The
248
+ * maximum size of data that <a>GetRecords</a> can return is 10 MiB. If a call
249
+ * returns this amount of data, subsequent calls made within the next 5 seconds throw
250
+ * <code>ProvisionedThroughputExceededException</code>. If there is insufficient
251
+ * provisioned throughput on the stream, subsequent calls made within the next 1 second
252
+ * throw <code>ProvisionedThroughputExceededException</code>. <a>GetRecords</a>
253
+ * doesn't return any data when it throws an exception. For this reason, we recommend that
254
+ * you wait 1 second between calls to <a>GetRecords</a>. However, it's possible
255
+ * that the application will get exceptions for longer than 1 second.</p>
256
+ *
257
+ *
258
+ *
259
+ *
260
+ *
261
+ *
262
+ *
263
+ *
248
264
  * <p>To detect whether the application is falling behind in processing, you can use the
249
265
  * <code>MillisBehindLatest</code> response attribute. You can also monitor the stream
250
266
  * using CloudWatch metrics and other mechanisms (see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html">Monitoring</a> in the <i>Amazon
251
267
  * Kinesis Data Streams Developer Guide</i>).</p>
252
- * <p>Each Amazon Kinesis record includes a value,
253
- * <code>ApproximateArrivalTimestamp</code>, that is set when a stream successfully
254
- * receives and stores a record. This is commonly referred to as a server-side time stamp,
255
- * whereas a client-side time stamp is set when a data producer creates or sends the record
256
- * to a stream (a data producer is any data source putting data records into a stream, for
257
- * example with <a>PutRecords</a>). The time stamp has millisecond precision.
258
- * There are no guarantees about the time stamp accuracy, or that the time stamp is always
259
- * increasing. For example, records in a shard or across a stream might have time stamps
260
- * that are out of order.</p>
268
+ * <p>Each Amazon Kinesis record includes a value, <code>ApproximateArrivalTimestamp</code>,
269
+ * that is set when a stream successfully receives and stores a record. This is commonly
270
+ * referred to as a server-side time stamp, whereas a client-side time stamp is set when a
271
+ * data producer creates or sends the record to a stream (a data producer is any data
272
+ * source putting data records into a stream, for example with <a>PutRecords</a>). The time stamp has millisecond precision. There are no guarantees about the time
273
+ * stamp accuracy, or that the time stamp is always increasing. For example, records in a
274
+ * shard or across a stream might have time stamps that are out of order.</p>
261
275
  * <p>This operation has a limit of five transactions per second per shard.</p>
262
276
  */
263
277
  getRecords(args: GetRecordsCommandInput, options?: __HttpHandlerOptions): Promise<GetRecordsCommandOutput>;
264
278
  getRecords(args: GetRecordsCommandInput, cb: (err: any, data?: GetRecordsCommandOutput) => void): void;
265
279
  getRecords(args: GetRecordsCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: GetRecordsCommandOutput) => void): void;
266
280
  /**
267
- * <p>Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it
268
- * is returned to the requester.</p>
269
- * <p>A shard iterator specifies the shard position from which to start reading data
270
- * records sequentially. The position is specified using the sequence number of a data
271
- * record in a shard. A sequence number is the identifier associated with every record
272
- * ingested in the stream, and is assigned when a record is put into the stream. Each
273
- * stream has one or more shards.</p>
281
+ * <p>Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is
282
+ * returned to the requester.</p>
283
+ * <p>A shard iterator specifies the shard position from which to start reading data records
284
+ * sequentially. The position is specified using the sequence number of a data record in a
285
+ * shard. A sequence number is the identifier associated with every record ingested in the
286
+ * stream, and is assigned when a record is put into the stream. Each stream has one or
287
+ * more shards.</p>
274
288
  * <p>You must specify the shard iterator type. For example, you can set the
275
289
  * <code>ShardIteratorType</code> parameter to read exactly from the position denoted
276
290
  * by a specific sequence number by using the <code>AT_SEQUENCE_NUMBER</code> shard
@@ -303,9 +317,9 @@ export declare class Kinesis extends KinesisClient {
303
317
  getShardIterator(args: GetShardIteratorCommandInput, cb: (err: any, data?: GetShardIteratorCommandOutput) => void): void;
304
318
  getShardIterator(args: GetShardIteratorCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: GetShardIteratorCommandOutput) => void): void;
305
319
  /**
306
- * <p>Increases the Kinesis data stream's retention period, which is the length of time
307
- * data records are accessible after they are added to the stream. The maximum value of a
308
- * stream's retention period is 168 hours (7 days).</p>
320
+ * <p>Increases the Kinesis data stream's retention period, which is the length of time data
321
+ * records are accessible after they are added to the stream. The maximum value of a
322
+ * stream's retention period is 8760 hours (365 days).</p>
309
323
  * <p>If you choose a longer stream retention period, this operation increases the time
310
324
  * period during which records that have not yet expired are accessible. However, it does
311
325
  * not make previous, expired data (older than the stream's previous retention period)
@@ -317,8 +331,10 @@ export declare class Kinesis extends KinesisClient {
317
331
  increaseStreamRetentionPeriod(args: IncreaseStreamRetentionPeriodCommandInput, cb: (err: any, data?: IncreaseStreamRetentionPeriodCommandOutput) => void): void;
318
332
  increaseStreamRetentionPeriod(args: IncreaseStreamRetentionPeriodCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: IncreaseStreamRetentionPeriodCommandOutput) => void): void;
319
333
  /**
320
- * <p>Lists the shards in a stream and provides information about each shard. This
321
- * operation has a limit of 100 transactions per second per data stream.</p>
334
+ * <p>Lists the shards in a stream and provides information about each shard. This operation
335
+ * has a limit of 1000 transactions per second per data stream.</p>
336
+ * <p>This action does not list expired shards. For information about expired shards, see
337
+ * <a href="https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing">Data Routing, Data Persistence, and Shard State after a Reshard</a>. </p>
322
338
  * <important>
323
339
  * <p>This API is a new operation that is used by the Amazon Kinesis Client Library
324
340
  * (KCL). If you have a fine-grained IAM policy that only allows specific operations,
@@ -344,7 +360,7 @@ export declare class Kinesis extends KinesisClient {
344
360
  * <code>ListStreams</code>. You can limit the number of returned streams using the
345
361
  * <code>Limit</code> parameter. If you do not specify a value for the
346
362
  * <code>Limit</code> parameter, Kinesis Data Streams uses the default limit, which is
347
- * currently 10.</p>
363
+ * currently 100.</p>
348
364
  * <p>You can detect if there are more streams available to list by using the
349
365
  * <code>HasMoreStreams</code> flag from the returned output. If there are more streams
350
366
  * available, you can request more streams by using the name of the last stream returned by
@@ -387,7 +403,7 @@ export declare class Kinesis extends KinesisClient {
387
403
  * <code>UPDATING</code>, or <code>DELETING</code> state, <code>MergeShards</code>
388
404
  * returns a <code>ResourceInUseException</code>. If the specified stream does not exist,
389
405
  * <code>MergeShards</code> returns a <code>ResourceNotFoundException</code>. </p>
390
- * <p>You can use <a>DescribeStream</a> to check the state of the stream,
406
+ * <p>You can use <a>DescribeStreamSummary</a> to check the state of the stream,
391
407
  * which is returned in <code>StreamStatus</code>.</p>
392
408
  * <p>
393
409
  * <code>MergeShards</code> is an asynchronous operation. Upon receiving a
@@ -396,13 +412,13 @@ export declare class Kinesis extends KinesisClient {
396
412
  * operation is completed, Kinesis Data Streams sets the <code>StreamStatus</code> to
397
413
  * <code>ACTIVE</code>. Read and write operations continue to work while the stream is
398
414
  * in the <code>UPDATING</code> state. </p>
399
- * <p>You use <a>DescribeStream</a> to determine the shard IDs that are
400
- * specified in the <code>MergeShards</code> request. </p>
415
+ * <p>You use <a>DescribeStreamSummary</a> and the <a>ListShards</a>
416
+ * APIs to determine the shard IDs that are specified in the <code>MergeShards</code>
417
+ * request. </p>
401
418
  * <p>If you try to operate on too many streams in parallel using <a>CreateStream</a>, <a>DeleteStream</a>, <code>MergeShards</code>,
402
419
  * or <a>SplitShard</a>, you receive a <code>LimitExceededException</code>. </p>
403
420
  * <p>
404
- * <code>MergeShards</code> has a limit of five transactions per second per
405
- * account.</p>
421
+ * <code>MergeShards</code> has a limit of five transactions per second per account.</p>
406
422
  */
407
423
  mergeShards(args: MergeShardsCommandInput, options?: __HttpHandlerOptions): Promise<MergeShardsCommandOutput>;
408
424
  mergeShards(args: MergeShardsCommandInput, cb: (err: any, data?: MergeShardsCommandOutput) => void): void;
@@ -420,18 +436,18 @@ export declare class Kinesis extends KinesisClient {
420
436
  * Kinesis Data Streams segregates the data records that belong to a stream into multiple
421
437
  * shards, using the partition key associated with each data record to determine the shard
422
438
  * to which a given data record belongs.</p>
423
- * <p>Partition keys are Unicode strings, with a maximum length limit of 256 characters
424
- * for each key. An MD5 hash function is used to map partition keys to 128-bit integer
425
- * values and to map associated data records to shards using the hash key ranges of the
426
- * shards. You can override hashing the partition key to determine the shard by explicitly
439
+ * <p>Partition keys are Unicode strings, with a maximum length limit of 256 characters for
440
+ * each key. An MD5 hash function is used to map partition keys to 128-bit integer values
441
+ * and to map associated data records to shards using the hash key ranges of the shards.
442
+ * You can override hashing the partition key to determine the shard by explicitly
427
443
  * specifying a hash value using the <code>ExplicitHashKey</code> parameter. For more
428
444
  * information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream">Adding Data to a Stream</a> in the <i>Amazon Kinesis Data Streams
429
445
  * Developer Guide</i>.</p>
430
446
  * <p>
431
447
  * <code>PutRecord</code> returns the shard ID of where the data record was placed and the
432
448
  * sequence number that was assigned to the data record.</p>
433
- * <p>Sequence numbers increase over time and are specific to a shard within a stream,
434
- * not across all shards within a stream. To guarantee strictly increasing ordering, write
449
+ * <p>Sequence numbers increase over time and are specific to a shard within a stream, not
450
+ * across all shards within a stream. To guarantee strictly increasing ordering, write
435
451
  * serially to a shard and use the <code>SequenceNumberForOrdering</code> parameter. For
436
452
  * more information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream">Adding Data to a Stream</a> in the <i>Amazon Kinesis Data Streams
437
453
  * Developer Guide</i>.</p>
@@ -442,8 +458,8 @@ export declare class Kinesis extends KinesisClient {
442
458
  * <p>If a <code>PutRecord</code> request cannot be processed because of insufficient
443
459
  * provisioned throughput on the shard involved in the request, <code>PutRecord</code>
444
460
  * throws <code>ProvisionedThroughputExceededException</code>. </p>
445
- * <p>By default, data records are accessible for 24 hours from the time that they are
446
- * added to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>
461
+ * <p>By default, data records are accessible for 24 hours from the time that they are added
462
+ * to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>
447
463
  */
448
464
  putRecord(args: PutRecordCommandInput, options?: __HttpHandlerOptions): Promise<PutRecordCommandOutput>;
449
465
  putRecord(args: PutRecordCommandInput, cb: (err: any, data?: PutRecordCommandOutput) => void): void;
@@ -452,8 +468,8 @@ export declare class Kinesis extends KinesisClient {
452
468
  * <p>Writes multiple data records into a Kinesis data stream in a single call (also
453
469
  * referred to as a <code>PutRecords</code> request). Use this operation to send data into
454
470
  * the stream for data ingestion and processing. </p>
455
- * <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in
456
- * the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
471
+ * <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in the
472
+ * request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
457
473
  * including partition keys. Each shard can support writes up to 1,000 records per second,
458
474
  * up to a maximum data write total of 1 MiB per second.</p>
459
475
  * <p>You must specify the name of the stream that captures, stores, and transports the
@@ -478,9 +494,9 @@ export declare class Kinesis extends KinesisClient {
478
494
  * record in the request array using natural ordering, from the top to the bottom of the
479
495
  * request and response. The response <code>Records</code> array always includes the same
480
496
  * number of records as the request array.</p>
481
- * <p>The response <code>Records</code> array includes both successfully and
482
- * unsuccessfully processed records. Kinesis Data Streams attempts to process all records
483
- * in each <code>PutRecords</code> request. A single record failure does not stop the
497
+ * <p>The response <code>Records</code> array includes both successfully and unsuccessfully
498
+ * processed records. Kinesis Data Streams attempts to process all records in each
499
+ * <code>PutRecords</code> request. A single record failure does not stop the
484
500
  * processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering
485
501
  * of records. If you need to read records in the same order they are written to the
486
502
  * stream, use <a>PutRecord</a> instead of <code>PutRecords</code>, and write to
@@ -503,8 +519,8 @@ export declare class Kinesis extends KinesisClient {
503
519
  * <p>After you write a record to a stream, you cannot modify that record or its order
504
520
  * within the stream.</p>
505
521
  * </important>
506
- * <p>By default, data records are accessible for 24 hours from the time that they are
507
- * added to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>
522
+ * <p>By default, data records are accessible for 24 hours from the time that they are added
523
+ * to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>
508
524
  */
509
525
  putRecords(args: PutRecordsCommandInput, options?: __HttpHandlerOptions): Promise<PutRecordsCommandOutput>;
510
526
  putRecords(args: PutRecordsCommandInput, cb: (err: any, data?: PutRecordsCommandOutput) => void): void;
@@ -550,80 +566,77 @@ export declare class Kinesis extends KinesisClient {
550
566
  * <code>SplitShard</code> to increase stream capacity, so that more Kinesis Data
551
567
  * Streams applications can simultaneously read data from the stream for real-time
552
568
  * processing. </p>
553
- * <p>You must specify the shard to be split and the new hash key, which is the position
554
- * in the shard where the shard gets split in two. In many cases, the new hash key might be
569
+ * <p>You must specify the shard to be split and the new hash key, which is the position in
570
+ * the shard where the shard gets split in two. In many cases, the new hash key might be
555
571
  * the average of the beginning and ending hash key, but it can be any hash key value in
556
572
  * the range being mapped into the shard. For more information, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html">Split a
557
573
  * Shard</a> in the <i>Amazon Kinesis Data Streams Developer
558
574
  * Guide</i>.</p>
559
- * <p>You can use <a>DescribeStream</a> to determine the shard ID and hash key
560
- * values for the <code>ShardToSplit</code> and <code>NewStartingHashKey</code> parameters
561
- * that are specified in the <code>SplitShard</code> request.</p>
575
+ * <p>You can use <a>DescribeStreamSummary</a> and the <a>ListShards</a> APIs to determine the shard ID and hash key values for the <code>ShardToSplit</code>
576
+ * and <code>NewStartingHashKey</code> parameters that are specified in the
577
+ * <code>SplitShard</code> request.</p>
562
578
  * <p>
563
579
  * <code>SplitShard</code> is an asynchronous operation. Upon receiving a
564
580
  * <code>SplitShard</code> request, Kinesis Data Streams immediately returns a response
565
581
  * and sets the stream status to <code>UPDATING</code>. After the operation is completed,
566
582
  * Kinesis Data Streams sets the stream status to <code>ACTIVE</code>. Read and write
567
583
  * operations continue to work while the stream is in the <code>UPDATING</code> state. </p>
568
- * <p>You can use <code>DescribeStream</code> to check the status of the stream, which is
569
- * returned in <code>StreamStatus</code>. If the stream is in the <code>ACTIVE</code>
570
- * state, you can call <code>SplitShard</code>. If a stream is in <code>CREATING</code> or
571
- * <code>UPDATING</code> or <code>DELETING</code> states, <code>DescribeStream</code>
572
- * returns a <code>ResourceInUseException</code>.</p>
573
- * <p>If the specified stream does not exist, <code>DescribeStream</code> returns a
574
- * <code>ResourceNotFoundException</code>. If you try to create more shards than are
575
- * authorized for your account, you receive a <code>LimitExceededException</code>. </p>
576
- * <p>For the default shard limit for an AWS account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Kinesis Data Streams
577
- * Limits</a> in the <i>Amazon Kinesis Data Streams Developer
578
- * Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact AWS
579
- * Support</a>.</p>
584
+ * <p>You can use <a>DescribeStreamSummary</a> to check the status of the stream,
585
+ * which is returned in <code>StreamStatus</code>. If the stream is in the
586
+ * <code>ACTIVE</code> state, you can call <code>SplitShard</code>.
587
+ * </p>
588
+ * <p>If the specified stream does not exist, <a>DescribeStreamSummary</a>
589
+ * returns a <code>ResourceNotFoundException</code>. If you try to create more shards than
590
+ * are authorized for your account, you receive a <code>LimitExceededException</code>. </p>
591
+ * <p>For the default shard limit for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Kinesis
592
+ * Data Streams Limits</a> in the <i>Amazon Kinesis Data Streams Developer
593
+ * Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact Amazon Web Services
594
+ * Support</a>.</p>
580
595
  * <p>If you try to operate on too many streams simultaneously using <a>CreateStream</a>, <a>DeleteStream</a>, <a>MergeShards</a>, and/or <a>SplitShard</a>, you receive a
581
596
  * <code>LimitExceededException</code>. </p>
582
597
  * <p>
583
- * <code>SplitShard</code> has a limit of five transactions per second per
584
- * account.</p>
598
+ * <code>SplitShard</code> has a limit of five transactions per second per account.</p>
585
599
  */
586
600
  splitShard(args: SplitShardCommandInput, options?: __HttpHandlerOptions): Promise<SplitShardCommandOutput>;
587
601
  splitShard(args: SplitShardCommandInput, cb: (err: any, data?: SplitShardCommandOutput) => void): void;
588
602
  splitShard(args: SplitShardCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: SplitShardCommandOutput) => void): void;
589
603
  /**
590
- * <p>Enables or updates server-side encryption using an AWS KMS key for a specified
591
- * stream. </p>
592
- * <p>Starting encryption is an asynchronous operation. Upon receiving the request,
593
- * Kinesis Data Streams returns immediately and sets the status of the stream to
604
+ * <p>Enables or updates server-side encryption using an Amazon Web Services KMS key for a
605
+ * specified stream. </p>
606
+ * <p>Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis
607
+ * Data Streams returns immediately and sets the status of the stream to
594
608
  * <code>UPDATING</code>. After the update is complete, Kinesis Data Streams sets the
595
609
  * status of the stream back to <code>ACTIVE</code>. Updating or applying encryption
596
610
  * normally takes a few seconds to complete, but it can take minutes. You can continue to
597
611
  * read and write data to your stream while its status is <code>UPDATING</code>. Once the
598
612
  * status of the stream is <code>ACTIVE</code>, encryption begins for records written to
599
613
  * the stream. </p>
600
- * <p>API Limits: You can successfully apply a new AWS KMS key for server-side encryption
601
- * 25 times in a rolling 24-hour period.</p>
602
- * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code>
603
- * status before all records written to the stream are encrypted. After you enable
604
- * encryption, you can verify that encryption is applied by inspecting the API response
605
- * from <code>PutRecord</code> or <code>PutRecords</code>.</p>
614
+ * <p>API Limits: You can successfully apply a new Amazon Web Services KMS key for
615
+ * server-side encryption 25 times in a rolling 24-hour period.</p>
616
+ * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
617
+ * before all records written to the stream are encrypted. After you enable encryption, you
618
+ * can verify that encryption is applied by inspecting the API response from
619
+ * <code>PutRecord</code> or <code>PutRecords</code>.</p>
606
620
  */
607
621
  startStreamEncryption(args: StartStreamEncryptionCommandInput, options?: __HttpHandlerOptions): Promise<StartStreamEncryptionCommandOutput>;
608
622
  startStreamEncryption(args: StartStreamEncryptionCommandInput, cb: (err: any, data?: StartStreamEncryptionCommandOutput) => void): void;
609
623
  startStreamEncryption(args: StartStreamEncryptionCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: StartStreamEncryptionCommandOutput) => void): void;
610
624
  /**
611
625
  * <p>Disables server-side encryption for a specified stream. </p>
612
- * <p>Stopping encryption is an asynchronous operation. Upon receiving the request,
613
- * Kinesis Data Streams returns immediately and sets the status of the stream to
626
+ * <p>Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis
627
+ * Data Streams returns immediately and sets the status of the stream to
614
628
  * <code>UPDATING</code>. After the update is complete, Kinesis Data Streams sets the
615
629
  * status of the stream back to <code>ACTIVE</code>. Stopping encryption normally takes a
616
630
  * few seconds to complete, but it can take minutes. You can continue to read and write
617
631
  * data to your stream while its status is <code>UPDATING</code>. Once the status of the
618
632
  * stream is <code>ACTIVE</code>, records written to the stream are no longer encrypted by
619
633
  * Kinesis Data Streams. </p>
620
- * <p>API Limits: You can successfully disable server-side encryption 25 times in a
621
- * rolling 24-hour period. </p>
622
- * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code>
623
- * status before all records written to the stream are no longer subject to encryption.
624
- * After you disabled encryption, you can verify that encryption is not applied by
625
- * inspecting the API response from <code>PutRecord</code> or
626
- * <code>PutRecords</code>.</p>
634
+ * <p>API Limits: You can successfully disable server-side encryption 25 times in a rolling
635
+ * 24-hour period. </p>
636
+ * <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status
637
+ * before all records written to the stream are no longer subject to encryption. After you
638
+ * disable encryption, you can verify that encryption is not applied by inspecting the API
639
+ * response from <code>PutRecord</code> or <code>PutRecords</code>.</p>
627
640
  */
628
641
  stopStreamEncryption(args: StopStreamEncryptionCommandInput, options?: __HttpHandlerOptions): Promise<StopStreamEncryptionCommandOutput>;
629
642
  stopStreamEncryption(args: StopStreamEncryptionCommandInput, cb: (err: any, data?: StopStreamEncryptionCommandOutput) => void): void;
@@ -647,8 +660,9 @@ export declare class Kinesis extends KinesisClient {
647
660
  * <p>If you call <code>SubscribeToShard</code> again with the same <code>ConsumerARN</code>
648
661
  * and <code>ShardId</code> within 5 seconds of a successful call, you'll get a
649
662
  * <code>ResourceInUseException</code>. If you call <code>SubscribeToShard</code> 5
650
- * seconds or more after a successful call, the first connection will expire and the second
651
- * call will take over the subscription.</p>
663
+ * seconds or more after a successful call, the second call takes over the subscription and
664
+ * the previous connection expires or fails with a
665
+ * <code>ResourceInUseException</code>.</p>
652
666
 * <p>For an example of how to use this operation, see <a href="/streams/latest/dev/building-enhanced-consumers-api.html">Enhanced Fan-Out
653
667
  * Using the Kinesis Data Streams API</a>.</p>
654
668
  */
@@ -668,8 +682,8 @@ export declare class Kinesis extends KinesisClient {
668
682
  * individual shards. This can cause short-lived shards to be created, in addition to the
669
683
  * final shards. These short-lived shards count towards your total shard limit for your
670
684
  * account in the Region.</p>
671
- * <p>When using this operation, we recommend that you specify a target shard count that
672
- * is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your
685
+ * <p>When using this operation, we recommend that you specify a target shard count that is
686
+ * a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your
673
687
  * shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling
674
688
  * action might take longer to complete. </p>
675
689
  * <p>This operation has the following default limits. By default, you cannot do the
@@ -679,29 +693,37 @@ export declare class Kinesis extends KinesisClient {
679
693
  * <p>Scale more than ten times per rolling 24-hour period per stream</p>
680
694
  * </li>
681
695
  * <li>
682
- * <p>Scale up to more than double your current shard count for a
683
- * stream</p>
696
+ * <p>Scale up to more than double your current shard count for a stream</p>
684
697
  * </li>
685
698
  * <li>
686
699
  * <p>Scale down below half your current shard count for a stream</p>
687
700
  * </li>
688
701
  * <li>
689
- * <p>Scale up to more than 500 shards in a stream</p>
702
+ * <p>Scale up to more than 10000 shards in a stream</p>
690
703
  * </li>
691
704
  * <li>
692
- * <p>Scale a stream with more than 500 shards down unless the result is less
693
- * than 500 shards</p>
705
+ * <p>Scale a stream with more than 10000 shards down unless the result is less than
706
+ * 10000 shards</p>
694
707
  * </li>
695
708
  * <li>
696
709
  * <p>Scale up to more than the shard limit for your account</p>
697
710
  * </li>
698
711
  * </ul>
699
- * <p>For the default limits for an AWS account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Streams Limits</a> in the
700
- * <i>Amazon Kinesis Data Streams Developer Guide</i>. To request an
701
- * increase in the call rate limit, the shard limit for this API, or your overall shard
702
- * limit, use the <a href="https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis">limits form</a>.</p>
712
+ * <p>For the default limits for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Streams
713
+ * Limits</a> in the <i>Amazon Kinesis Data Streams Developer
714
+ * Guide</i>. To request an increase in the call rate limit, the shard limit for
715
+ * this API, or your overall shard limit, use the <a href="https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis">limits form</a>.</p>
703
716
  */
704
717
  updateShardCount(args: UpdateShardCountCommandInput, options?: __HttpHandlerOptions): Promise<UpdateShardCountCommandOutput>;
705
718
  updateShardCount(args: UpdateShardCountCommandInput, cb: (err: any, data?: UpdateShardCountCommandOutput) => void): void;
706
719
  updateShardCount(args: UpdateShardCountCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: UpdateShardCountCommandOutput) => void): void;
720
+ /**
721
+ * <p> Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you
722
+ * can choose between an <b>on-demand</b> capacity mode and a
723
+ * <b>provisioned</b> capacity mode for your data stream.
724
+ * </p>
725
+ */
726
+ updateStreamMode(args: UpdateStreamModeCommandInput, options?: __HttpHandlerOptions): Promise<UpdateStreamModeCommandOutput>;
727
+ updateStreamMode(args: UpdateStreamModeCommandInput, cb: (err: any, data?: UpdateStreamModeCommandOutput) => void): void;
728
+ updateStreamMode(args: UpdateStreamModeCommandInput, options: __HttpHandlerOptions, cb: (err: any, data?: UpdateStreamModeCommandOutput) => void): void;
707
729
  }
@@ -35,8 +35,9 @@ import { StartStreamEncryptionCommandInput, StartStreamEncryptionCommandOutput }
35
35
  import { StopStreamEncryptionCommandInput, StopStreamEncryptionCommandOutput } from "./commands/StopStreamEncryptionCommand";
36
36
  import { SubscribeToShardCommandInput, SubscribeToShardCommandOutput } from "./commands/SubscribeToShardCommand";
37
37
  import { UpdateShardCountCommandInput, UpdateShardCountCommandOutput } from "./commands/UpdateShardCountCommand";
38
- export declare type ServiceInputTypes = AddTagsToStreamCommandInput | CreateStreamCommandInput | DecreaseStreamRetentionPeriodCommandInput | DeleteStreamCommandInput | DeregisterStreamConsumerCommandInput | DescribeLimitsCommandInput | DescribeStreamCommandInput | DescribeStreamConsumerCommandInput | DescribeStreamSummaryCommandInput | DisableEnhancedMonitoringCommandInput | EnableEnhancedMonitoringCommandInput | GetRecordsCommandInput | GetShardIteratorCommandInput | IncreaseStreamRetentionPeriodCommandInput | ListShardsCommandInput | ListStreamConsumersCommandInput | ListStreamsCommandInput | ListTagsForStreamCommandInput | MergeShardsCommandInput | PutRecordCommandInput | PutRecordsCommandInput | RegisterStreamConsumerCommandInput | RemoveTagsFromStreamCommandInput | SplitShardCommandInput | StartStreamEncryptionCommandInput | StopStreamEncryptionCommandInput | SubscribeToShardCommandInput | UpdateShardCountCommandInput;
39
- export declare type ServiceOutputTypes = AddTagsToStreamCommandOutput | CreateStreamCommandOutput | DecreaseStreamRetentionPeriodCommandOutput | DeleteStreamCommandOutput | DeregisterStreamConsumerCommandOutput | DescribeLimitsCommandOutput | DescribeStreamCommandOutput | DescribeStreamConsumerCommandOutput | DescribeStreamSummaryCommandOutput | DisableEnhancedMonitoringCommandOutput | EnableEnhancedMonitoringCommandOutput | GetRecordsCommandOutput | GetShardIteratorCommandOutput | IncreaseStreamRetentionPeriodCommandOutput | ListShardsCommandOutput | ListStreamConsumersCommandOutput | ListStreamsCommandOutput | ListTagsForStreamCommandOutput | MergeShardsCommandOutput | PutRecordCommandOutput | PutRecordsCommandOutput | RegisterStreamConsumerCommandOutput | RemoveTagsFromStreamCommandOutput | SplitShardCommandOutput | StartStreamEncryptionCommandOutput | StopStreamEncryptionCommandOutput | SubscribeToShardCommandOutput | UpdateShardCountCommandOutput;
38
+ import { UpdateStreamModeCommandInput, UpdateStreamModeCommandOutput } from "./commands/UpdateStreamModeCommand";
39
+ export declare type ServiceInputTypes = AddTagsToStreamCommandInput | CreateStreamCommandInput | DecreaseStreamRetentionPeriodCommandInput | DeleteStreamCommandInput | DeregisterStreamConsumerCommandInput | DescribeLimitsCommandInput | DescribeStreamCommandInput | DescribeStreamConsumerCommandInput | DescribeStreamSummaryCommandInput | DisableEnhancedMonitoringCommandInput | EnableEnhancedMonitoringCommandInput | GetRecordsCommandInput | GetShardIteratorCommandInput | IncreaseStreamRetentionPeriodCommandInput | ListShardsCommandInput | ListStreamConsumersCommandInput | ListStreamsCommandInput | ListTagsForStreamCommandInput | MergeShardsCommandInput | PutRecordCommandInput | PutRecordsCommandInput | RegisterStreamConsumerCommandInput | RemoveTagsFromStreamCommandInput | SplitShardCommandInput | StartStreamEncryptionCommandInput | StopStreamEncryptionCommandInput | SubscribeToShardCommandInput | UpdateShardCountCommandInput | UpdateStreamModeCommandInput;
40
+ export declare type ServiceOutputTypes = AddTagsToStreamCommandOutput | CreateStreamCommandOutput | DecreaseStreamRetentionPeriodCommandOutput | DeleteStreamCommandOutput | DeregisterStreamConsumerCommandOutput | DescribeLimitsCommandOutput | DescribeStreamCommandOutput | DescribeStreamConsumerCommandOutput | DescribeStreamSummaryCommandOutput | DisableEnhancedMonitoringCommandOutput | EnableEnhancedMonitoringCommandOutput | GetRecordsCommandOutput | GetShardIteratorCommandOutput | IncreaseStreamRetentionPeriodCommandOutput | ListShardsCommandOutput | ListStreamConsumersCommandOutput | ListStreamsCommandOutput | ListTagsForStreamCommandOutput | MergeShardsCommandOutput | PutRecordCommandOutput | PutRecordsCommandOutput | RegisterStreamConsumerCommandOutput | RemoveTagsFromStreamCommandOutput | SplitShardCommandOutput | StartStreamEncryptionCommandOutput | StopStreamEncryptionCommandOutput | SubscribeToShardCommandOutput | UpdateShardCountCommandOutput | UpdateStreamModeCommandOutput;
40
41
  export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> {
41
42
  /**
42
43
  * The HTTP handler to use. Fetch in browser and Https in Nodejs.
@@ -156,8 +157,8 @@ export interface KinesisClientResolvedConfig extends KinesisClientResolvedConfig
156
157
  }
157
158
  /**
158
159
  * <fullname>Amazon Kinesis Data Streams Service API Reference</fullname>
159
- * <p>Amazon Kinesis Data Streams is a managed service that scales elastically for
160
- * real-time processing of streaming big data.</p>
160
+ * <p>Amazon Kinesis Data Streams is a managed service that scales elastically for real-time
161
+ * processing of streaming big data.</p>
161
162
  */
162
163
  export declare class KinesisClient extends __Client<__HttpHandlerOptions, ServiceInputTypes, ServiceOutputTypes, KinesisClientResolvedConfig> {
163
164
  /**