cdk-lambda-subminute 2.0.250 → 2.0.251
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +3 -3
- package/lib/cdk-lambda-subminute.js +3 -3
- package/node_modules/aws-sdk/CHANGELOG.md +7 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.examples.json +0 -77
- package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.min.json +8 -2
- package/node_modules/aws-sdk/apis/firehose-2015-08-04.min.json +69 -30
- package/node_modules/aws-sdk/apis/iot-2015-05-28.min.json +142 -128
- package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +1 -1
- package/node_modules/aws-sdk/clients/firehose.d.ts +59 -9
- package/node_modules/aws-sdk/clients/iot.d.ts +46 -29
- package/node_modules/aws-sdk/clients/textract.d.ts +5 -5
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +4 -4
- package/node_modules/aws-sdk/dist/aws-sdk.js +222 -163
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +84 -84
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +3 -3
package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts

@@ -943,7 +943,7 @@ declare namespace CognitoIdentityServiceProvider {
      */
     UserPoolId: UserPoolIdType;
     /**
-     * The username
+     * The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter. The username can't be a duplicate of another username in the same user pool. You can't change the value of a username after you create it. You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see Customizing sign-in attributes.
      */
     Username: UsernameType;
     /**
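A minimal sketch (not part of the diff) of how the updated Username documentation plays out with the aws-sdk v2 client typed above: calling AdminCreateUser with an email address as the username when the pool uses email as a sign-in attribute. The region, pool ID, and attribute values are placeholders.

```ts
import * as AWS from 'aws-sdk';

const cognito = new AWS.CognitoIdentityServiceProvider({ region: 'us-east-1' });

async function createUser(): Promise<void> {
  const res = await cognito.adminCreateUser({
    UserPoolId: 'us-east-1_EXAMPLE',                 // placeholder pool ID
    // Per the updated doc: must be unique in the pool and a valid sign-in attribute;
    // if the pool only allows email/phone sign-in, Cognito generates the username.
    Username: 'jane.doe@example.com',
    UserAttributes: [{ Name: 'email', Value: 'jane.doe@example.com' }],
  }).promise();
  console.log(res.User?.Username);
}
```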
package/node_modules/aws-sdk/clients/firehose.d.ts

@@ -52,27 +52,27 @@ declare class Firehose extends Service {
    */
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
-   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException,
+   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
    */
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
-   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException,
+   * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
    */
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
-   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException,
+   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
    */
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
-   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException,
+   * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
    */
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
-   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
+   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
    */
  startDeliveryStreamEncryption(params: Firehose.Types.StartDeliveryStreamEncryptionInput, callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
-   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
+   * Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
    */
  startDeliveryStreamEncryption(callback?: (err: AWSError, data: Firehose.Types.StartDeliveryStreamEncryptionOutput) => void): Request<Firehose.Types.StartDeliveryStreamEncryptionOutput, AWSError>;
  /**
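A minimal sketch (not part of the diff) of the retry pattern the updated PutRecordBatch comment describes: resend only the entries flagged in RequestResponses when FailedPutCount is greater than 0. The stream name, region, and backoff values are placeholders.

```ts
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' });

async function putBatchWithRetry(lines: string[]): Promise<void> {
  // Newline-delimit each record so the consumer can split them at the destination.
  let records: AWS.Firehose.Types.Record[] = lines.map((l) => ({ Data: l + '\n' }));
  for (let attempt = 0; attempt < 3 && records.length > 0; attempt++) {
    const res = await firehose.putRecordBatch({
      DeliveryStreamName: 'example-stream',          // placeholder
      Records: records,
    }).promise();
    if (res.FailedPutCount === 0) return;
    // RequestResponses is ordered like the request; keep only the failed entries.
    records = records.filter((_, i) => res.RequestResponses[i].ErrorCode !== undefined);
    await new Promise((r) => setTimeout(r, 500 * (attempt + 1)));  // simple backoff
  }
}
```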
@@ -375,6 +375,16 @@ declare namespace Firehose {
   }
   export type AmazonopensearchserviceS3BackupMode = "FailedDocumentsOnly"|"AllDocuments"|string;
   export type AmazonopensearchserviceTypeName = string;
+  export interface AuthenticationConfiguration {
+    /**
+     * The ARN of the role used to access the Amazon MSK cluster.
+     */
+    RoleARN: RoleARN;
+    /**
+     * The type of connectivity used to access the Amazon MSK cluster.
+     */
+    Connectivity: Connectivity;
+  }
   export type BlockSizeBytes = number;
   export type BooleanObject = boolean;
   export type BucketARN = string;
@@ -405,6 +415,7 @@ declare namespace Firehose {
   export type ClusterJDBCURL = string;
   export type ColumnToJsonKeyMappings = {[key: string]: NonEmptyString};
   export type CompressionFormat = "UNCOMPRESSED"|"GZIP"|"ZIP"|"Snappy"|"HADOOP_SNAPPY"|string;
+  export type Connectivity = "PUBLIC"|"PRIVATE"|string;
   export type ContentEncoding = "NONE"|"GZIP"|string;
   export interface CopyCommand {
     /**
@@ -474,6 +485,7 @@ declare namespace Firehose {
      * The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.
      */
     AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+    MSKSourceConfiguration?: MSKSourceConfiguration;
   }
   export interface CreateDeliveryStreamOutput {
     /**
@@ -600,7 +612,7 @@ declare namespace Firehose {
   export type DeliveryStreamName = string;
   export type DeliveryStreamNameList = DeliveryStreamName[];
   export type DeliveryStreamStatus = "CREATING"|"CREATING_FAILED"|"DELETING"|"DELETING_FAILED"|"ACTIVE"|string;
-  export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|string;
+  export type DeliveryStreamType = "DirectPut"|"KinesisStreamAsSource"|"MSKAsSource"|string;
   export type DeliveryStreamVersionId = string;
   export interface DescribeDeliveryStreamInput {
     /**
@@ -1326,6 +1338,39 @@ declare namespace Firehose {
   export type ListTagsForDeliveryStreamOutputTagList = Tag[];
   export type LogGroupName = string;
   export type LogStreamName = string;
+  export type MSKClusterARN = string;
+  export interface MSKSourceConfiguration {
+    /**
+     * The ARN of the Amazon MSK cluster.
+     */
+    MSKClusterARN: MSKClusterARN;
+    /**
+     * The topic name within the Amazon MSK cluster.
+     */
+    TopicName: TopicName;
+    /**
+     * The authentication configuration of the Amazon MSK cluster.
+     */
+    AuthenticationConfiguration: AuthenticationConfiguration;
+  }
+  export interface MSKSourceDescription {
+    /**
+     * The ARN of the Amazon MSK cluster.
+     */
+    MSKClusterARN?: MSKClusterARN;
+    /**
+     * The topic name within the Amazon MSK cluster.
+     */
+    TopicName?: TopicName;
+    /**
+     * The authentication configuration of the Amazon MSK cluster.
+     */
+    AuthenticationConfiguration?: AuthenticationConfiguration;
+    /**
+     * Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.
+     */
+    DeliveryStartTimestamp?: DeliveryStartTimestamp;
+  }
   export type NoEncryptionConfig = "NoEncryption"|string;
   export type NonEmptyString = string;
   export type NonEmptyStringWithoutWhitespace = string;
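A minimal sketch (not part of the diff) of how the new MSK source types fit together when creating a delivery stream with MSKAsSource. All ARNs, names, and the S3 destination settings are placeholders.

```ts
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' });

const params: AWS.Firehose.Types.CreateDeliveryStreamInput = {
  DeliveryStreamName: 'msk-to-s3-example',            // placeholder
  DeliveryStreamType: 'MSKAsSource',                  // new enum value in this release
  MSKSourceConfiguration: {
    MSKClusterARN: 'arn:aws:kafka:us-east-1:111122223333:cluster/example/uuid',
    TopicName: 'example-topic',
    AuthenticationConfiguration: {
      RoleARN: 'arn:aws:iam::111122223333:role/firehose-msk-role',
      Connectivity: 'PRIVATE',                        // "PUBLIC" | "PRIVATE"
    },
  },
  S3DestinationConfiguration: {
    RoleARN: 'arn:aws:iam::111122223333:role/firehose-delivery-role',
    BucketARN: 'arn:aws:s3:::example-bucket',
  },
};

firehose.createDeliveryStream(params).promise()
  .then((res) => console.log(res.DeliveryStreamARN));
```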
@@ -1459,9 +1504,9 @@ declare namespace Firehose {
     ParameterValue: ProcessorParameterValue;
   }
   export type ProcessorParameterList = ProcessorParameter[];
-  export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|string;
+  export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|"CompressionFormat"|string;
   export type ProcessorParameterValue = string;
-  export type ProcessorType = "RecordDeAggregation"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
+  export type ProcessorType = "RecordDeAggregation"|"Decompression"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
   export type Proportion = number;
   export interface PutRecordBatchInput {
     /**
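A minimal sketch (not part of the diff) showing where the new "Decompression" processor type and "CompressionFormat" parameter name slot into a standard ProcessingConfiguration. The "GZIP" parameter value and the idea of attaching this to a destination's processing configuration are assumptions, not taken from the diff.

```ts
import * as AWS from 'aws-sdk';

const processing: AWS.Firehose.Types.ProcessingConfiguration = {
  Enabled: true,
  Processors: [
    {
      Type: 'Decompression',                                  // new ProcessorType value
      Parameters: [
        // New ProcessorParameterName value; "GZIP" is an assumed example value.
        { ParameterName: 'CompressionFormat', ParameterValue: 'GZIP' },
      ],
    },
  ],
};
```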
@@ -1826,6 +1871,10 @@ declare namespace Firehose {
      * The KinesisStreamSourceDescription value for the source Kinesis data stream.
      */
     KinesisStreamSourceDescription?: KinesisStreamSourceDescription;
+    /**
+     * The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.
+     */
+    MSKSourceDescription?: MSKSourceDescription;
   }
   export interface SplunkDestinationConfiguration {
     /**
@@ -1997,6 +2046,7 @@ declare namespace Firehose {
   export type TagKeyList = TagKey[];
   export type TagValue = string;
   export type Timestamp = Date;
+  export type TopicName = string;
   export interface UntagDeliveryStreamInput {
     /**
      * The name of the delivery stream.
package/node_modules/aws-sdk/clients/iot.d.ts

@@ -164,11 +164,11 @@ declare class Iot extends Service {
    */
  createBillingGroup(callback?: (err: AWSError, data: Iot.Types.CreateBillingGroupResponse) => void): Request<Iot.Types.CreateBillingGroupResponse, AWSError>;
  /**
-   * Creates an X.509 certificate using the specified certificate signing request. Requires permission to access the CreateCertificateFromCsr action. The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-
+   * Creates an X.509 certificate using the specified certificate signing request. Requires permission to access the CreateCertificateFromCsr action. The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256, NIST P-384, or NIST P-521 curves. For supported certificates, consult Certificate signing algorithms supported by IoT. Reusing the same certificate signing request (CSR) results in a distinct certificate. You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory: On Linux and OS X, the command is: $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{} This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR. You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process: $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{} On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is: > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_} On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is: > forfiles /p my-csr-directory /c "cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path"
    */
  createCertificateFromCsr(params: Iot.Types.CreateCertificateFromCsrRequest, callback?: (err: AWSError, data: Iot.Types.CreateCertificateFromCsrResponse) => void): Request<Iot.Types.CreateCertificateFromCsrResponse, AWSError>;
  /**
-   * Creates an X.509 certificate using the specified certificate signing request. Requires permission to access the CreateCertificateFromCsr action. The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-
+   * Creates an X.509 certificate using the specified certificate signing request. Requires permission to access the CreateCertificateFromCsr action. The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256, NIST P-384, or NIST P-521 curves. For supported certificates, consult Certificate signing algorithms supported by IoT. Reusing the same certificate signing request (CSR) results in a distinct certificate. You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory: On Linux and OS X, the command is: $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{} This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR. You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process: $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{} On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is: > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_} On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is: > forfiles /p my-csr-directory /c "cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path"
    */
  createCertificateFromCsr(callback?: (err: AWSError, data: Iot.Types.CreateCertificateFromCsrResponse) => void): Request<Iot.Types.CreateCertificateFromCsrResponse, AWSError>;
  /**
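A minimal sketch (not part of the diff) of the SDK equivalent of the CLI calls quoted in the updated comment: creating one certificate from a PEM-encoded CSR read from disk. The file path is a placeholder.

```ts
import * as fs from 'fs';
import * as AWS from 'aws-sdk';

const iot = new AWS.Iot({ region: 'us-east-1' });

async function certFromCsr(csrPath: string): Promise<void> {
  const csr = fs.readFileSync(csrPath, 'utf8');    // PEM-encoded CSR
  const res = await iot.createCertificateFromCsr({
    certificateSigningRequest: csr,
    setAsActive: true,
  }).promise();
  console.log(res.certificateArn, res.certificateId);
}
```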
@@ -516,11 +516,11 @@ declare class Iot extends Service {
    */
  deletePackage(callback?: (err: AWSError, data: Iot.Types.DeletePackageResponse) => void): Request<Iot.Types.DeletePackageResponse, AWSError>;
  /**
-   * Deletes a specific version from a software package. Note: If a package version is designated as default, you must remove the designation from the package using the UpdatePackage action.
+   * Deletes a specific version from a software package. Note: If a package version is designated as default, you must remove the designation from the software package using the UpdatePackage action.
    */
  deletePackageVersion(params: Iot.Types.DeletePackageVersionRequest, callback?: (err: AWSError, data: Iot.Types.DeletePackageVersionResponse) => void): Request<Iot.Types.DeletePackageVersionResponse, AWSError>;
  /**
-   * Deletes a specific version from a software package. Note: If a package version is designated as default, you must remove the designation from the package using the UpdatePackage action.
+   * Deletes a specific version from a software package. Note: If a package version is designated as default, you must remove the designation from the software package using the UpdatePackage action.
    */
  deletePackageVersion(callback?: (err: AWSError, data: Iot.Types.DeletePackageVersionResponse) => void): Request<Iot.Types.DeletePackageVersionResponse, AWSError>;
  /**
@@ -1908,19 +1908,19 @@ declare class Iot extends Service {
    */
  updateMitigationAction(callback?: (err: AWSError, data: Iot.Types.UpdateMitigationActionResponse) => void): Request<Iot.Types.UpdateMitigationActionResponse, AWSError>;
  /**
-   * Updates the supported fields for a specific package. Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.
+   * Updates the supported fields for a specific software package. Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.
    */
  updatePackage(params: Iot.Types.UpdatePackageRequest, callback?: (err: AWSError, data: Iot.Types.UpdatePackageResponse) => void): Request<Iot.Types.UpdatePackageResponse, AWSError>;
  /**
-   * Updates the supported fields for a specific package. Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.
+   * Updates the supported fields for a specific software package. Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.
    */
  updatePackage(callback?: (err: AWSError, data: Iot.Types.UpdatePackageResponse) => void): Request<Iot.Types.UpdatePackageResponse, AWSError>;
  /**
-   * Updates the package configuration. Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
+   * Updates the software package configuration. Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
    */
  updatePackageConfiguration(params: Iot.Types.UpdatePackageConfigurationRequest, callback?: (err: AWSError, data: Iot.Types.UpdatePackageConfigurationResponse) => void): Request<Iot.Types.UpdatePackageConfigurationResponse, AWSError>;
  /**
-   * Updates the package configuration. Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
+   * Updates the software package configuration. Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
    */
  updatePackageConfiguration(callback?: (err: AWSError, data: Iot.Types.UpdatePackageConfigurationResponse) => void): Request<Iot.Types.UpdatePackageConfigurationResponse, AWSError>;
  /**
@@ -2847,7 +2847,7 @@ declare namespace Iot {
      */
     metricDimension?: MetricDimension;
     /**
-     * The criteria that determine if a device is behaving normally in regard to the metric.
+     * The criteria that determine if a device is behaving normally in regard to the metric. In the IoT console, you can choose to be sent an alert through Amazon SNS when IoT Device Defender detects that a device is behaving anomalously.
      */
     criteria?: BehaviorCriteria;
     /**
@@ -3764,7 +3764,7 @@ declare namespace Iot {
      */
     jobArn?: JobArn;
     /**
-     * An S3 link to the job document
+     * An S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document. For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0 For more information, see Methods for accessing a bucket.
      */
     documentSource?: JobDocumentSource;
     /**
@@ -3904,7 +3904,7 @@ declare namespace Iot {
      */
     roleArn: RoleArn;
     /**
-     * A list of additional OTA update parameters which are name-value pairs.
+     * A list of additional OTA update parameters, which are name-value pairs. They won't be sent to devices as a part of the Job document.
      */
     additionalParameters?: AdditionalParameterMap;
     /**
@@ -3936,7 +3936,7 @@ declare namespace Iot {
   }
   export interface CreatePackageRequest {
     /**
-     * The name of the new package.
+     * The name of the new software package.
      */
     packageName: PackageName;
     /**
@@ -3954,7 +3954,7 @@ declare namespace Iot {
   }
   export interface CreatePackageResponse {
     /**
-     * The name of the package.
+     * The name of the software package.
      */
     packageName?: PackageName;
     /**
@@ -3968,7 +3968,7 @@ declare namespace Iot {
   }
   export interface CreatePackageVersionRequest {
     /**
-     * The name of the associated package.
+     * The name of the associated software package.
      */
     packageName: PackageName;
     /**
@@ -3998,7 +3998,7 @@ declare namespace Iot {
      */
     packageVersionArn?: PackageVersionArn;
     /**
-     * The name of the associated package.
+     * The name of the associated software package.
      */
     packageName?: PackageName;
     /**
@@ -4647,7 +4647,7 @@ declare namespace Iot {
   }
   export interface DeletePackageRequest {
     /**
-     * The name of the target package.
+     * The name of the target software package.
      */
     packageName: PackageName;
     /**
@@ -4659,7 +4659,7 @@ declare namespace Iot {
   }
   export interface DeletePackageVersionRequest {
     /**
-     * The name of the associated package.
+     * The name of the associated software package.
      */
     packageName: PackageName;
     /**
@@ -5150,7 +5150,7 @@ declare namespace Iot {
   }
   export interface DescribeEndpointRequest {
     /**
-     * The endpoint type. Valid endpoint types include: iot:Data - Returns a VeriSign signed data endpoint. iot:Data-ATS - Returns an ATS signed data endpoint. iot:CredentialProvider - Returns an IoT credentials provider API endpoint. iot:Jobs - Returns an IoT device management Jobs API endpoint. We strongly recommend that customers use the newer iot:Data-ATS endpoint type to avoid issues related to the widespread distrust of Symantec certificate authorities.
+     * The endpoint type. Valid endpoint types include: iot:Data - Returns a VeriSign signed data endpoint. iot:Data-ATS - Returns an ATS signed data endpoint. iot:CredentialProvider - Returns an IoT credentials provider API endpoint. iot:Jobs - Returns an IoT device management Jobs API endpoint. We strongly recommend that customers use the newer iot:Data-ATS endpoint type to avoid issues related to the widespread distrust of Symantec certificate authorities. ATS Signed Certificates are more secure and are trusted by most popular browsers.
      */
     endpointType?: EndpointType;
   }
@@ -6400,13 +6400,13 @@ declare namespace Iot {
   }
   export interface GetPackageRequest {
     /**
-     * The name of the target package.
+     * The name of the target software package.
      */
     packageName: PackageName;
   }
   export interface GetPackageResponse {
     /**
-     * The name of the package.
+     * The name of the software package.
      */
     packageName?: PackageName;
     /**
@@ -6446,7 +6446,7 @@ declare namespace Iot {
      */
     packageVersionArn?: PackageVersionArn;
     /**
-     * The name of the package.
+     * The name of the software package.
      */
     packageName?: PackageName;
     /**
@@ -7181,7 +7181,24 @@ declare namespace Iot {
|
|
7181
7181
|
* Properties of the Apache Kafka producer client.
|
7182
7182
|
*/
|
7183
7183
|
clientProperties: ClientProperties;
|
7184
|
+
/**
|
7185
|
+
* The list of Kafka headers that you specify.
|
7186
|
+
*/
|
7187
|
+
headers?: KafkaHeaders;
|
7188
|
+
}
|
7189
|
+
export interface KafkaActionHeader {
|
7190
|
+
/**
|
7191
|
+
* The key of the Kafka header.
|
7192
|
+
*/
|
7193
|
+
key: KafkaHeaderKey;
|
7194
|
+
/**
|
7195
|
+
* The value of the Kafka header.
|
7196
|
+
*/
|
7197
|
+
value: KafkaHeaderValue;
|
7184
7198
|
}
|
7199
|
+
export type KafkaHeaderKey = string;
|
7200
|
+
export type KafkaHeaderValue = string;
|
7201
|
+
export type KafkaHeaders = KafkaActionHeader[];
|
7185
7202
|
export type Key = string;
|
7186
7203
|
export type KeyName = string;
|
7187
7204
|
export interface KeyPair {
|
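This hunk adds per-record Kafka headers to the kafka topic-rule action (the new KafkaActionHeader, KafkaHeaderKey, KafkaHeaderValue, and KafkaHeaders shapes). A sketch of how the new headers field could be passed when creating a topic rule; the rule name, destination ARN, Kafka topic, broker address, and header values are placeholders, not values from this diff:

```ts
import { Iot } from 'aws-sdk';

const iot = new Iot();

const params: Iot.CreateTopicRuleRequest = {
  ruleName: 'ForwardTelemetryToKafka',
  topicRulePayload: {
    sql: "SELECT * FROM 'sensors/+/telemetry'",
    actions: [
      {
        kafka: {
          destinationArn: 'arn:aws:iot:us-east-1:123456789012:ruledestination/vpc/example',
          topic: 'telemetry',
          clientProperties: {
            'bootstrap.servers': 'b-1.example.kafka.us-east-1.amazonaws.com:9092',
          },
          // New in this release: static headers attached to each produced record.
          headers: [
            { key: 'source', value: 'aws-iot-core' },
            { key: 'schema-version', value: '1' },
          ],
        },
      },
    ],
  },
};

iot.createTopicRule(params).promise().catch(console.error);
```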
@@ -8034,7 +8051,7 @@ declare namespace Iot {
|
|
8034
8051
|
}
|
8035
8052
|
export interface ListPackageVersionsRequest {
|
8036
8053
|
/**
|
8037
|
-
* The name of the target package.
|
8054
|
+
* The name of the target software package.
|
8038
8055
|
*/
|
8039
8056
|
packageName: PackageName;
|
8040
8057
|
/**
|
@@ -8886,7 +8903,7 @@ declare namespace Iot {
|
|
8886
8903
|
}
|
8887
8904
|
export type LogTargetConfigurations = LogTargetConfiguration[];
|
8888
8905
|
export type LogTargetName = string;
|
8889
|
-
export type LogTargetType = "DEFAULT"|"THING_GROUP"|"CLIENT_ID"|"SOURCE_IP"|"PRINCIPAL_ID"|string;
|
8906
|
+
export type LogTargetType = "DEFAULT"|"THING_GROUP"|"CLIENT_ID"|"SOURCE_IP"|"PRINCIPAL_ID"|"EVENT_TYPE"|"DEVICE_DEFENDER"|string;
|
8890
8907
|
export interface LoggingOptionsPayload {
|
8891
8908
|
/**
|
8892
8909
|
* The ARN of the IAM role that grants access.
|
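The LogTargetType union gains EVENT_TYPE and DEVICE_DEFENDER targets in this hunk. A sketch of setting a per-target log level with setV2LoggingLevel against one of the new target types; whether 'CONNECT' is an accepted target name for EVENT_TYPE is an assumption here, and the value is purely illustrative:

```ts
import { Iot } from 'aws-sdk';

const iot = new Iot();

// Raise the log level for one of the newly supported target types. The
// targetName ('CONNECT') is illustrative; valid names depend on the target type.
iot.setV2LoggingLevel({
  logTarget: { targetType: 'EVENT_TYPE', targetName: 'CONNECT' },
  logLevel: 'DEBUG',
}).promise().catch(console.error);
```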
@@ -9172,7 +9189,7 @@ declare namespace Iot {
|
|
9172
9189
|
*/
|
9173
9190
|
codeSigning?: CodeSigning;
|
9174
9191
|
/**
|
9175
|
-
* A list of name
|
9192
|
+
* A list of name-attribute pairs. They won't be sent to devices as a part of the Job document.
|
9176
9193
|
*/
|
9177
9194
|
attributes?: AttributesMap;
|
9178
9195
|
}
|
@@ -9318,7 +9335,7 @@ declare namespace Iot {
|
|
9318
9335
|
export type PackageName = string;
|
9319
9336
|
export interface PackageSummary {
|
9320
9337
|
/**
|
9321
|
-
* The name for the target package.
|
9338
|
+
* The name for the target software package.
|
9322
9339
|
*/
|
9323
9340
|
packageName?: PackageName;
|
9324
9341
|
/**
|
@@ -10020,11 +10037,11 @@ declare namespace Iot {
|
|
10020
10037
|
export type ScheduledJobRolloutList = ScheduledJobRollout[];
|
10021
10038
|
export interface SchedulingConfig {
|
10022
10039
|
/**
|
10023
|
-
* The time a job will begin rollout of the job document to all devices in the target group for a job. The startTime can be scheduled up to a year in advance and must be scheduled a minimum of thirty minutes from the current time. The date and time format for the startTime is YYYY-MM-DD for the date and HH:MM for the time.
|
10040
|
+
* The time a job will begin rollout of the job document to all devices in the target group for a job. The startTime can be scheduled up to a year in advance and must be scheduled a minimum of thirty minutes from the current time. The date and time format for the startTime is YYYY-MM-DD for the date and HH:MM for the time. For more information on the syntax for startTime when using an API command or the Command Line Interface, see Timestamp.
|
10024
10041
|
*/
|
10025
10042
|
startTime?: StringDateTime;
|
10026
10043
|
/**
|
10027
|
-
* The time a job will stop rollout of the job document to all devices in the target group for a job. The endTime must take place no later than two years from the current time and be scheduled a minimum of thirty minutes from the current time. The minimum duration between startTime and endTime is thirty minutes. The maximum duration between startTime and endTime is two years. The date and time format for the endTime is YYYY-MM-DD for the date and HH:MM for the time.
|
10044
|
+
* The time a job will stop rollout of the job document to all devices in the target group for a job. The endTime must take place no later than two years from the current time and be scheduled a minimum of thirty minutes from the current time. The minimum duration between startTime and endTime is thirty minutes. The maximum duration between startTime and endTime is two years. The date and time format for the endTime is YYYY-MM-DD for the date and HH:MM for the time. For more information on the syntax for endTime when using an API command or the Command Line Interface, see Timestamp.
|
10028
10045
|
*/
|
10029
10046
|
endTime?: StringDateTime;
|
10030
10047
|
/**
|
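Both scheduling fields above now point to the Timestamp syntax for their exact format. A sketch of a scheduled job that uses them; the job ID, target ARN, job document, and timestamp values are placeholders, and the precise date/time separator should be checked against the Timestamp syntax referenced in the descriptions:

```ts
import { Iot } from 'aws-sdk';

const iot = new Iot();

const params: Iot.CreateJobRequest = {
  jobId: 'scheduled-config-rollout-001',
  targets: ['arn:aws:iot:us-east-1:123456789012:thinggroup/ExampleGroup'],
  document: JSON.stringify({ operation: 'applyConfig', version: '2' }),
  schedulingConfig: {
    startTime: '2030-01-15T12:00', // >= 30 minutes out, at most a year in advance
    endTime: '2030-01-16T12:00',   // >= 30 minutes after startTime, at most two years out
    endBehavior: 'CANCEL',
  },
};

iot.createJob(params).promise().catch(console.error);
```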
@@ -11625,7 +11642,7 @@ declare namespace Iot {
|
|
11625
11642
|
}
|
11626
11643
|
export interface UpdatePackageRequest {
|
11627
11644
|
/**
|
11628
|
-
* The name of the target package.
|
11645
|
+
* The name of the target software package.
|
11629
11646
|
*/
|
11630
11647
|
packageName: PackageName;
|
11631
11648
|
/**
|
@@ -11661,7 +11678,7 @@ declare namespace Iot {
|
|
11661
11678
|
*/
|
11662
11679
|
description?: ResourceDescription;
|
11663
11680
|
/**
|
11664
|
-
* Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet. Note: Attributes can be updated only when the package version is in a draft state. The combined size of all the attributes on a package version is limited to 3KB.
|
11681
|
+
* Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet. Note: Attributes can be updated only when the package version is in a draft state. The combined size of all the attributes on a package version is limited to 3KB.
|
11665
11682
|
*/
|
11666
11683
|
attributes?: ResourceAttributes;
|
11667
11684
|
/**
|
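The attributes field described above carries configuration metadata (for example, an Amazon S3 file location) and can only be changed while the package version is still a draft. A sketch of updating those attributes; the package name, version, and attribute keys and values are placeholders:

```ts
import { Iot } from 'aws-sdk';

const iot = new Iot();

// Attach configuration metadata to a draft package version. The combined
// size of all attributes must stay within the 3KB limit noted above.
iot.updatePackageVersion({
  packageName: 'example-firmware',
  versionName: '1.2.3',
  attributes: {
    s3Location: 's3://example-bucket/firmware/1.2.3/image.bin',
    checksum: 'sha256:0123abcd',
  },
}).promise().catch(console.error);
```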
@@ -123,7 +123,7 @@ declare namespace Textract {
|
|
123
123
|
*/
|
124
124
|
Document: Document;
|
125
125
|
/**
|
126
|
-
* A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures within form data and table data, add SIGNATURES to either TABLES or FORMS. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes).
|
126
|
+
* A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list to return information about the layout of the document. To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures within the document and within form data and table data, add SIGNATURES to either TABLES or FORMS. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes).
|
127
127
|
*/
|
128
128
|
FeatureTypes: FeatureTypes;
|
129
129
|
/**
|
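Per the updated description, FeatureTypes can now include LAYOUT alongside TABLES, FORMS, QUERIES, and SIGNATURES. A sketch of a combined analysis request with the Textract client from this SDK; the bucket, object key, and region are placeholders:

```ts
import { Textract } from 'aws-sdk';

const textract = new Textract({ region: 'us-east-1' }); // region is illustrative

const params: Textract.AnalyzeDocumentRequest = {
  Document: { S3Object: { Bucket: 'example-bucket', Name: 'statements/page-1.png' } },
  // LAYOUT is the newly added feature type; TABLES and FORMS behave as before.
  FeatureTypes: ['TABLES', 'FORMS', 'LAYOUT'],
};

textract.analyzeDocument(params).promise()
  .then((res) => console.log(`Returned ${res.Blocks?.length ?? 0} blocks`))
  .catch(console.error);
```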
@@ -196,7 +196,7 @@ declare namespace Textract {
|
|
196
196
|
}
|
197
197
|
export interface Block {
|
198
198
|
/**
|
199
|
-
* The type of text item that's recognized. In operations for text detection, the following types are returned: PAGE - Contains a list of the LINE Block objects that are detected on a document page. WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. In text analysis operations, the following types are returned: PAGE - Contains a list of child Block objects that are detected on a document page. KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object. WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each. TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table. TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table. CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell. MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells. SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element. SIGNATURE - The location and
|
199
|
+
* The type of text item that's recognized. In operations for text detection, the following types are returned: PAGE - Contains a list of the LINE Block objects that are detected on a document page. WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. In text analysis operations, the following types are returned: PAGE - Contains a list of child Block objects that are detected on a document page. KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object. WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each. TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table. TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table. CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell. MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells. SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element. SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell. QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer. QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score.
|
200
200
|
*/
|
201
201
|
BlockType?: BlockType;
|
202
202
|
/**
|
@@ -248,7 +248,7 @@ declare namespace Textract {
|
|
248
248
|
*/
|
249
249
|
SelectionStatus?: SelectionStatus;
|
250
250
|
/**
|
251
|
-
* The page on which a block was detected. Page is returned by synchronous and asynchronous operations. Page values greater than 1 are only returned for multipage documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an asynchronous operation, even if it contains multiple document pages, is considered a single-page document. This means that for scanned images the value of Page is always 1.
|
251
|
+
* The page on which a block was detected. Page is returned by synchronous and asynchronous operations. Page values greater than 1 are only returned for multipage documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an asynchronous operation, even if it contains multiple document pages, is considered a single-page document. This means that for scanned images the value of Page is always 1.
|
252
252
|
*/
|
253
253
|
Page?: UInteger;
|
254
254
|
/**
|
@@ -257,7 +257,7 @@ declare namespace Textract {
|
|
257
257
|
Query?: Query;
|
258
258
|
}
|
259
259
|
export type BlockList = Block[];
|
260
|
-
export type BlockType = "KEY_VALUE_SET"|"PAGE"|"LINE"|"WORD"|"TABLE"|"CELL"|"SELECTION_ELEMENT"|"MERGED_CELL"|"TITLE"|"QUERY"|"QUERY_RESULT"|"SIGNATURE"|"TABLE_TITLE"|"TABLE_FOOTER"|string;
|
260
|
+
export type BlockType = "KEY_VALUE_SET"|"PAGE"|"LINE"|"WORD"|"TABLE"|"CELL"|"SELECTION_ELEMENT"|"MERGED_CELL"|"TITLE"|"QUERY"|"QUERY_RESULT"|"SIGNATURE"|"TABLE_TITLE"|"TABLE_FOOTER"|"LAYOUT_TEXT"|"LAYOUT_TITLE"|"LAYOUT_HEADER"|"LAYOUT_FOOTER"|"LAYOUT_SECTION_HEADER"|"LAYOUT_PAGE_NUMBER"|"LAYOUT_LIST"|"LAYOUT_FIGURE"|"LAYOUT_TABLE"|"LAYOUT_KEY_VALUE"|string;
|
261
261
|
export interface BoundingBox {
|
262
262
|
/**
|
263
263
|
* The width of the bounding box as a ratio of the overall document page width.
|
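With the LAYOUT_* values added to the BlockType union above, responses can now contain layout blocks alongside the existing block types. A small helper sketch that tallies them, assuming a Blocks array returned by the analyzeDocument call shown earlier:

```ts
import { Textract } from 'aws-sdk';

// Count the new layout blocks by specific type, producing something like
// { LAYOUT_TITLE: 1, LAYOUT_SECTION_HEADER: 4, LAYOUT_TEXT: 12 }.
function summarizeLayout(blocks: Textract.BlockList): Record<string, number> {
  const counts: Record<string, number> = {};
  for (const block of blocks) {
    const type = block.BlockType;
    if (type && type.startsWith('LAYOUT_')) {
      counts[type] = (counts[type] ?? 0) + 1;
    }
  }
  return counts;
}
```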
@@ -448,7 +448,7 @@ declare namespace Textract {
|
|
448
448
|
IdentityDocument?: IdentityDocument;
|
449
449
|
}
|
450
450
|
export type ExtractionList = Extraction[];
|
451
|
-
export type FeatureType = "TABLES"|"FORMS"|"QUERIES"|"SIGNATURES"|string;
|
451
|
+
export type FeatureType = "TABLES"|"FORMS"|"QUERIES"|"SIGNATURES"|"LAYOUT"|string;
|
452
452
|
export type FeatureTypes = FeatureType[];
|
453
453
|
export type Float = number;
|
454
454
|
export type FlowDefinitionArn = string;
|