aws-sdk 2.978.0 → 2.982.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +27 -1
- package/README.md +1 -1
- package/apis/acm-pca-2017-08-22.min.json +26 -14
- package/apis/cloudformation-2010-05-15.min.json +27 -1
- package/apis/ec2-2016-11-15.min.json +731 -727
- package/apis/elasticfilesystem-2015-02-01.min.json +2 -1
- package/apis/firehose-2015-08-04.min.json +104 -79
- package/apis/fsx-2018-03-01.min.json +639 -66
- package/apis/fsx-2018-03-01.paginators.json +15 -0
- package/apis/iot-2015-05-28.min.json +481 -232
- package/apis/iot-2015-05-28.paginators.json +12 -0
- package/apis/kms-2014-11-01.examples.json +100 -94
- package/apis/kms-2014-11-01.min.json +54 -34
- package/apis/mediatailor-2018-04-23.min.json +144 -116
- package/apis/quicksight-2018-04-01.min.json +162 -127
- package/apis/s3control-2018-08-20.min.json +470 -24
- package/apis/s3control-2018-08-20.paginators.json +5 -0
- package/apis/schemas-2019-12-02.min.json +49 -31
- package/apis/securityhub-2018-10-26.min.json +492 -205
- package/apis/securityhub-2018-10-26.paginators.json +22 -11
- package/apis/servicecatalog-appregistry-2020-06-24.min.json +68 -0
- package/apis/transfer-2018-11-05.min.json +403 -9
- package/apis/transfer-2018-11-05.paginators.json +10 -0
- package/clients/accessanalyzer.d.ts +46 -43
- package/clients/acmpca.d.ts +21 -7
- package/clients/cloudformation.d.ts +116 -80
- package/clients/cloudtrail.d.ts +72 -72
- package/clients/codebuild.d.ts +3 -3
- package/clients/computeoptimizer.d.ts +1 -1
- package/clients/configservice.d.ts +323 -323
- package/clients/ebs.d.ts +1 -1
- package/clients/ec2.d.ts +22 -13
- package/clients/efs.d.ts +57 -31
- package/clients/firehose.d.ts +39 -10
- package/clients/fsx.d.ts +694 -52
- package/clients/iot.d.ts +829 -506
- package/clients/kms.d.ts +293 -280
- package/clients/lexmodelbuildingservice.d.ts +1 -1
- package/clients/mediatailor.d.ts +45 -8
- package/clients/memorydb.d.ts +2 -2
- package/clients/polly.d.ts +2 -2
- package/clients/quicksight.d.ts +343 -310
- package/clients/s3control.d.ts +439 -44
- package/clients/schemas.d.ts +25 -0
- package/clients/securityhub.d.ts +524 -21
- package/clients/servicecatalogappregistry.d.ts +81 -3
- package/clients/sqs.d.ts +4 -4
- package/clients/transfer.d.ts +556 -28
- package/dist/aws-sdk-core-react-native.js +1 -1
- package/dist/aws-sdk-react-native.js +25 -27
- package/dist/aws-sdk.js +1414 -1077
- package/dist/aws-sdk.min.js +85 -85
- package/dist/xml2js.js +2 -3
- package/lib/core.js +1 -1
- package/package.json +1 -1
package/clients/ebs.d.ts
CHANGED
@@ -292,7 +292,7 @@ declare namespace EBS {
   export type SnapshotId = string;
   export interface StartSnapshotRequest {
     /**
-     * The size of the volume, in GiB. The maximum size is
+     * The size of the volume, in GiB. The maximum size is 65536 GiB (64 TiB).
      */
     VolumeSize: VolumeSize;
     /**
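
The completed VolumeSize doc pins down the previously truncated maximum. A minimal sketch of how the field is used with the EBS direct APIs client; the region and the callback handling are illustrative assumptions, not part of the diff:

import AWS = require('aws-sdk');

const ebs = new AWS.EBS({ region: 'us-east-1' }); // region is a placeholder

// VolumeSize is in GiB; 65536 GiB (64 TiB) is the documented maximum.
ebs.startSnapshot({ VolumeSize: 65536 }, (err, data) => {
  if (err) console.error(err);
  else console.log('started snapshot', data.SnapshotId);
});
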
package/clients/ec2.d.ts
CHANGED
@@ -8757,10 +8757,6 @@ declare namespace EC2 {
     VpcPeeringConnection?: VpcPeeringConnection;
   }
   export interface CreateVpcRequest {
-    /**
-     * The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.
-     */
-    CidrBlock: String;
     /**
      * Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block.
      */
@@ -8789,6 +8785,10 @@ declare namespace EC2 {
      * The tags to assign to the VPC.
      */
     TagSpecifications?: TagSpecificationList;
+    /**
+     * The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.
+     */
+    CidrBlock: String;
   }
   export interface CreateVpcResult {
     /**
@@ -19458,6 +19458,10 @@ declare namespace EC2 {
      * This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled. If you specify a value of disabled, you will not be able to access your instance metadata.
      */
     HttpEndpoint?: LaunchTemplateInstanceMetadataEndpointState;
+    /**
+     * Enables or disables the IPv6 endpoint for the instance metadata service. Default: disabled
+     */
+    HttpProtocolIpv6?: LaunchTemplateInstanceMetadataProtocolIpv6;
   }
   export interface LaunchTemplateInstanceMetadataOptionsRequest {
     /**
@@ -19472,8 +19476,13 @@ declare namespace EC2 {
      * This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled. If you specify a value of disabled, you will not be able to access your instance metadata.
      */
     HttpEndpoint?: LaunchTemplateInstanceMetadataEndpointState;
+    /**
+     * Enables or disables the IPv6 endpoint for the instance metadata service. Default: disabled
+     */
+    HttpProtocolIpv6?: LaunchTemplateInstanceMetadataProtocolIpv6;
   }
   export type LaunchTemplateInstanceMetadataOptionsState = "pending"|"applied"|string;
+  export type LaunchTemplateInstanceMetadataProtocolIpv6 = "disabled"|"enabled"|string;
   export interface LaunchTemplateInstanceNetworkInterfaceSpecification {
     /**
      * Indicates whether to associate a Carrier IP address with eth0 for a new network interface. Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the Wavelength Developer Guide.
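
The two MetadataOptions hunks and the new LaunchTemplateInstanceMetadataProtocolIpv6 type add IPv6 support for the instance metadata service. A hedged sketch of setting the field when creating a launch template; the template name and region are invented for illustration:

import AWS = require('aws-sdk');

const ec2 = new AWS.EC2({ region: 'us-east-1' });

ec2.createLaunchTemplate({
  LaunchTemplateName: 'imds-ipv6-example', // placeholder name
  LaunchTemplateData: {
    MetadataOptions: {
      HttpEndpoint: 'enabled',
      HttpProtocolIpv6: 'enabled', // new field; the documented default is disabled
    },
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.LaunchTemplate && data.LaunchTemplate.LaunchTemplateId);
});
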
@@ -19536,19 +19545,19 @@ declare namespace EC2 {
      */
     NetworkCardIndex?: Integer;
     /**
-     * One or more IPv4
+     * One or more IPv4 prefixes assigned to the network interface.
      */
     Ipv4Prefixes?: Ipv4PrefixListResponse;
     /**
-     * The number of IPv4
+     * The number of IPv4 prefixes that Amazon Web Services automatically assigned to the network interface.
      */
     Ipv4PrefixCount?: Integer;
     /**
-     * One or more IPv6
+     * One or more IPv6 prefixes assigned to the network interface.
      */
     Ipv6Prefixes?: Ipv6PrefixListResponse;
     /**
-     * The number of IPv6
+     * The number of IPv6 prefixes that Amazon Web Services automatically assigned to the network interface.
      */
     Ipv6PrefixCount?: Integer;
   }
@@ -19615,19 +19624,19 @@ declare namespace EC2 {
      */
     NetworkCardIndex?: Integer;
     /**
-     * One or more IPv4
+     * One or more IPv4 prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.
      */
     Ipv4Prefixes?: Ipv4PrefixList;
     /**
-     * The number of IPv4
+     * The number of IPv4 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv4Prefix option.
      */
     Ipv4PrefixCount?: Integer;
     /**
-     * One or more IPv6
+     * One or more IPv6 prefixes to be assigned to the network interface. You cannot use this option if you use the Ipv6PrefixCount option.
      */
     Ipv6Prefixes?: Ipv6PrefixList;
     /**
-     * The number of IPv6
+     * The number of IPv6 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.
      */
     Ipv6PrefixCount?: Integer;
   }
@@ -23785,7 +23794,7 @@ declare namespace EC2 {
      */
     InstanceInitiatedShutdownBehavior?: ShutdownBehavior;
     /**
-     * The
+     * The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see Running Commands on Your Linux Instance at Launch (Linux) or Adding User Data (Windows). If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.
      */
     UserData?: String;
     /**
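
The restored UserData doc spells out the base64 requirement and the 16 KB limit. A small sketch of preparing user data for LaunchTemplateData.UserData; the script body is an assumption:

// User data must be base64-encoded text and is limited to 16 KB.
const script = '#!/bin/bash\necho "configured at launch"\n'; // example payload
const userData = Buffer.from(script, 'utf8').toString('base64');
// Pass `userData` as LaunchTemplateData.UserData in createLaunchTemplate.
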
package/clients/efs.d.ts
CHANGED
@@ -20,11 +20,11 @@ declare class EFS extends Service {
    */
  createAccessPoint(callback?: (err: AWSError, data: EFS.Types.AccessPointDescription) => void): Request<EFS.Types.AccessPointDescription, AWSError>;
  /**
-  * Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's
+  * Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following: Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating. Returns with the description of the created file system. Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system. For basic use cases, you can use a randomly generated UUID for the creation token. The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error. For more information, see Creating a file system in the Amazon EFS User Guide. The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state. This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes. You can set the throughput mode for the file system using the ThroughputMode parameter. After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instance in your VPC by using the mount target. For more information, see Amazon EFS: How it Works. This operation requires permissions for the elasticfilesystem:CreateFileSystem action.
   */
  createFileSystem(params: EFS.Types.CreateFileSystemRequest, callback?: (err: AWSError, data: EFS.Types.FileSystemDescription) => void): Request<EFS.Types.FileSystemDescription, AWSError>;
  /**
-  * Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's
+  * Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following: Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating. Returns with the description of the created file system. Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system. For basic use cases, you can use a randomly generated UUID for the creation token. The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error. For more information, see Creating a file system in the Amazon EFS User Guide. The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state. This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes. You can set the throughput mode for the file system using the ThroughputMode parameter. After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instance in your VPC by using the mount target. For more information, see Amazon EFS: How it Works. This operation requires permissions for the elasticfilesystem:CreateFileSystem action.
   */
  createFileSystem(callback?: (err: AWSError, data: EFS.Types.FileSystemDescription) => void): Request<EFS.Types.FileSystemDescription, AWSError>;
  /**
@@ -76,11 +76,11 @@ declare class EFS extends Service {
    */
  deleteMountTarget(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
-  * DEPRECATED - DeleteTags is deprecated and not maintained. Please use the API action to remove tags from EFS resources. Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag
+  * DEPRECATED - DeleteTags is deprecated and not maintained. Please use the API action to remove tags from EFS resources. Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag restrictions in the Billing and Cost Management User Guide. This operation requires permissions for the elasticfilesystem:DeleteTags action.
   */
  deleteTags(params: EFS.Types.DeleteTagsRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
-  * DEPRECATED - DeleteTags is deprecated and not maintained. Please use the API action to remove tags from EFS resources. Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag
+  * DEPRECATED - DeleteTags is deprecated and not maintained. Please use the API action to remove tags from EFS resources. Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag restrictions in the Billing and Cost Management User Guide. This operation requires permissions for the elasticfilesystem:DeleteTags action.
   */
  deleteTags(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
@@ -92,11 +92,11 @@ declare class EFS extends Service {
    */
  describeAccessPoints(callback?: (err: AWSError, data: EFS.Types.DescribeAccessPointsResponse) => void): Request<EFS.Types.DescribeAccessPointsResponse, AWSError>;
  /**
-  *
+  * Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region. For more information, see Managing Amazon EFS resource IDs.
   */
  describeAccountPreferences(params: EFS.Types.DescribeAccountPreferencesRequest, callback?: (err: AWSError, data: EFS.Types.DescribeAccountPreferencesResponse) => void): Request<EFS.Types.DescribeAccountPreferencesResponse, AWSError>;
  /**
-  *
+  * Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region. For more information, see Managing Amazon EFS resource IDs.
   */
  describeAccountPreferences(callback?: (err: AWSError, data: EFS.Types.DescribeAccountPreferencesResponse) => void): Request<EFS.Types.DescribeAccountPreferencesResponse, AWSError>;
  /**
@@ -116,19 +116,19 @@ declare class EFS extends Service {
    */
  describeFileSystemPolicy(callback?: (err: AWSError, data: EFS.Types.FileSystemPolicyDescription) => void): Request<EFS.Types.FileSystemPolicyDescription, AWSError>;
  /**
-  * Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's
+  * Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. Currently, this number is automatically set to 10. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker. To retrieve a list of your file system descriptions, this operation is used in an iterative process, where DescribeFileSystems is called first without the Marker and then the operation continues to call it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker. The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified. This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.
   */
  describeFileSystems(params: EFS.Types.DescribeFileSystemsRequest, callback?: (err: AWSError, data: EFS.Types.DescribeFileSystemsResponse) => void): Request<EFS.Types.DescribeFileSystemsResponse, AWSError>;
  /**
-  * Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's
+  * Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. Currently, this number is automatically set to 10. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker. To retrieve a list of your file system descriptions, this operation is used in an iterative process, where DescribeFileSystems is called first without the Marker and then the operation continues to call it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker. The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified. This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.
   */
  describeFileSystems(callback?: (err: AWSError, data: EFS.Types.DescribeFileSystemsResponse) => void): Request<EFS.Types.DescribeFileSystemsResponse, AWSError>;
  /**
-  * Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. EFS lifecycle management uses the LifecycleConfiguration object to identify which files to move to the EFS Infrequent Access (IA) storage class. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response. This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.
+  * Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. EFS lifecycle management uses the LifecycleConfiguration object to identify which files to move to the EFS Infrequent Access (IA) storage class. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response. When EFS Intelligent Tiering is enabled, TransitionToPrimaryStorageClass has a value of AFTER_1_ACCESS. This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.
   */
  describeLifecycleConfiguration(params: EFS.Types.DescribeLifecycleConfigurationRequest, callback?: (err: AWSError, data: EFS.Types.LifecycleConfigurationDescription) => void): Request<EFS.Types.LifecycleConfigurationDescription, AWSError>;
  /**
-  * Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. EFS lifecycle management uses the LifecycleConfiguration object to identify which files to move to the EFS Infrequent Access (IA) storage class. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response. This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.
+  * Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. EFS lifecycle management uses the LifecycleConfiguration object to identify which files to move to the EFS Infrequent Access (IA) storage class. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response. When EFS Intelligent Tiering is enabled, TransitionToPrimaryStorageClass has a value of AFTER_1_ACCESS. This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.
   */
  describeLifecycleConfiguration(callback?: (err: AWSError, data: EFS.Types.LifecycleConfigurationDescription) => void): Request<EFS.Types.LifecycleConfigurationDescription, AWSError>;
  /**
@@ -172,11 +172,11 @@ declare class EFS extends Service {
    */
  modifyMountTargetSecurityGroups(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
-  *
+  * Use this operation to set the account preference in the current Amazon Web Services Region to use either long 17 character (63 bit) or short 8 character (32 bit) IDs for new EFS file systems and mount targets created. All existing resource IDs are not affected by any changes you make. You can set the ID preference during the opt-in period as EFS transitions to long resource IDs. For more information, see Managing Amazon EFS resource IDs.
   */
  putAccountPreferences(params: EFS.Types.PutAccountPreferencesRequest, callback?: (err: AWSError, data: EFS.Types.PutAccountPreferencesResponse) => void): Request<EFS.Types.PutAccountPreferencesResponse, AWSError>;
  /**
-  *
+  * Use this operation to set the account preference in the current Amazon Web Services Region to use either long 17 character (63 bit) or short 8 character (32 bit) IDs for new EFS file systems and mount targets created. All existing resource IDs are not affected by any changes you make. You can set the ID preference during the opt-in period as EFS transitions to long resource IDs. For more information, see Managing Amazon EFS resource IDs.
   */
  putAccountPreferences(callback?: (err: AWSError, data: EFS.Types.PutAccountPreferencesResponse) => void): Request<EFS.Types.PutAccountPreferencesResponse, AWSError>;
  /**
@@ -196,11 +196,11 @@ declare class EFS extends Service {
    */
  putFileSystemPolicy(callback?: (err: AWSError, data: EFS.Types.FileSystemPolicyDescription) => void): Request<EFS.Types.FileSystemPolicyDescription, AWSError>;
  /**
-  * Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system. Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management. In the request, specify the following: The ID for the file system for which you are enabling, disabling, or modifying lifecycle management. A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item. This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration object to an encrypted file system, you need the same
+  * Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. To enable EFS Intelligent Tiering, set the value of TransitionToPrimaryStorageClass to AFTER_1_ACCESS. For more information, see EFS Lifecycle Management. A LifecycleConfiguration applies to all files in a file system. Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management. In the request, specify the following: The ID for the file system for which you are enabling, disabling, or modifying lifecycle management. A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item. This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration object to an encrypted file system, you need the same Key Management Service permissions as when you created the encrypted file system.
   */
  putLifecycleConfiguration(params: EFS.Types.PutLifecycleConfigurationRequest, callback?: (err: AWSError, data: EFS.Types.LifecycleConfigurationDescription) => void): Request<EFS.Types.LifecycleConfigurationDescription, AWSError>;
  /**
-  * Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A LifecycleConfiguration applies to all files in a file system. Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management. In the request, specify the following: The ID for the file system for which you are enabling, disabling, or modifying lifecycle management. A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item. This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration object to an encrypted file system, you need the same
+  * Enables lifecycle management by creating a new LifecycleConfiguration object. A LifecycleConfiguration object defines when files in an Amazon EFS file system are automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. To enable EFS Intelligent Tiering, set the value of TransitionToPrimaryStorageClass to AFTER_1_ACCESS. For more information, see EFS Lifecycle Management. A LifecycleConfiguration applies to all files in a file system. Each Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a LifecycleConfiguration object already exists for the specified file system, a PutLifecycleConfiguration call modifies the existing configuration. A PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration and disables lifecycle management. In the request, specify the following: The ID for the file system for which you are enabling, disabling, or modifying lifecycle management. A LifecyclePolicies array of LifecyclePolicy objects that define when files are moved to the IA storage class. The array can contain only one LifecyclePolicy item. This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration object to an encrypted file system, you need the same Key Management Service permissions as when you created the encrypted file system.
   */
  putLifecycleConfiguration(callback?: (err: AWSError, data: EFS.Types.LifecycleConfigurationDescription) => void): Request<EFS.Types.LifecycleConfigurationDescription, AWSError>;
  /**
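
The updated putLifecycleConfiguration doc introduces EFS Intelligent-Tiering via TransitionToPrimaryStorageClass. A hedged sketch with a placeholder file system ID; per the quoted docs, the LifecyclePolicies array holds a single policy object:

import AWS = require('aws-sdk');

const efs = new AWS.EFS({ region: 'us-east-1' });

efs.putLifecycleConfiguration({
  FileSystemId: 'fs-12345678', // placeholder
  LifecyclePolicies: [
    {
      TransitionToIA: 'AFTER_30_DAYS',                   // move idle files to IA
      TransitionToPrimaryStorageClass: 'AFTER_1_ACCESS', // new enum in this release
    },
  ],
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.LifecyclePolicies);
});
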
@@ -264,7 +264,7 @@ declare namespace EFS {
      */
     RootDirectory?: RootDirectory;
     /**
-     * Identified the
+     * Identifies the Amazon Web Services account that owns the access point resource.
      */
     OwnerId?: AwsAccountId;
     /**
@@ -298,7 +298,7 @@ declare namespace EFS {
      */
     ClientToken: ClientToken;
     /**
-     * Creates tags associated with the access point. Each tag is a key-value pair.
+     * Creates tags associated with the access point. Each tag is a key-value pair, and each key must be unique. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.
      */
     Tags?: Tags;
     /**
@@ -324,11 +324,11 @@ declare namespace EFS {
      */
     PerformanceMode?: PerformanceMode;
     /**
-     * A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying CreateFileSystemRequest$KmsKeyId for an existing
+     * A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying CreateFileSystemRequest$KmsKeyId for an existing Key Management Service (KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.
      */
     Encrypted?: Encrypted;
     /**
-     * The ID of the
+     * The ID of the KMS CMK that you want to use to protect the encrypted file system. This parameter is only required if you want to use a non-default KMS key. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats: Key ID - A unique identifier of the key, for example 1234abcd-12ab-34cd-56ef-1234567890ab. ARN - An Amazon Resource Name (ARN) for the key, for example arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab. Key alias - A previously created display name for a key, for example alias/projectKey1. Key alias ARN - An ARN for a key alias, for example arn:aws:kms:us-west-2:444455556666:alias/projectKey1. If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true. EFS accepts only symmetric KMS keys. You cannot use asymmetric KMS keys with EFS file systems.
      */
     KmsKeyId?: KmsKeyId;
     /**
@@ -336,19 +336,19 @@ declare namespace EFS {
      */
     ThroughputMode?: ThroughputMode;
     /**
-     * The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase this limit, contact
+     * The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.
      */
     ProvisionedThroughputInMibps?: ProvisionedThroughputInMibps;
     /**
-     * Used to create a file system that uses One Zone storage classes. It specifies the
+     * Used to create a file system that uses One Zone storage classes. It specifies the Amazon Web Services Availability Zone in which to create the file system. Use the format us-east-1a to specify the Availability Zone. For more information about One Zone storage classes, see Using EFS storage classes in the Amazon EFS User Guide. One Zone storage classes are not available in all Availability Zones in Amazon Web Services Regions where Amazon EFS is available.
      */
     AvailabilityZoneName?: AvailabilityZoneName;
     /**
-     * Specifies whether automatic backups are enabled on the file system that you are creating. Set the value to true to enable automatic backups. If you are creating a file system that uses One Zone storage classes, automatic backups are enabled by default. For more information, see Automatic backups in the Amazon EFS User Guide. Default is false. However, if you specify an AvailabilityZoneName, the default is true.
+     * Specifies whether automatic backups are enabled on the file system that you are creating. Set the value to true to enable automatic backups. If you are creating a file system that uses One Zone storage classes, automatic backups are enabled by default. For more information, see Automatic backups in the Amazon EFS User Guide. Default is false. However, if you specify an AvailabilityZoneName, the default is true. Backup is not available in all Amazon Web Services Regions where Amazon EFS is available.
      */
     Backup?: Backup;
     /**
-     *
+     * Use to create one or more tags associated with the file system. Each tag is a user-defined key-value pair. Name your file system on creation by including a "Key":"Name","Value":"{value}" key-value pair. Each key must be unique. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.
      */
     Tags?: Tags;
   }
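
These hunks flesh out the CreateFileSystemRequest field docs (encryption, provisioned throughput, One Zone placement, automatic backups, tags). A hedged sketch exercising them; all literal values are placeholders, and omitting KmsKeyId means the default EFS CMK (/aws/elasticfilesystem) protects the file system, per the docs above:

import AWS = require('aws-sdk');

const efs = new AWS.EFS({ region: 'us-east-1' });

efs.createFileSystem({
  CreationToken: 'example-token-0001', // reuse the same token when retrying
  PerformanceMode: 'generalPurpose',
  Encrypted: true,  // default EFS CMK is used since KmsKeyId is omitted
  Backup: true,     // enable automatic backups
  Tags: [{ Key: 'Name', Value: 'example-fs' }],
}, (err, fs) => {
  if (err) console.error(err);
  else console.log(fs.FileSystemId, fs.LifeCycleState); // state starts as "creating"
});
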
@@ -458,11 +458,23 @@ declare namespace EFS {
     NextToken?: Token;
   }
   export interface DescribeAccountPreferencesRequest {
+    /**
+     * (Optional) You can use NextToken in a subsequent request to fetch the next page of Amazon Web Services account preferences if the response payload was paginated.
+     */
     NextToken?: Token;
+    /**
+     * (Optional) When retrieving account preferences, you can optionally specify the MaxItems parameter to limit the number of objects returned in a response. The default value is 100.
+     */
     MaxResults?: MaxResults;
   }
   export interface DescribeAccountPreferencesResponse {
+    /**
+     * Describes the resource ID preference setting for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region.
+     */
     ResourceIdPreference?: ResourceIdPreference;
+    /**
+     * Present if there are more records than returned in the response. You can use the NextToken in the subsequent request to fetch the additional descriptions.
+     */
     NextToken?: Token;
   }
   export interface DescribeBackupPolicyRequest {
@@ -595,7 +607,7 @@ declare namespace EFS {
   export type FileSystemArn = string;
   export interface FileSystemDescription {
     /**
-     * The
+     * The Amazon Web Services account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner.
      */
     OwnerId: AwsAccountId;
     /**
@@ -639,7 +651,7 @@ declare namespace EFS {
      */
     Encrypted?: Encrypted;
     /**
-     * The ID of an
+     * The ID of a Key Management Service customer master key (CMK) that was used to protect the encrypted file system.
      */
     KmsKeyId?: KmsKeyId;
     /**
@@ -651,11 +663,11 @@ declare namespace EFS {
      */
     ProvisionedThroughputInMibps?: ProvisionedThroughputInMibps;
     /**
-     * Describes the
+     * Describes the Amazon Web Services Availability Zone in which the file system is located, and is valid only for file systems using One Zone storage classes. For more information, see Using EFS storage classes in the Amazon EFS User Guide.
      */
     AvailabilityZoneName?: AvailabilityZoneName;
     /**
-     * The unique and consistent identifier of the Availability Zone in which the file system's One Zone storage classes exist. For example, use1-az1 is an Availability Zone ID for the us-east-1
+     * The unique and consistent identifier of the Availability Zone in which the file system's One Zone storage classes exist. For example, use1-az1 is an Availability Zone ID for the us-east-1 Amazon Web Services Region, and it has the same location in every Amazon Web Services account.
      */
     AvailabilityZoneId?: AvailabilityZoneId;
     /**
@@ -701,16 +713,20 @@ declare namespace EFS {
   export type LifeCycleState = "creating"|"available"|"updating"|"deleting"|"deleted"|"error"|string;
   export interface LifecycleConfigurationDescription {
     /**
-     * An array of lifecycle management policies.
+     * An array of lifecycle management policies. EFS supports a maximum of one policy per file system.
      */
     LifecyclePolicies?: LifecyclePolicies;
   }
   export type LifecyclePolicies = LifecyclePolicy[];
   export interface LifecyclePolicy {
     /**
-     *
+     * Describes the period of time that a file is not accessed, after which it transitions to the IA storage class. Metadata operations such as listing the contents of a directory don't count as file access events.
      */
     TransitionToIA?: TransitionToIARules;
+    /**
+     * Describes the policy used to transition a file from infrequent access storage to primary storage.
+     */
+    TransitionToPrimaryStorageClass?: TransitionToPrimaryStorageClassRules;
   }
   export interface ListTagsForResourceRequest {
     /**
@@ -752,7 +768,7 @@ declare namespace EFS {
   export type MountTargetCount = number;
   export interface MountTargetDescription {
     /**
-     *
+     * Amazon Web Services account ID that owns the resource.
      */
     OwnerId?: AwsAccountId;
     /**
@@ -780,11 +796,11 @@ declare namespace EFS {
      */
     NetworkInterfaceId?: NetworkInterfaceId;
     /**
-     * The unique and consistent identifier of the Availability Zone that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every
+     * The unique and consistent identifier of the Availability Zone that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every Amazon Web Services account.
      */
     AvailabilityZoneId?: AvailabilityZoneId;
     /**
-     * The name of the Availability Zone in which the mount target is located. Availability Zones are independently mapped to names for each
+     * The name of the Availability Zone in which the mount target is located. Availability Zones are independently mapped to names for each Amazon Web Services account. For example, the Availability Zone us-east-1a for your Amazon Web Services account might not be the same location as us-east-1a for another Amazon Web Services account.
      */
     AvailabilityZoneName?: AvailabilityZoneName;
     /**
@@ -818,6 +834,9 @@ declare namespace EFS {
   }
   export type ProvisionedThroughputInMibps = number;
   export interface PutAccountPreferencesRequest {
+    /**
+     * Specifies the EFS resource ID preference to set for the user's Amazon Web Services account, in the current Amazon Web Services Region, either LONG_ID (17 characters), or SHORT_ID (8 characters).
+     */
     ResourceIdType: ResourceIdType;
   }
   export interface PutAccountPreferencesResponse {
@@ -860,7 +879,13 @@ declare namespace EFS {
   export type Resource = "FILE_SYSTEM"|"MOUNT_TARGET"|string;
   export type ResourceId = string;
   export interface ResourceIdPreference {
+    /**
+     * Identifies the EFS resource ID preference, either LONG_ID (17 characters) or SHORT_ID (8 characters).
+     */
     ResourceIdType?: ResourceIdType;
+    /**
+     * Identifies the Amazon EFS resources to which the ID preference setting applies, FILE_SYSTEM and MOUNT_TARGET.
+     */
     Resources?: Resources;
   }
   export type ResourceIdType = "LONG_ID"|"SHORT_ID"|string;
@@ -908,6 +933,7 @@ declare namespace EFS {
   export type Timestamp = Date;
   export type Token = string;
   export type TransitionToIARules = "AFTER_7_DAYS"|"AFTER_14_DAYS"|"AFTER_30_DAYS"|"AFTER_60_DAYS"|"AFTER_90_DAYS"|string;
+  export type TransitionToPrimaryStorageClassRules = "AFTER_1_ACCESS"|string;
   export type Uid = number;
   export interface UntagResourceRequest {
     /**
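
The account-preferences types above back the new resource-ID opt-in. A hedged sketch that sets long (17-character) IDs for the current Region and reads the preference back; the region is a placeholder:

import AWS = require('aws-sdk');

const efs = new AWS.EFS({ region: 'us-east-1' });

async function optIntoLongIds() {
  await efs.putAccountPreferences({ ResourceIdType: 'LONG_ID' }).promise();
  const prefs = await efs.describeAccountPreferences({}).promise();
  // Expected shape per the types above, e.g.
  // { ResourceIdType: 'LONG_ID', Resources: ['FILE_SYSTEM', 'MOUNT_TARGET'] }
  console.log(prefs.ResourceIdPreference);
}

optIntoLongIds().catch(console.error);
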
package/clients/firehose.d.ts
CHANGED
@@ -52,19 +52,19 @@ declare class Firehose extends Service {
    */
  listTagsForDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.ListTagsForDeliveryStreamOutput) => void): Request<Firehose.Types.ListTagsForDeliveryStreamOutput, AWSError>;
  /**
-  * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000
+  * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(params: Firehose.Types.PutRecordInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
-  * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000
+  * Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecord(callback?: (err: AWSError, data: Firehose.Types.PutRecordOutput) => void): Request<Firehose.Types.PutRecordOutput, AWSError>;
  /**
-  * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before
+  * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(params: Firehose.Types.PutRecordBatchInput, callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
-  * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before
+  * Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. For information about service quota, see Amazon Kinesis Data Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
   */
  putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
  /**
|
|
68
68
|
*/
|
|
69
69
|
putRecordBatch(callback?: (err: AWSError, data: Firehose.Types.PutRecordBatchOutput) => void): Request<Firehose.Types.PutRecordBatchOutput, AWSError>;
|
|
70
70
|
/**
|
|
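The doc comment above spells out the retry contract for PutRecordBatch: check FailedPutCount, then resend only the entries that RequestResponses flags with an ErrorCode. Below is a minimal sketch of that pattern; the region, stream name, and retry count are placeholder assumptions, not values from this package.

```typescript
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' }); // region is an assumption

// Resend only the entries that RequestResponses flags with an ErrorCode,
// as the PutRecordBatch documentation advises, instead of replaying the
// whole batch (which would duplicate records that already succeeded).
async function putBatchWithRetry(records: AWS.Firehose.Record[], attempts = 3): Promise<void> {
  let pending = records;
  for (let attempt = 0; attempt < attempts && pending.length > 0; attempt++) {
    const resp = await firehose
      .putRecordBatch({ DeliveryStreamName: 'example-stream', Records: pending }) // hypothetical stream
      .promise();
    if (resp.FailedPutCount === 0) {
      return;
    }
    // RequestResponses mirrors the request array's ordering one-to-one,
    // so the index identifies which input record each entry describes.
    pending = pending.filter((_, i) => resp.RequestResponses[i].ErrorCode !== undefined);
  }
  if (pending.length > 0) {
    throw new Error(`${pending.length} records still failing after ${attempts} attempts`);
  }
}
```

A production version would also back off between attempts, per the ServiceUnavailableException guidance, and deduplicate at the destination.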
@@ -391,6 +391,16 @@ declare namespace Firehose {
   }
   export type DestinationDescriptionList = DestinationDescription[];
   export type DestinationId = string;
+  export interface DynamicPartitioningConfiguration {
+    /**
+     * The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
+     */
+    RetryOptions?: RetryOptions;
+    /**
+     * Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.
+     */
+    Enabled?: BooleanObject;
+  }
   export interface ElasticsearchBufferingHints {
     /**
      * Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
@@ -632,6 +642,10 @@ declare namespace Firehose {
      * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
      */
     DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
+    /**
+     * The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html
+     */
+    DynamicPartitioningConfiguration?: DynamicPartitioningConfiguration;
   }
   export interface ExtendedS3DestinationDescription {
     /**
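DynamicPartitioningConfiguration plugs into the extended S3 destination shapes updated in this hunk and the two that follow. Here is a hedged sketch of one possible configuration; the ARNs, prefix, and retry duration are illustrative placeholders, and the partitionKeyFromQuery prefix namespace assumes keys emitted by a MetadataExtraction processor (see the processor sketch further down).

```typescript
import * as AWS from 'aws-sdk';

// Sketch only: enabling dynamic partitioning on an extended S3 destination.
// All identifiers below are placeholders, not values taken from this diff.
const extendedS3: AWS.Firehose.ExtendedS3DestinationConfiguration = {
  RoleARN: 'arn:aws:iam::123456789012:role/example-firehose-role',
  BucketARN: 'arn:aws:s3:::example-partitioned-bucket',
  // Partition keys referenced in the prefix are expected to be emitted by a
  // MetadataExtraction processor in ProcessingConfiguration.
  Prefix: 'data/customer_id=!{partitionKeyFromQuery:customer_id}/',
  ErrorOutputPrefix: 'errors/',
  DynamicPartitioningConfiguration: {
    Enabled: true,
    RetryOptions: { DurationInSeconds: 300 }, // RetryOptions is added later in this diff
  },
};
```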
@@ -682,6 +696,10 @@ declare namespace Firehose {
      * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
      */
     DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
+    /**
+     * The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html
+     */
+    DynamicPartitioningConfiguration?: DynamicPartitioningConfiguration;
   }
   export interface ExtendedS3DestinationUpdate {
     /**
@@ -732,6 +750,10 @@ declare namespace Firehose {
      * The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
      */
     DataFormatConversionConfiguration?: DataFormatConversionConfiguration;
+    /**
+     * The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations. For more information, see https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html
+     */
+    DynamicPartitioningConfiguration?: DynamicPartitioningConfiguration;
   }
   export interface FailureDescription {
     /**
@@ -781,7 +803,7 @@ declare namespace Firehose {
   export type HttpEndpointCommonAttributesList = HttpEndpointCommonAttribute[];
   export interface HttpEndpointConfiguration {
     /**
-     * The URL of the HTTP endpoint selected as the destination.
+     * The URL of the HTTP endpoint selected as the destination. If you choose an HTTP endpoint as your destination, review and follow the instructions in the Appendix - HTTP Endpoint Delivery Request and Response Specifications.
      */
     Url: HttpEndpointUrl;
     /**
@@ -1137,9 +1159,9 @@ declare namespace Firehose {
     ParameterValue: ProcessorParameterValue;
   }
   export type ProcessorParameterList = ProcessorParameter[];
-  export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|string;
+  export type ProcessorParameterName = "LambdaArn"|"NumberOfRetries"|"MetadataExtractionQuery"|"JsonParsingEngine"|"RoleArn"|"BufferSizeInMBs"|"BufferIntervalInSeconds"|"SubRecordType"|"Delimiter"|string;
   export type ProcessorParameterValue = string;
-  export type ProcessorType = "Lambda"|string;
+  export type ProcessorType = "RecordDeAggregation"|"Lambda"|"MetadataExtraction"|"AppendDelimiterToRecord"|string;
   export type Proportion = number;
   export interface PutRecordBatchInput {
     /**
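The widened ProcessorType and ProcessorParameterName unions support the same dynamic partitioning feature: a MetadataExtraction processor extracts partition keys from each record with a JQ query, and AppendDelimiterToRecord appends a record separator. A sketch under the same placeholder assumptions (the query and the engine version string are illustrative):

```typescript
import * as AWS from 'aws-sdk';

// Sketch only: a processing configuration exercising the new enum members.
const processing: AWS.Firehose.ProcessingConfiguration = {
  Enabled: true,
  Processors: [
    {
      Type: 'MetadataExtraction', // new ProcessorType value
      Parameters: [
        // Pull the partition key out of each JSON record; the query is illustrative.
        { ParameterName: 'MetadataExtractionQuery', ParameterValue: '{customer_id: .customer_id}' },
        { ParameterName: 'JsonParsingEngine', ParameterValue: 'JQ-1.6' },
      ],
    },
    {
      Type: 'AppendDelimiterToRecord', // new ProcessorType value
      Parameters: [{ ParameterName: 'Delimiter', ParameterValue: '\\n' }],
    },
  ],
};
```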
@@ -1350,6 +1372,13 @@ declare namespace Firehose {
     DurationInSeconds?: RedshiftRetryDurationInSeconds;
   }
   export type RedshiftS3BackupMode = "Disabled"|"Enabled"|string;
+  export type RetryDurationInSeconds = number;
+  export interface RetryOptions {
+    /**
+     * The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.
+     */
+    DurationInSeconds?: RetryDurationInSeconds;
+  }
   export type RoleARN = string;
   export type S3BackupMode = "Disabled"|"Enabled"|string;
   export interface S3DestinationConfiguration {
@@ -1456,7 +1485,7 @@ declare namespace Firehose {
   }
   export interface SchemaConfiguration {
     /**
-     * The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
+     * The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed. If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.
      */
     RoleARN?: NonEmptyStringWithoutWhitespace;
     /**
@@ -1464,11 +1493,11 @@ declare namespace Firehose {
      */
     CatalogId?: NonEmptyStringWithoutWhitespace;
     /**
-     * Specifies the name of the AWS Glue database that contains the schema for the output data.
+     * Specifies the name of the AWS Glue database that contains the schema for the output data. If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the DatabaseName property is required and its value must be specified.
      */
     DatabaseName?: NonEmptyStringWithoutWhitespace;
     /**
-     * Specifies the AWS Glue table that contains the column information that constitutes your data schema.
+     * Specifies the AWS Glue table that contains the column information that constitutes your data schema. If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the TableName property is required and its value must be specified.
      */
     TableName?: NonEmptyStringWithoutWhitespace;
     /**
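The clarified SchemaConfiguration comments reduce to one rule: when the structure is passed through CreateDeliveryStream, RoleARN, DatabaseName, and TableName must all be supplied even though the TypeScript type marks them optional. A sketch with placeholder Glue identifiers:

```typescript
import * as AWS from 'aws-sdk';

// Sketch only: a SchemaConfiguration destined for a CreateDeliveryStream call.
// The role, database, and table names are placeholders.
const schema: AWS.Firehose.SchemaConfiguration = {
  RoleARN: 'arn:aws:iam::123456789012:role/example-glue-role', // required when used with CreateDeliveryStream
  DatabaseName: 'example_analytics_db',                        // required when used with CreateDeliveryStream
  TableName: 'example_clickstream',                            // required when used with CreateDeliveryStream
  Region: 'us-east-1', // optional
  VersionId: 'LATEST', // optional; 'LATEST' tracks schema updates
};
```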