aws-sdk 2.1432.0 → 2.1433.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -924,6 +924,10 @@ declare namespace ChimeSDKVoice {
  * List of phone numbers, in E.164 format.
  */
  E164PhoneNumbers: E164PhoneNumberList;
+ /**
+ * Specifies the name assigned to one or more phone numbers.
+ */
+ Name?: PhoneNumberName;
  }
  export interface CreatePhoneNumberOrderResponse {
  /**
@@ -1978,6 +1982,10 @@ declare namespace ChimeSDKVoice {
  * The phone number's order ID.
  */
  OrderId?: GuidString;
+ /**
+ * The name of the phone number.
+ */
+ Name?: PhoneNumberName;
  }
  export interface PhoneNumberAssociation {
  /**
@@ -2049,6 +2057,7 @@ declare namespace ChimeSDKVoice {
  export type PhoneNumberErrorList = PhoneNumberError[];
  export type PhoneNumberList = PhoneNumber[];
  export type PhoneNumberMaxResults = number;
+ export type PhoneNumberName = string;
  export interface PhoneNumberOrder {
  /**
  * The ID of the phone order.
@@ -2737,6 +2746,10 @@ declare namespace ChimeSDKVoice {
  * The outbound calling name associated with the phone number.
  */
  CallingName?: CallingName;
+ /**
+ * Specifies the name assigned to one or more phone numbers.
+ */
+ Name?: PhoneNumberName;
  }
  export interface UpdatePhoneNumberRequestItem {
  /**
@@ -2751,6 +2764,10 @@ declare namespace ChimeSDKVoice {
  * The outbound calling name to update.
  */
  CallingName?: CallingName;
+ /**
+ * The name of the phone number.
+ */
+ Name?: PhoneNumberName;
  }
  export type UpdatePhoneNumberRequestItemList = UpdatePhoneNumberRequestItem[];
  export interface UpdatePhoneNumberResponse {
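
The Name field is additive and optional on both the order and update shapes. As a rough sketch (identifiers and values below are illustrative placeholders, not taken from the diff), it could be supplied like this with the 2.1433.0 typings:

// Sketch only: ordering a phone number with the new optional Name field.
import * as AWS from 'aws-sdk';

const voice = new AWS.ChimeSDKVoice({ region: 'us-east-1' });

voice.createPhoneNumberOrder({
  ProductType: 'VoiceConnector',
  E164PhoneNumbers: ['+12065550100'],   // placeholder number
  Name: 'support-line',                 // new in this release
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.PhoneNumberOrder?.OrderId);
});
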
package/clients/fsx.d.ts CHANGED
@@ -20,11 +20,11 @@ declare class FSx extends Service {
  */
  associateFileSystemAliases(callback?: (err: AWSError, data: FSx.Types.AssociateFileSystemAliasesResponse) => void): Request<FSx.Types.AssociateFileSystemAliasesResponse, AWSError>;
  /**
- * Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following. Any files that FSx has already exported are not reverted. FSx continues to export any files that are "in-flight" when the cancel operation is received. FSx does not export any files that have not yet been exported.
+ * Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel am export task, Amazon FSx does the following. Any files that FSx has already exported are not reverted. FSx continues to export any files that are in-flight when the cancel operation is received. FSx does not export any files that have not yet been exported. For a release task, Amazon FSx will stop releasing files upon cancellation. Any files that have already been released will remain in the released state.
  */
  cancelDataRepositoryTask(params: FSx.Types.CancelDataRepositoryTaskRequest, callback?: (err: AWSError, data: FSx.Types.CancelDataRepositoryTaskResponse) => void): Request<FSx.Types.CancelDataRepositoryTaskResponse, AWSError>;
  /**
- * Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following. Any files that FSx has already exported are not reverted. FSx continues to export any files that are "in-flight" when the cancel operation is received. FSx does not export any files that have not yet been exported.
+ * Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel am export task, Amazon FSx does the following. Any files that FSx has already exported are not reverted. FSx continues to export any files that are in-flight when the cancel operation is received. FSx does not export any files that have not yet been exported. For a release task, Amazon FSx will stop releasing files upon cancellation. Any files that have already been released will remain in the released state.
  */
  cancelDataRepositoryTask(callback?: (err: AWSError, data: FSx.Types.CancelDataRepositoryTaskResponse) => void): Request<FSx.Types.CancelDataRepositoryTaskResponse, AWSError>;
  /**
@@ -52,11 +52,11 @@ declare class FSx extends Service {
  */
  createDataRepositoryAssociation(callback?: (err: AWSError, data: FSx.Types.CreateDataRepositoryAssociationResponse) => void): Request<FSx.Types.CreateDataRepositoryAssociationResponse, AWSError>;
  /**
- * Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
+ * Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. You use release data repository tasks to release data from your file system for files that are archived to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
  */
  createDataRepositoryTask(params: FSx.Types.CreateDataRepositoryTaskRequest, callback?: (err: AWSError, data: FSx.Types.CreateDataRepositoryTaskResponse) => void): Request<FSx.Types.CreateDataRepositoryTaskResponse, AWSError>;
  /**
- * Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
+ * Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. You use release data repository tasks to release data from your file system for files that are archived to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
  */
  createDataRepositoryTask(callback?: (err: AWSError, data: FSx.Types.CreateDataRepositoryTaskResponse) => void): Request<FSx.Types.CreateDataRepositoryTaskResponse, AWSError>;
  /**
@@ -308,11 +308,11 @@ declare class FSx extends Service {
  */
  updateFileCache(callback?: (err: AWSError, data: FSx.Types.UpdateFileCacheResponse) => void): Request<FSx.Types.UpdateFileCacheResponse, AWSError>;
  /**
- * Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime
+ * Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity StorageType ThroughputCapacity DiskIopsConfiguration WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime
  */
  updateFileSystem(params: FSx.Types.UpdateFileSystemRequest, callback?: (err: AWSError, data: FSx.Types.UpdateFileSystemResponse) => void): Request<FSx.Types.UpdateFileSystemResponse, AWSError>;
  /**
- * Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime
+ * Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity StorageType ThroughputCapacity DiskIopsConfiguration WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime
  */
  updateFileSystem(callback?: (err: AWSError, data: FSx.Types.UpdateFileSystemResponse) => void): Request<FSx.Types.UpdateFileSystemResponse, AWSError>;
  /**
@@ -383,7 +383,7 @@ declare namespace FSx {
  */
  Message?: ErrorMessage;
  }
- export type AdministrativeActionType = "FILE_SYSTEM_UPDATE"|"STORAGE_OPTIMIZATION"|"FILE_SYSTEM_ALIAS_ASSOCIATION"|"FILE_SYSTEM_ALIAS_DISASSOCIATION"|"VOLUME_UPDATE"|"SNAPSHOT_UPDATE"|"RELEASE_NFS_V3_LOCKS"|"VOLUME_RESTORE"|string;
+ export type AdministrativeActionType = "FILE_SYSTEM_UPDATE"|"STORAGE_OPTIMIZATION"|"FILE_SYSTEM_ALIAS_ASSOCIATION"|"FILE_SYSTEM_ALIAS_DISASSOCIATION"|"VOLUME_UPDATE"|"SNAPSHOT_UPDATE"|"RELEASE_NFS_V3_LOCKS"|"VOLUME_RESTORE"|"THROUGHPUT_OPTIMIZATION"|"IOPS_OPTIMIZATION"|"STORAGE_TYPE_OPTIMIZATION"|string;
  export type AdministrativeActions = AdministrativeAction[];
  export interface Alias {
  /**
@@ -624,11 +624,11 @@ declare namespace FSx {
  }
  export interface CreateDataRepositoryTaskRequest {
  /**
- * Specifies the type of data repository task to create.
+ * Specifies the type of data repository task to create. EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository. IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system. RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria. AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.
  */
  Type: DataRepositoryTaskType;
  /**
- * A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. For export tasks, the list contains paths on the Amazon FSx file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the Amazon FSx file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).
+ * A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all archived files that meet the last accessed time criteria (for release tasks). For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional). For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release archived files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all archived files in the file system, specify a forward slash (/) as the path. A file must also meet the last accessed time criteria specified in for the file to be released.
  */
  Paths?: DataRepositoryTaskPaths;
  FileSystemId: FileSystemId;
@@ -642,6 +642,10 @@ declare namespace FSx {
  * Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.
  */
  CapacityToRelease?: CapacityToRelease;
+ /**
+ * The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.
+ */
+ ReleaseConfiguration?: ReleaseConfiguration;
  }
  export interface CreateDataRepositoryTaskResponse {
  /**
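
Taken together with the ReleaseConfiguration, DurationSinceLastAccess, Unit, and Value shapes added further down in this file, the new release-task surface could be exercised roughly as follows. This is a sketch under assumptions: the file system ID and path are placeholders, and the file system is assumed to already be linked to an S3 data repository.

// Sketch only: release archived files under <mount>/scratch that were last
// accessed more than 30 days before the task is created.
import * as AWS from 'aws-sdk';

const fsx = new AWS.FSx({ region: 'us-east-1' });

fsx.createDataRepositoryTask({
  FileSystemId: 'fs-0123456789abcdef0',   // placeholder ID
  Type: 'RELEASE_DATA_FROM_FILESYSTEM',
  Paths: ['scratch'],                      // relative to the mount point
  Report: { Enabled: false },              // completion report block is required
  ReleaseConfiguration: {
    DurationSinceLastAccess: { Unit: 'DAYS', Value: 30 },
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.DataRepositoryTask?.TaskId);
});
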
@@ -858,7 +862,7 @@ declare namespace FSx {
  CopyTagsToVolumes?: Flag;
  DailyAutomaticBackupStartTime?: DailyTime;
  /**
- * Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following: SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MBps. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available. SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions. For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.
+ * Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following: MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available in the following Amazon Web Services Regions: SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available. SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions. For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.
  */
  DeploymentType: OpenZFSDeploymentType;
  /**
@@ -871,6 +875,18 @@ declare namespace FSx {
  * The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume.
  */
  RootVolumeConfiguration?: OpenZFSCreateRootVolumeConfiguration;
+ /**
+ * Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.
+ */
+ PreferredSubnetId?: SubnetId;
+ /**
+ * (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
+ */
+ EndpointIpAddressRange?: IpAddressRange;
+ /**
+ * (Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.
+ */
+ RouteTableIds?: RouteTableIds;
  }
  export interface CreateFileSystemRequest {
  /**
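
A minimal sketch of a Multi-AZ create call that uses the new fields (subnet, route table, and capacity values are illustrative placeholders, not values taken from this diff):

// Sketch only: creating an FSx for OpenZFS file system with the new
// MULTI_AZ_1 deployment type.
import * as AWS from 'aws-sdk';

const fsx = new AWS.FSx({ region: 'us-east-1' });

fsx.createFileSystem({
  FileSystemType: 'OPENZFS',
  StorageCapacity: 2048,                        // GiB
  SubnetIds: ['subnet-aaaa1111', 'subnet-bbbb2222'],
  OpenZFSConfiguration: {
    DeploymentType: 'MULTI_AZ_1',               // new enum value
    ThroughputCapacity: 160,
    PreferredSubnetId: 'subnet-aaaa1111',       // required for MULTI_AZ_1
    EndpointIpAddressRange: '198.19.0.0/28',    // optional, Multi-AZ only
    RouteTableIds: ['rtb-0123456789abcdef0'],   // optional, Multi-AZ only
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.FileSystem?.FileSystemId);
});
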
@@ -965,6 +981,10 @@ declare namespace FSx {
  * The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.
  */
  AuditLogConfiguration?: WindowsAuditLogCreateConfiguration;
+ /**
+ * The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.
+ */
+ DiskIopsConfiguration?: DiskIopsConfiguration;
  }
  export interface CreateOntapVolumeConfiguration {
  /**
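
As a sketch, the new SSD IOPS block slots into the existing Windows creation configuration like this; the values are placeholders and the rest of the CreateFileSystem request is omitted:

// Sketch only: a typed CreateFileSystemWindowsConfiguration fragment
// with the new DiskIopsConfiguration member.
import * as AWS from 'aws-sdk';

const windowsConfig: AWS.FSx.Types.CreateFileSystemWindowsConfiguration = {
  ThroughputCapacity: 512,
  DeploymentType: 'MULTI_AZ_1',
  PreferredSubnetId: 'subnet-aaaa1111',   // placeholder
  DiskIopsConfiguration: {
    Mode: 'USER_PROVISIONED',             // AUTOMATIC gives 3 IOPS per GiB
    Iops: 6000,
  },
};
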
@@ -1272,7 +1292,7 @@ declare namespace FSx {
  */
  Lifecycle: DataRepositoryTaskLifecycle;
  /**
- * The type of data repository task. EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository. IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system. AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource. RELEASE_DATA_FROM_FILESYSTEM tasks are not supported.
+ * The type of data repository task. EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository. IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system. RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria. AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.
  */
  Type: DataRepositoryTaskType;
  CreationTime: CreationTime;
@@ -1311,6 +1331,10 @@ declare namespace FSx {
  * The system-generated, unique ID of the cache.
  */
  FileCacheId?: FileCacheId;
+ /**
+ * The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.
+ */
+ ReleaseConfiguration?: ReleaseConfiguration;
  }
  export interface DataRepositoryTaskFailureDetails {
  Message?: ErrorMessage;
@@ -1554,7 +1578,7 @@ declare namespace FSx {
  SkipFinalBackup?: Flag;
  FinalBackupTags?: Tags;
  /**
- * Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false. For more information, see Deleting a SnapLock volume .
+ * Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false. For more information, see Deleting a SnapLock volume.
  */
  BypassSnaplockEnterpriseRetention?: Flag;
  }
@@ -1817,6 +1841,16 @@ declare namespace FSx {
  export type DiskIopsConfigurationMode = "AUTOMATIC"|"USER_PROVISIONED"|string;
  export type DnsIps = IpAddress[];
  export type DriveCacheType = "NONE"|"READ"|string;
+ export interface DurationSinceLastAccess {
+ /**
+ * The unit of time used by the Value parameter to determine if a file can be released, based on when it was last accessed. DAYS is the only supported value. This is a required parameter.
+ */
+ Unit?: Unit;
+ /**
+ * An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only archived files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter. If an archived file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.
+ */
+ Value?: Value;
+ }
  export type EndTime = Date;
  export type ErrorMessage = string;
  export type EventType = "NEW"|"CHANGED"|"DELETED"|string;
@@ -2388,7 +2422,7 @@ declare namespace FSx {
  ReadOnly?: ReadOnly;
  }
  export type OpenZFSDataCompressionType = "NONE"|"ZSTD"|"LZ4"|string;
- export type OpenZFSDeploymentType = "SINGLE_AZ_1"|"SINGLE_AZ_2"|string;
+ export type OpenZFSDeploymentType = "SINGLE_AZ_1"|"SINGLE_AZ_2"|"MULTI_AZ_1"|string;
  export interface OpenZFSFileSystemConfiguration {
  AutomaticBackupRetentionDays?: AutomaticBackupRetentionDays;
  /**
@@ -2401,7 +2435,7 @@ declare namespace FSx {
  CopyTagsToVolumes?: Flag;
  DailyAutomaticBackupStartTime?: DailyTime;
  /**
- * Specifies the file-system deployment type. Amazon FSx for OpenZFS supports SINGLE_AZ_1 and SINGLE_AZ_2.
+ * Specifies the file-system deployment type. Amazon FSx for OpenZFS supports MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.
  */
  DeploymentType?: OpenZFSDeploymentType;
  /**
@@ -2414,6 +2448,22 @@ declare namespace FSx {
  * The ID of the root volume of the OpenZFS file system.
  */
  RootVolumeId?: VolumeId;
+ /**
+ * Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.
+ */
+ PreferredSubnetId?: SubnetId;
+ /**
+ * (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
+ */
+ EndpointIpAddressRange?: IpAddressRange;
+ /**
+ * (Multi-AZ only) The VPC route tables in which your file system's endpoints are created.
+ */
+ RouteTableIds?: RouteTableIds;
+ /**
+ * The IP address of the endpoint that is used to access data or to manage the file system.
+ */
+ EndpointIpAddress?: IpAddress;
  }
  export interface OpenZFSNfsExport {
  /**
@@ -2511,6 +2561,12 @@ declare namespace FSx {
  export type ProgressPercent = number;
  export type ReadOnly = boolean;
  export type Region = string;
+ export interface ReleaseConfiguration {
+ /**
+ * Defines the point-in-time since an archived file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.
+ */
+ DurationSinceLastAccess?: DurationSinceLastAccess;
+ }
  export interface ReleaseFileSystemNfsV3LocksRequest {
  FileSystemId: FileSystemId;
  ClientRequestToken?: ClientRequestToken;
@@ -2889,6 +2945,7 @@ declare namespace FSx {
  export type TieringPolicyName = "SNAPSHOT_ONLY"|"AUTO"|"ALL"|"NONE"|string;
  export type TotalCount = number;
  export type UUID = string;
+ export type Unit = "DAYS"|string;
  export interface UntagResourceRequest {
  /**
  * The ARN of the Amazon FSx resource to untag.
@@ -3011,6 +3068,14 @@ declare namespace FSx {
  ThroughputCapacity?: MegabytesPerSecond;
  WeeklyMaintenanceStartTime?: WeeklyTime;
  DiskIopsConfiguration?: DiskIopsConfiguration;
+ /**
+ * (Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for OpenZFS file system.
+ */
+ AddRouteTableIds?: RouteTableIds;
+ /**
+ * (Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for OpenZFS file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.
+ */
+ RemoveRouteTableIds?: RouteTableIds;
  }
  export interface UpdateFileSystemRequest {
  /**
@@ -3035,6 +3100,7 @@ declare namespace FSx {
  * The configuration updates for an FSx for OpenZFS file system.
  */
  OpenZFSConfiguration?: UpdateFileSystemOpenZFSConfiguration;
+ StorageType?: StorageType;
  }
  export interface UpdateFileSystemResponse {
  /**
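
With the new StorageType member on UpdateFileSystemRequest (paired with the STORAGE_TYPE_OPTIMIZATION administrative action earlier in the diff), a storage-type change could be requested roughly like this; the file system ID is a placeholder:

// Sketch only: request a move to SSD storage on an existing file system.
import * as AWS from 'aws-sdk';

const fsx = new AWS.FSx({ region: 'us-east-1' });

fsx.updateFileSystem({
  FileSystemId: 'fs-0123456789abcdef0',
  StorageType: 'SSD',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.FileSystem?.AdministrativeActions);
});
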
@@ -3067,6 +3133,10 @@ declare namespace FSx {
  * The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system..
  */
  AuditLogConfiguration?: WindowsAuditLogCreateConfiguration;
+ /**
+ * The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.
+ */
+ DiskIopsConfiguration?: DiskIopsConfiguration;
  }
  export interface UpdateOntapVolumeConfiguration {
  /**
@@ -3221,6 +3291,7 @@ declare namespace FSx {
  */
  Volume?: Volume;
  }
+ export type Value = number;
  export interface Volume {
  CreationTime?: CreationTime;
  FileSystemId?: FileSystemId;
@@ -3291,7 +3362,7 @@ declare namespace FSx {
  */
  FileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel;
  /**
- * The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix. The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.
+ * The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix. The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.
  */
  AuditLogDestination?: GeneralARN;
  }
@@ -3305,7 +3376,7 @@ declare namespace FSx {
  */
  FileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel;
  /**
- * The Amazon Resource Name (ARN) that specifies the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements: The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix. If you do not provide a destination in AuditLogDestination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. If AuditLogDestination is provided and the resource does not exist, the request will fail with a BadRequest error. If FileAccessAuditLogLevel and FileShareAccessAuditLogLevel are both set to DISABLED, you cannot specify a destination in AuditLogDestination.
+ * The Amazon Resource Name (ARN) that specifies the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements: The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix. If you do not provide a destination in AuditLogDestination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. If AuditLogDestination is provided and the resource does not exist, the request will fail with a BadRequest error. If FileAccessAuditLogLevel and FileShareAccessAuditLogLevel are both set to DISABLED, you cannot specify a destination in AuditLogDestination.
  */
  AuditLogDestination?: GeneralARN;
  }
@@ -3361,6 +3432,10 @@ declare namespace FSx {
  * The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.
  */
  AuditLogConfiguration?: WindowsAuditLogConfiguration;
+ /**
+ * The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.
+ */
+ DiskIopsConfiguration?: DiskIopsConfiguration;
  }
  /**
  * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
@@ -20,11 +20,11 @@ declare class GlobalAccelerator extends Service {
  */
  addCustomRoutingEndpoints(callback?: (err: AWSError, data: GlobalAccelerator.Types.AddCustomRoutingEndpointsResponse) => void): Request<GlobalAccelerator.Types.AddCustomRoutingEndpointsResponse, AWSError>;
  /**
- * Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API). There are two advantages to using AddEndpoints to add endpoints: It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding. It's more convenient, because you don't need to specify all of the current endpoints that are already in the endpoint group in addition to the new endpoints that you want to add.
+ * Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API). There are two advantages to using AddEndpoints to add endpoints in Global Accelerator: It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding, rather than resolving new and existing endpoints. It's more convenient, because you don't need to specify the current endpoints that are already in the endpoint group, in addition to the new endpoints that you want to add. For information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.
  */
  addEndpoints(params: GlobalAccelerator.Types.AddEndpointsRequest, callback?: (err: AWSError, data: GlobalAccelerator.Types.AddEndpointsResponse) => void): Request<GlobalAccelerator.Types.AddEndpointsResponse, AWSError>;
  /**
- * Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API). There are two advantages to using AddEndpoints to add endpoints: It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding. It's more convenient, because you don't need to specify all of the current endpoints that are already in the endpoint group in addition to the new endpoints that you want to add.
+ * Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API). There are two advantages to using AddEndpoints to add endpoints in Global Accelerator: It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding, rather than resolving new and existing endpoints. It's more convenient, because you don't need to specify the current endpoints that are already in the endpoint group, in addition to the new endpoints that you want to add. For information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.
  */
  addEndpoints(callback?: (err: AWSError, data: GlobalAccelerator.Types.AddEndpointsResponse) => void): Request<GlobalAccelerator.Types.AddEndpointsResponse, AWSError>;
  /**
@@ -44,19 +44,19 @@ declare class GlobalAccelerator extends Service {
  */
  allowCustomRoutingTraffic(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  createAccelerator(params: GlobalAccelerator.Types.CreateAcceleratorRequest, callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateAcceleratorResponse) => void): Request<GlobalAccelerator.Types.CreateAcceleratorResponse, AWSError>;
  /**
- * Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  createAccelerator(callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateAcceleratorResponse) => void): Request<GlobalAccelerator.Types.CreateAcceleratorResponse, AWSError>;
  /**
- * Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints. Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints. Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  createCustomRoutingAccelerator(params: GlobalAccelerator.Types.CreateCustomRoutingAcceleratorRequest, callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateCustomRoutingAcceleratorResponse) => void): Request<GlobalAccelerator.Types.CreateCustomRoutingAcceleratorResponse, AWSError>;
  /**
- * Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints. Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints. Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  createCustomRoutingAccelerator(callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateCustomRoutingAcceleratorResponse) => void): Request<GlobalAccelerator.Types.CreateCustomRoutingAcceleratorResponse, AWSError>;
  /**
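
As the documentation above repeats, accelerator management calls must target the US West (Oregon) Region no matter where the endpoints run. A small illustrative sketch (the name and token are placeholders):

// Sketch only: Global Accelerator is managed through us-west-2.
import * as AWS from 'aws-sdk';

const ga = new AWS.GlobalAccelerator({ region: 'us-west-2' });

ga.createAccelerator({
  Name: 'example-accelerator',
  IdempotencyToken: 'b6e3a0e4-example-token',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Accelerator?.AcceleratorArn);
});
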
@@ -76,11 +76,11 @@ declare class GlobalAccelerator extends Service {
  */
  createCustomRoutingListener(callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateCustomRoutingListenerResponse) => void): Request<GlobalAccelerator.Types.CreateCustomRoutingListenerResponse, AWSError>;
  /**
- * Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint.
+ * Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint. For more information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.
  */
  createEndpointGroup(params: GlobalAccelerator.Types.CreateEndpointGroupRequest, callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateEndpointGroupResponse) => void): Request<GlobalAccelerator.Types.CreateEndpointGroupResponse, AWSError>;
  /**
- * Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint.
+ * Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint. For more information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.
  */
  createEndpointGroup(callback?: (err: AWSError, data: GlobalAccelerator.Types.CreateEndpointGroupResponse) => void): Request<GlobalAccelerator.Types.CreateEndpointGroupResponse, AWSError>;
  /**
@@ -340,11 +340,11 @@ declare class GlobalAccelerator extends Service {
  */
  untagResource(callback?: (err: AWSError, data: GlobalAccelerator.Types.UntagResourceResponse) => void): Request<GlobalAccelerator.Types.UntagResourceResponse, AWSError>;
  /**
- * Update an accelerator. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Update an accelerator to make changes, such as the following: Change the name of the accelerator. Disable the accelerator so that it no longer accepts or routes traffic, or so that you can delete it. Enable the accelerator, if it is disabled. Change the IP address type to dual-stack if it is IPv4, or change the IP address type to IPv4 if it's dual-stack. Be aware that static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete the accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  updateAccelerator(params: GlobalAccelerator.Types.UpdateAcceleratorRequest, callback?: (err: AWSError, data: GlobalAccelerator.Types.UpdateAcceleratorResponse) => void): Request<GlobalAccelerator.Types.UpdateAcceleratorResponse, AWSError>;
  /**
- * Update an accelerator. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.
+ * Update an accelerator to make changes, such as the following: Change the name of the accelerator. Disable the accelerator so that it no longer accepts or routes traffic, or so that you can delete it. Enable the accelerator, if it is disabled. Change the IP address type to dual-stack if it is IPv4, or change the IP address type to IPv4 if it's dual-stack. Be aware that static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete the accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.
  */
  updateAccelerator(callback?: (err: AWSError, data: GlobalAccelerator.Types.UpdateAcceleratorResponse) => void): Request<GlobalAccelerator.Types.UpdateAcceleratorResponse, AWSError>;
  /**
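
One of the update paths now spelled out above, switching an existing accelerator to dual-stack addressing, might look roughly like this (the ARN is a placeholder):

// Sketch only: change the IP address type of an existing accelerator.
import * as AWS from 'aws-sdk';

const ga = new AWS.GlobalAccelerator({ region: 'us-west-2' });

ga.updateAccelerator({
  AcceleratorArn: 'arn:aws:globalaccelerator::111122223333:accelerator/example',
  IpAddressType: 'DUAL_STACK',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Accelerator?.IpAddressType);
});
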
@@ -420,11 +420,11 @@ declare class GuardDuty extends Service {
  */
  listPublishingDestinations(callback?: (err: AWSError, data: GuardDuty.Types.ListPublishingDestinationsResponse) => void): Request<GuardDuty.Types.ListPublishingDestinationsResponse, AWSError>;
  /**
- * Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, publishing destination, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.
+ * Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per each resource. When invoked, this operation returns all assigned tags for a given resource.
  */
  listTagsForResource(params: GuardDuty.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: GuardDuty.Types.ListTagsForResourceResponse) => void): Request<GuardDuty.Types.ListTagsForResourceResponse, AWSError>;
  /**
- * Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, publishing destination, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.
+ * Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per each resource. When invoked, this operation returns all assigned tags for a given resource.
  */
  listTagsForResource(callback?: (err: AWSError, data: GuardDuty.Types.ListTagsForResourceResponse) => void): Request<GuardDuty.Types.ListTagsForResourceResponse, AWSError>;
  /**
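
For reference, the tagging operation documented above takes only a resource ARN; a minimal sketch (the detector ARN is a placeholder):

// Sketch only: list the tags on a GuardDuty detector.
import * as AWS from 'aws-sdk';

const guardduty = new AWS.GuardDuty({ region: 'us-east-1' });

guardduty.listTagsForResource({
  ResourceArn: 'arn:aws:guardduty:us-east-1:111122223333:detector/exampledetectorid',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Tags);
});
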
@@ -1092,7 +1092,7 @@ declare namespace GuardDuty {
  */
  Rank?: FilterRank;
  /**
- * Represents the criteria to be used in the filter for querying findings. You can only use the following attributes to query findings: accountId region id resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.outpostArn resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.resourceType service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.userAgent service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.additionalInfo.threatListName resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.resourceRole severity type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.
1095
+ * Represents the criteria to be used in the filter for querying findings. You can only use the following attributes to query findings: accountId id region severity To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition: Low: ["1", "2", "3"] Medium: ["4", "5", "6"] High: ["7", "8", "9"] For more information, see Severity levels for GuardDuty findings. type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.action.awsApiCallAction.remoteAccountDetails.affiliated service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 service.action.kubernetesApiCallAction.requestUri service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.protocol service.action.awsApiCallAction.serviceName service.action.awsApiCallAction.remoteAccountDetails.accountId service.additionalInfo.threatListName service.resourceRole resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.namespace resource.kubernetesDetails.kubernetesUserDetails.username resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix service.ebsVolumeScanDetails.scanId service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image resource.rdsDbInstanceDetails.dbInstanceIdentifier resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 service.runtimeDetails.process.name service.runtimeDetails.process.name resource.lambdaDetails.functionName resource.lambdaDetails.functionArn resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value
1096
1096
  */
1097
1097
  FindingCriteria: FindingCriteria;
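A minimal sketch of a CreateFilter request that uses the severity input list described in the updated comment above; the detector ID and filter name are hypothetical, and the extra resource.resourceType criterion is only illustrative.

import * as AWS from 'aws-sdk';

const guardduty = new AWS.GuardDuty({ region: 'us-east-1' });

const params: AWS.GuardDuty.CreateFilterRequest = {
  DetectorId: 'exampledetectorid1234567890abcdef', // hypothetical
  Name: 'archive-high-severity',                   // hypothetical
  Action: 'ARCHIVE',
  FindingCriteria: {
    Criterion: {
      // High severity corresponds to the input list ["7", "8", "9"] per the doc above.
      severity: { Equals: ['7', '8', '9'] },
      // Any other attribute from the list above can be combined in the same criterion map.
      'resource.resourceType': { Equals: ['Instance'] },
    },
  },
};

guardduty.createFilter(params).promise()
  .then((data) => console.log('Created filter:', data.Name))
  .catch((err) => console.error(err));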
1098
1098
  /**
@@ -3197,7 +3197,7 @@ declare namespace GuardDuty {
3197
3197
  export type OrderBy = "ASC"|"DESC"|string;
3198
3198
  export type OrgFeature = "S3_DATA_EVENTS"|"EKS_AUDIT_LOGS"|"EBS_MALWARE_PROTECTION"|"RDS_LOGIN_EVENTS"|"EKS_RUNTIME_MONITORING"|"LAMBDA_NETWORK_LOGS"|string;
3199
3199
  export type OrgFeatureAdditionalConfiguration = "EKS_ADDON_MANAGEMENT"|string;
3200
- export type OrgFeatureStatus = "NEW"|"NONE"|string;
3200
+ export type OrgFeatureStatus = "NEW"|"NONE"|"ALL"|string;
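The new ALL status feeds the per-feature AutoEnable setting in organization configuration. A minimal sketch, assuming a hypothetical detector ID for the delegated administrator account and the EKS_RUNTIME_MONITORING feature as an example:

import * as AWS from 'aws-sdk';

const guardduty = new AWS.GuardDuty({ region: 'us-east-1' });

guardduty.updateOrganizationConfiguration({
  DetectorId: 'exampledetectorid1234567890abcdef', // hypothetical
  Features: [
    {
      Name: 'EKS_RUNTIME_MONITORING',
      AutoEnable: 'ALL', // the new OrgFeatureStatus value added in this release
    },
  ],
}).promise()
  .then(() => console.log('Organization configuration updated'))
  .catch((err) => console.error(err));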
3201
3201
  export interface Organization {
3202
3202
  /**
3203
3203
  * The Autonomous System Number (ASN) of the internet provider of the remote IP address.
@@ -5292,9 +5292,9 @@ declare namespace SageMaker {
5292
5292
  }
5293
5293
  export interface CreateModelCardExportJobRequest {
5294
5294
  /**
5295
- * The name of the model card to export.
5295
+ * The name or Amazon Resource Name (ARN) of the model card to export.
5296
5296
  */
5297
- ModelCardName: EntityName;
5297
+ ModelCardName: ModelCardNameOrArn;
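A minimal sketch showing that ModelCardName now also accepts a model card ARN; the ARN, job name, bucket, and account number are hypothetical. The same name-or-ARN form applies to DescribeModelCard, ListModelCardVersions, and UpdateModelCard below.

import * as AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

const params: AWS.SageMaker.CreateModelCardExportJobRequest = {
  // Either the plain model card name or its full ARN is accepted (hypothetical ARN shown).
  ModelCardName: 'arn:aws:sagemaker:us-east-1:123456789012:model-card/example-model-card',
  ModelCardExportJobName: 'example-model-card-export', // hypothetical
  OutputConfig: {
    S3OutputPath: 's3://example-bucket/model-card-exports/', // hypothetical
  },
};

sagemaker.createModelCardExportJob(params).promise()
  .then((data) => console.log('Export job ARN:', data.ModelCardExportJobArn))
  .catch((err) => console.error(err));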
5298
5298
  /**
5299
5299
  * The version of the model card to export. If a version is not provided, then the latest version of the model card is exported.
5300
5300
  */
@@ -8893,7 +8893,7 @@ declare namespace SageMaker {
8893
8893
  */
8894
8894
  Status: ModelCardExportJobStatus;
8895
8895
  /**
8896
- * The name of the model card that the model export job exports.
8896
+ * The name or Amazon Resource Name (ARN) of the model card that the model export job exports.
8897
8897
  */
8898
8898
  ModelCardName: EntityName;
8899
8899
  /**
@@ -8923,9 +8923,9 @@ declare namespace SageMaker {
8923
8923
  }
8924
8924
  export interface DescribeModelCardRequest {
8925
8925
  /**
8926
- * The name of the model card to describe.
8926
+ * The name or Amazon Resource Name (ARN) of the model card to describe.
8927
8927
  */
8928
- ModelCardName: EntityName;
8928
+ ModelCardName: ModelCardNameOrArn;
8929
8929
  /**
8930
8930
  * The version of the model card to describe. If a version is not provided, then the latest version of the model card is described.
8931
8931
  */
@@ -14819,9 +14819,9 @@ declare namespace SageMaker {
14819
14819
  */
14820
14820
  MaxResults?: MaxResults;
14821
14821
  /**
14822
- * List model card versions for the model card with the specified name.
14822
+ * List model card versions for the model card with the specified name or Amazon Resource Name (ARN).
14823
14823
  */
14824
- ModelCardName: EntityName;
14824
+ ModelCardName: ModelCardNameOrArn;
14825
14825
  /**
14826
14826
  * Only list model card versions with the specified approval status.
14827
14827
  */
@@ -16534,6 +16534,7 @@ declare namespace SageMaker {
16534
16534
  */
16535
16535
  S3OutputPath: S3Uri;
16536
16536
  }
16537
+ export type ModelCardNameOrArn = string;
16537
16538
  export type ModelCardProcessingStatus = "DeleteInProgress"|"DeletePending"|"ContentDeleted"|"ExportJobsDeleted"|"DeleteCompleted"|"DeleteFailed"|string;
16538
16539
  export interface ModelCardSecurityConfig {
16539
16540
  /**
@@ -18067,7 +18068,7 @@ declare namespace SageMaker {
18067
18068
  */
18068
18069
  TargetPlatform?: TargetPlatform;
18069
18070
  /**
18070
- * Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions. DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. "float32" is used if "DTYPE" is not specified. Options for data type are: float32: Use either "float" or "float32". int64: Use either "int64" or "long". For example, {"dtype" : "float32"}. CPU: Compilation for CPU supports the following compiler options. mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} ARM: Details of ARM CPU compilations. NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support. NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. gpu_code: Specifies the targeted architecture. trt-ver: Specifies the TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version in x.y format. For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} ANDROID: Compilation for the Android OS supports the following compiler options: ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support. INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"". For information about supported compiler options, see Neuron Compiler CLI. CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options: class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by newlines. EIA: Compilation for the Elastic Inference Accelerator supports the following compiler options: precision_mode: Specifies the precision of compiled artifacts. Supported values are "FP16" and "FP32". Default is "FP32". signature_def_key: Specifies the signature to use for models in SavedModel format. Defaults is TensorFlow's default signature def key. output_names: Specifies a list of output tensor names for models in FrozenGraph format. Set at most one API field, either: signature_def_key or output_names. For example: {"precision_mode": "FP32", "output_names": ["output:0"]}
18071
+ * Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions. DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. "float32" is used if "DTYPE" is not specified. Options for data type are: float32: Use either "float" or "float32". int64: Use either "int64" or "long". For example, {"dtype" : "float32"}. CPU: Compilation for CPU supports the following compiler options. mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} ARM: Details of ARM CPU compilations. NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support. NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. gpu_code: Specifies the targeted architecture. trt-ver: Specifies the TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version in x.y format. For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} ANDROID: Compilation for the Android OS supports the following compiler options: ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support. INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"". For information about supported compiler options, see Neuron Compiler CLI Reference Guide. CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options: class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by newlines. EIA: Compilation for the Elastic Inference Accelerator supports the following compiler options: precision_mode: Specifies the precision of compiled artifacts. Supported values are "FP16" and "FP32". Default is "FP32". signature_def_key: Specifies the signature to use for models in SavedModel format. Defaults is TensorFlow's default signature def key. output_names: Specifies a list of output tensor names for models in FrozenGraph format. Set at most one API field, either: signature_def_key or output_names. For example: {"precision_mode": "FP32", "output_names": ["output:0"]}
18071
18072
  */
18072
18073
  CompilerOptions?: CompilerOptions;
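A minimal sketch of a compilation job whose OutputConfig passes the mattr NEON flag for an ARM 32-bit Linux target, following the guidance in the comment above; the job name, IAM role, S3 paths, and input shape are hypothetical.

import * as AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

const params: AWS.SageMaker.CreateCompilationJobRequest = {
  CompilationJobName: 'example-neo-compilation',                        // hypothetical
  RoleArn: 'arn:aws:iam::123456789012:role/ExampleSageMakerNeoRole',    // hypothetical
  InputConfig: {
    S3Uri: 's3://example-bucket/models/model.tar.gz',                   // hypothetical
    DataInputConfig: '{"input0": [1, 3, 224, 224]}',                    // hypothetical shape
    Framework: 'PYTORCH',
  },
  OutputConfig: {
    S3OutputPath: 's3://example-bucket/compiled/',                      // hypothetical
    // ARM 32-bit Linux target; CompilerOptions is always a JSON string.
    TargetPlatform: { Os: 'LINUX', Arch: 'ARM_EABIHF' },
    CompilerOptions: JSON.stringify({ mattr: ['+neon'] }),
  },
  StoppingCondition: { MaxRuntimeInSeconds: 900 },
};

sagemaker.createCompilationJob(params).promise()
  .then((data) => console.log('Compilation job ARN:', data.CompilationJobArn))
  .catch((err) => console.error(err));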
18073
18074
  /**
@@ -22131,9 +22132,9 @@ declare namespace SageMaker {
22131
22132
  }
22132
22133
  export interface UpdateModelCardRequest {
22133
22134
  /**
22134
- * The name of the model card to update.
22135
+ * The name or Amazon Resource Name (ARN) of the model card to update.
22135
22136
  */
22136
- ModelCardName: EntityName;
22137
+ ModelCardName: ModelCardNameOrArn;
22137
22138
  /**
22138
22139
  * The updated model card content. Content must be in model card JSON schema and provided as a string. When updating model card content, be sure to include the full content and not just updated content.
22139
22140
  */
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
83
83
  /**
84
84
  * @constant
85
85
  */
86
- VERSION: '2.1432.0',
86
+ VERSION: '2.1433.0',
87
87
 
88
88
  /**
89
89
  * @api private