aws-sdk 2.1421.0 → 2.1422.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/clients/datasync.d.ts CHANGED
@@ -35,6 +35,14 @@ declare class DataSync extends Service {
  * Activates a DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account. If you haven't deployed an agent yet, see the following topics to learn more: Agent requirements and Create an agent. If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent.
  */
  createAgent(callback?: (err: AWSError, data: DataSync.Types.CreateAgentResponse) => void): Request<DataSync.Types.CreateAgentResponse, AWSError>;
+ /**
+ * Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination. Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.
+ */
+ createLocationAzureBlob(params: DataSync.Types.CreateLocationAzureBlobRequest, callback?: (err: AWSError, data: DataSync.Types.CreateLocationAzureBlobResponse) => void): Request<DataSync.Types.CreateLocationAzureBlobResponse, AWSError>;
+ /**
+ * Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination. Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.
+ */
+ createLocationAzureBlob(callback?: (err: AWSError, data: DataSync.Types.CreateLocationAzureBlobResponse) => void): Request<DataSync.Types.CreateLocationAzureBlobResponse, AWSError>;
  /**
  * Creates an endpoint for an Amazon EFS file system that DataSync can access for a transfer. For more information, see Creating a location for Amazon EFS.
  */
@@ -84,11 +92,11 @@ declare class DataSync extends Service {
  */
  createLocationHdfs(callback?: (err: AWSError, data: DataSync.Types.CreateLocationHdfsResponse) => void): Request<DataSync.Types.CreateLocationHdfsResponse, AWSError>;
  /**
- * Defines a file system on a Network File System (NFS) server that can be read from or written to.
+ * Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer.
  */
  createLocationNfs(params: DataSync.Types.CreateLocationNfsRequest, callback?: (err: AWSError, data: DataSync.Types.CreateLocationNfsResponse) => void): Request<DataSync.Types.CreateLocationNfsResponse, AWSError>;
  /**
- * Defines a file system on a Network File System (NFS) server that can be read from or written to.
+ * Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer.
  */
  createLocationNfs(callback?: (err: AWSError, data: DataSync.Types.CreateLocationNfsResponse) => void): Request<DataSync.Types.CreateLocationNfsResponse, AWSError>;
  /**
@@ -108,11 +116,11 @@ declare class DataSync extends Service {
  */
  createLocationS3(callback?: (err: AWSError, data: DataSync.Types.CreateLocationS3Response) => void): Request<DataSync.Types.CreateLocationS3Response, AWSError>;
  /**
- * Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access for a transfer. For more information, see Creating an SMB location.
+ * Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer. Before you begin, make sure that you understand how DataSync accesses an SMB file server.
  */
  createLocationSmb(params: DataSync.Types.CreateLocationSmbRequest, callback?: (err: AWSError, data: DataSync.Types.CreateLocationSmbResponse) => void): Request<DataSync.Types.CreateLocationSmbResponse, AWSError>;
  /**
- * Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access for a transfer. For more information, see Creating an SMB location.
+ * Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer. Before you begin, make sure that you understand how DataSync accesses an SMB file server.
  */
  createLocationSmb(callback?: (err: AWSError, data: DataSync.Types.CreateLocationSmbResponse) => void): Request<DataSync.Types.CreateLocationSmbResponse, AWSError>;
  /**
@@ -163,6 +171,14 @@ declare class DataSync extends Service {
  * Returns information about a DataSync discovery job.
  */
  describeDiscoveryJob(callback?: (err: AWSError, data: DataSync.Types.DescribeDiscoveryJobResponse) => void): Request<DataSync.Types.DescribeDiscoveryJobResponse, AWSError>;
+ /**
+ * Provides details about how a DataSync transfer location for Microsoft Azure Blob Storage is configured.
+ */
+ describeLocationAzureBlob(params: DataSync.Types.DescribeLocationAzureBlobRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeLocationAzureBlobResponse) => void): Request<DataSync.Types.DescribeLocationAzureBlobResponse, AWSError>;
+ /**
+ * Provides details about how a DataSync transfer location for Microsoft Azure Blob Storage is configured.
+ */
+ describeLocationAzureBlob(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationAzureBlobResponse) => void): Request<DataSync.Types.DescribeLocationAzureBlobResponse, AWSError>;
  /**
  * Returns metadata about your DataSync location for an Amazon EFS file system.
  */
@@ -268,19 +284,19 @@ declare class DataSync extends Service {
  */
  describeStorageSystemResources(callback?: (err: AWSError, data: DataSync.Types.DescribeStorageSystemResourcesResponse) => void): Request<DataSync.Types.DescribeStorageSystemResourcesResponse, AWSError>;
  /**
- * Returns metadata about a task.
+ * Provides information about a DataSync transfer task.
  */
  describeTask(params: DataSync.Types.DescribeTaskRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeTaskResponse) => void): Request<DataSync.Types.DescribeTaskResponse, AWSError>;
  /**
- * Returns metadata about a task.
+ * Provides information about a DataSync transfer task.
  */
  describeTask(callback?: (err: AWSError, data: DataSync.Types.DescribeTaskResponse) => void): Request<DataSync.Types.DescribeTaskResponse, AWSError>;
  /**
- * Returns detailed metadata about a task that is being executed.
+ * Provides information about a DataSync transfer task that's running.
  */
  describeTaskExecution(params: DataSync.Types.DescribeTaskExecutionRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeTaskExecutionResponse) => void): Request<DataSync.Types.DescribeTaskExecutionResponse, AWSError>;
  /**
- * Returns detailed metadata about a task that is being executed.
+ * Provides information about a DataSync transfer task that's running.
  */
  describeTaskExecution(callback?: (err: AWSError, data: DataSync.Types.DescribeTaskExecutionResponse) => void): Request<DataSync.Types.DescribeTaskExecutionResponse, AWSError>;
  /**
@@ -411,6 +427,14 @@ declare class DataSync extends Service {
  * Edits a DataSync discovery job configuration.
  */
  updateDiscoveryJob(callback?: (err: AWSError, data: DataSync.Types.UpdateDiscoveryJobResponse) => void): Request<DataSync.Types.UpdateDiscoveryJobResponse, AWSError>;
+ /**
+ * Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.
+ */
+ updateLocationAzureBlob(params: DataSync.Types.UpdateLocationAzureBlobRequest, callback?: (err: AWSError, data: DataSync.Types.UpdateLocationAzureBlobResponse) => void): Request<DataSync.Types.UpdateLocationAzureBlobResponse, AWSError>;
+ /**
+ * Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.
+ */
+ updateLocationAzureBlob(callback?: (err: AWSError, data: DataSync.Types.UpdateLocationAzureBlobResponse) => void): Request<DataSync.Types.UpdateLocationAzureBlobResponse, AWSError>;
  /**
  * Updates some parameters of a previously created location for a Hadoop Distributed File System cluster.
  */
@@ -480,7 +504,7 @@ declare namespace DataSync {
  */
  SystemType: DiscoverySystemType;
  /**
- * Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface.
+ * Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.
  */
  AgentArns: DiscoveryAgentArnList;
  /**
@@ -529,6 +553,18 @@ declare namespace DataSync {
  }
  export type AgentStatus = "ONLINE"|"OFFLINE"|string;
  export type Atime = "NONE"|"BEST_EFFORT"|string;
+ export type AzureAccessTier = "HOT"|"COOL"|"ARCHIVE"|string;
+ export type AzureBlobAuthenticationType = "SAS"|string;
+ export type AzureBlobContainerUrl = string;
+ export interface AzureBlobSasConfiguration {
+ /**
+ * Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level. The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this: sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D
+ */
+ Token: AzureBlobSasToken;
+ }
+ export type AzureBlobSasToken = string;
+ export type AzureBlobSubdirectory = string;
+ export type AzureBlobType = "BLOCK"|string;
  export type BytesPerSecond = number;
  export interface CancelTaskExecutionRequest {
  /**
@@ -571,11 +607,11 @@ declare namespace DataSync {
  */
  VpcEndpointId?: VpcEndpointId;
  /**
- * Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer.
+ * Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. You can only specify one ARN.
  */
  SubnetArns?: PLSubnetArnList;
  /**
- * Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint.
+ * Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN.
  */
  SecurityGroupArns?: PLSecurityGroupArnList;
  }
@@ -585,6 +621,46 @@ declare namespace DataSync {
  */
  AgentArn?: AgentArn;
  }
+ export interface CreateLocationAzureBlobRequest {
+ /**
+ * Specifies the URL of the Azure Blob Storage container involved in your transfer.
+ */
+ ContainerUrl: AzureBlobContainerUrl;
+ /**
+ * Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
+ */
+ AuthenticationType: AzureBlobAuthenticationType;
+ /**
+ * Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
+ */
+ SasConfiguration?: AzureBlobSasConfiguration;
+ /**
+ * Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
+ */
+ BlobType?: AzureBlobType;
+ /**
+ * Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
+ */
+ AccessTier?: AzureAccessTier;
+ /**
+ * Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).
+ */
+ Subdirectory?: AzureBlobSubdirectory;
+ /**
+ * Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. You can specify more than one agent. For more information, see Using multiple agents for your transfer.
+ */
+ AgentArns: AgentArnList;
+ /**
+ * Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.
+ */
+ Tags?: InputTagList;
+ }
+ export interface CreateLocationAzureBlobResponse {
+ /**
+ * The ARN of the Azure Blob Storage transfer location that you created.
+ */
+ LocationArn?: LocationArn;
+ }
  export interface CreateLocationEfsRequest {
  /**
  * Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories. You must specify a value with forward slashes (for example, /path/to/folder).
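As a quick illustration of the new Azure Blob surface, here is a minimal sketch of creating a location with the v2 SDK. All identifiers below (region, container URL, SAS token, agent ARN) are hypothetical placeholders, not values taken from this diff.

import * as AWS from 'aws-sdk';

const datasync = new AWS.DataSync({ region: 'us-east-1' });

// Hypothetical values for illustration only.
const params: AWS.DataSync.Types.CreateLocationAzureBlobRequest = {
  ContainerUrl: 'https://myaccount.blob.core.windows.net/my-container',
  AuthenticationType: 'SAS',
  SasConfiguration: { Token: 'sp=r&st=2023-12-20T14:54:52Z&se=...' }, // placeholder token
  BlobType: 'BLOCK',
  AccessTier: 'HOT',
  Subdirectory: '/my/images',
  AgentArns: ['arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0'],
};

datasync.createLocationAzureBlob(params, (err, data) => {
  if (err) console.error(err);
  else console.log('Created location:', data.LocationArn);
});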
@@ -796,29 +872,29 @@ declare namespace DataSync {
  }
  export interface CreateLocationNfsRequest {
  /**
- * The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information. For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
+ * Specifies the subdirectory in the NFS file server that DataSync transfers to or from. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync to access allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
  */
  Subdirectory: NfsSubdirectory;
  /**
- * The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information. This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.
+ * Specifies the IP address or domain name of your NFS file server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information. You must specify an IP version 4 address or a Domain Name System (DNS)-compliant name.
  */
  ServerHostname: ServerHostname;
  /**
- * Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
+ * Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to connect to your NFS file server. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
  */
  OnPremConfig: OnPremConfig;
  /**
- * The NFS mount options that DataSync can use to mount your NFS share.
+ * Specifies the mount options that DataSync can use to mount your NFS share.
  */
  MountOptions?: NfsMountOptions;
  /**
- * The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
+ * Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
  */
  Tags?: InputTagList;
  }
  export interface CreateLocationNfsResponse {
  /**
- * The Amazon Resource Name (ARN) of the source NFS file system location that is created.
+ * The ARN of the transfer location that you created for your NFS file server.
  */
  LocationArn?: LocationArn;
  }
@@ -1085,6 +1161,42 @@ declare namespace DataSync {
  */
  JobEndTime?: DiscoveryTime;
  }
+ export interface DescribeLocationAzureBlobRequest {
+ /**
+ * Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage transfer location.
+ */
+ LocationArn: LocationArn;
+ }
+ export interface DescribeLocationAzureBlobResponse {
+ /**
+ * The ARN of your Azure Blob Storage transfer location.
+ */
+ LocationArn?: LocationArn;
+ /**
+ * The URL of the Azure Blob Storage container involved in your transfer.
+ */
+ LocationUri?: LocationUri;
+ /**
+ * The authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
+ */
+ AuthenticationType?: AzureBlobAuthenticationType;
+ /**
+ * The type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
+ */
+ BlobType?: AzureBlobType;
+ /**
+ * The access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
+ */
+ AccessTier?: AzureAccessTier;
+ /**
+ * The ARNs of the DataSync agents that can connect with your Azure Blob Storage container.
+ */
+ AgentArns?: AgentArnList;
+ /**
+ * The time that your Azure Blob Storage transfer location was created.
+ */
+ CreationTime?: Time;
+ }
  export interface DescribeLocationEfsRequest {
  /**
  * The Amazon Resource Name (ARN) of the Amazon EFS file system location that you want information about.
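For completeness, a hedged sketch of reading that configuration back with describeLocationAzureBlob; the region and location ARN are placeholders, not values from this diff.

import * as AWS from 'aws-sdk';

const datasync = new AWS.DataSync({ region: 'us-east-1' });

// Hypothetical location ARN for illustration only.
datasync.describeLocationAzureBlob(
  { LocationArn: 'arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0' },
  (err, data) => {
    if (err) console.error(err);
    else console.log(data.AuthenticationType, data.AccessTier, data.AgentArns);
  }
);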
@@ -1308,7 +1420,7 @@ declare namespace DataSync {
  LocationUri?: LocationUri;
  OnPremConfig?: OnPremConfig;
  /**
- * The NFS mount options that DataSync used to mount your NFS share.
+ * The mount options that DataSync uses to mount your NFS share.
  */
  MountOptions?: NfsMountOptions;
  /**
@@ -1547,7 +1659,7 @@ declare namespace DataSync {
  }
  export interface DescribeTaskExecutionRequest {
  /**
- * The Amazon Resource Name (ARN) of the task that is being executed.
+ * Specifies the Amazon Resource Name (ARN) of the transfer task that's running.
  */
  TaskExecutionArn: TaskExecutionArn;
  }
@@ -1604,7 +1716,7 @@ declare namespace DataSync {
  }
  export interface DescribeTaskRequest {
  /**
- * The Amazon Resource Name (ARN) of the task to describe.
+ * Specifies the Amazon Resource Name (ARN) of the transfer task.
  */
  TaskArn: TaskArn;
  }
@@ -2375,19 +2487,19 @@ declare namespace DataSync {
  export type PreserveDevices = "NONE"|"PRESERVE"|string;
  export interface PrivateLinkConfig {
  /**
- * The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public internet.
+ * Specifies the ID of the VPC endpoint that your agent connects to.
  */
  VpcEndpointId?: VpcEndpointId;
  /**
- * The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public internet.
+ * Specifies the VPC endpoint provided by Amazon Web Services PrivateLink that your agent connects to.
  */
  PrivateLinkEndpoint?: Endpoint;
  /**
- * The Amazon Resource Names (ARNs) of the subnets that are configured for an agent activated in a VPC or an agent that has access to a VPC endpoint.
+ * Specifies the ARN of the subnet where your VPC endpoint is located. You can only specify one ARN.
  */
  SubnetArns?: PLSubnetArnList;
  /**
- * The Amazon Resource Names (ARNs) of the security groups that are configured for the EC2 resource that hosts an agent activated in a VPC or an agent that has access to a VPC endpoint.
+ * Specifies the Amazon Resource Name (ARN) of the security group that provides DataSync access to your VPC endpoint. You can only specify one ARN.
  */
  SecurityGroupArns?: PLSecurityGroupArnList;
  }
@@ -2741,6 +2853,38 @@ declare namespace DataSync {
  }
  export interface UpdateDiscoveryJobResponse {
  }
+ export interface UpdateLocationAzureBlobRequest {
+ /**
+ * Specifies the ARN of the Azure Blob Storage transfer location that you're updating.
+ */
+ LocationArn: LocationArn;
+ /**
+ * Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).
+ */
+ Subdirectory?: AzureBlobSubdirectory;
+ /**
+ * Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
+ */
+ AuthenticationType?: AzureBlobAuthenticationType;
+ /**
+ * Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
+ */
+ SasConfiguration?: AzureBlobSasConfiguration;
+ /**
+ * Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
+ */
+ BlobType?: AzureBlobType;
+ /**
+ * Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
+ */
+ AccessTier?: AzureAccessTier;
+ /**
+ * Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. You can specify more than one agent. For more information, see Using multiple agents for your transfer.
+ */
+ AgentArns?: AgentArnList;
+ }
+ export interface UpdateLocationAzureBlobResponse {
+ }
  export interface UpdateLocationHdfsRequest {
  /**
  * The Amazon Resource Name (ARN) of the source HDFS cluster location.
@@ -2799,11 +2943,11 @@ declare namespace DataSync {
  }
  export interface UpdateLocationNfsRequest {
  /**
- * The Amazon Resource Name (ARN) of the NFS location to update.
+ * Specifies the Amazon Resource Name (ARN) of the NFS location that you want to update.
  */
  LocationArn: LocationArn;
  /**
- * The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information. For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
+ * Specifies the subdirectory in your NFS file system that DataSync uses to read from or write to during a transfer. The NFS path should be exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
  */
  Subdirectory?: NfsSubdirectory;
  OnPremConfig?: OnPremConfig;
@@ -2886,7 +3030,7 @@ declare namespace DataSync {
  */
  ServerConfiguration?: DiscoveryServerConfiguration;
  /**
- * Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system.
+ * Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system. You can only specify one ARN.
  */
  AgentArns?: DiscoveryAgentArnList;
  /**
package/clients/dynamodb.d.ts CHANGED
@@ -335,11 +335,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
  */
  restoreTableToPointInTime(callback?: (err: AWSError, data: DynamoDB.Types.RestoreTableToPointInTimeOutput) => void): Request<DynamoDB.Types.RestoreTableToPointInTimeOutput, AWSError>;
  /**
- * The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria. A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
+ * The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes the number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan can result in no items meeting the criteria, in which case the Count will be zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1 MB, do not represent the total number of items in the table. A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.
  */
  scan(params: DynamoDB.Types.ScanInput, callback?: (err: AWSError, data: DynamoDB.Types.ScanOutput) => void): Request<DynamoDB.Types.ScanOutput, AWSError>;
  /**
- * The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria. A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
+ * The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes the number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan can result in no items meeting the criteria, in which case the Count will be zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount. Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1 MB, do not represent the total number of items in the table. A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level. DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.
  */
  scan(callback?: (err: AWSError, data: DynamoDB.Types.ScanOutput) => void): Request<DynamoDB.Types.ScanOutput, AWSError>;
  /**
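The reworded Scan documentation stresses two points: LastEvaluatedKey drives pagination, and Count/ScannedCount describe a single request rather than the whole table. A minimal pagination sketch with the v2 SDK (the table name is a placeholder):

import * as AWS from 'aws-sdk';

const ddb = new AWS.DynamoDB({ region: 'us-east-1' });

// Scans the whole table by following LastEvaluatedKey until it is absent.
// 'MyTable' is a placeholder table name.
async function scanAll(): Promise<AWS.DynamoDB.ItemList> {
  const items: AWS.DynamoDB.ItemList = [];
  let startKey: AWS.DynamoDB.Key | undefined;
  do {
    const page = await ddb
      .scan({ TableName: 'MyTable', ExclusiveStartKey: startKey })
      .promise();
    items.push(...(page.Items ?? []));
    // Count/ScannedCount describe only this page (at most 1 MB), not the whole table.
    startKey = page.LastEvaluatedKey;
  } while (startKey);
  return items;
}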
@@ -752,7 +752,7 @@ declare namespace DynamoDB {
  }
  export interface BatchExecuteStatementOutput {
  /**
- * The response to each PartiQL statement in the batch.
+ * The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements.
  */
  Responses?: PartiQLBatchResponse;
  /**
@@ -3591,7 +3591,7 @@ declare namespace DynamoDB {
  */
  ExpressionAttributeValues?: ExpressionAttributeValueMap;
  /**
- * Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
+ * Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE and ALL_OLD.
  */
  ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
package/clients/ec2.d.ts CHANGED
@@ -6910,6 +6910,7 @@ declare namespace EC2 {
  export type AvailableInstanceCapacityList = InstanceCapacity[];
  export type BareMetal = "included"|"required"|"excluded"|string;
  export type BareMetalFlag = boolean;
+ export type BaselineBandwidthInGbps = number;
  export type BaselineBandwidthInMbps = number;
  export interface BaselineEbsBandwidthMbps {
  /**
@@ -22903,6 +22904,10 @@ declare namespace EC2 {
  * Describes the Inference accelerators for the instance type.
  */
  Accelerators?: InferenceDeviceInfoList;
+ /**
+ * The total size of the memory for the inference accelerators for the instance type, in MiB.
+ */
+ TotalInferenceMemoryInMiB?: totalInferenceMemory;
  }
  export type InferenceDeviceCount = number;
  export interface InferenceDeviceInfo {
@@ -22918,9 +22923,20 @@ declare namespace EC2 {
  * The manufacturer of the Inference accelerator.
  */
  Manufacturer?: InferenceDeviceManufacturerName;
+ /**
+ * Describes the memory available to the inference accelerator.
+ */
+ MemoryInfo?: InferenceDeviceMemoryInfo;
  }
  export type InferenceDeviceInfoList = InferenceDeviceInfo[];
  export type InferenceDeviceManufacturerName = string;
+ export interface InferenceDeviceMemoryInfo {
+ /**
+ * The size of the memory available to the inference accelerator, in MiB.
+ */
+ SizeInMiB?: InferenceDeviceMemorySize;
+ }
+ export type InferenceDeviceMemorySize = number;
  export type InferenceDeviceName = string;
  export type InsideCidrBlocksStringList = String[];
  export interface Instance {
@@ -29015,6 +29031,14 @@ declare namespace EC2 {
  * The maximum number of network interfaces for the network card.
  */
  MaximumNetworkInterfaces?: MaxNetworkInterfaces;
+ /**
+ * The baseline network performance of the network card, in Gbps.
+ */
+ BaselineBandwidthInGbps?: BaselineBandwidthInGbps;
+ /**
+ * The peak (burst) network performance of the network card, in Gbps.
+ */
+ PeakBandwidthInGbps?: PeakBandwidthInGbps;
  }
  export type NetworkCardInfoList = NetworkCardInfo[];
  export interface NetworkInfo {
@@ -29900,6 +29924,7 @@ declare namespace EC2 {
  */
  SubsystemVendorId?: String;
  }
+ export type PeakBandwidthInGbps = number;
  export interface PeeringAttachmentStatus {
  /**
  * The status code.
@@ -38397,6 +38422,7 @@ declare namespace EC2 {
  export type snapshotTierStatusSet = SnapshotTierStatus[];
  export type totalFpgaMemory = number;
  export type totalGpuMemory = number;
+ export type totalInferenceMemory = number;
  /**
  * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
  */
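To see the new EC2 capacity fields in context, here is a sketch that reads the per-network-card bandwidth and total inference-accelerator memory for an instance type. The region and instance type are placeholders, and all of these fields are optional, so the code guards for undefined.

import * as AWS from 'aws-sdk';

const ec2 = new AWS.EC2({ region: 'us-east-1' });

// 'inf1.xlarge' is a placeholder instance type for illustration.
ec2.describeInstanceTypes({ InstanceTypes: ['inf1.xlarge'] }, (err, data) => {
  if (err) { console.error(err); return; }
  for (const info of data.InstanceTypes ?? []) {
    console.log('Inference memory (MiB):', info.InferenceAcceleratorInfo?.TotalInferenceMemoryInMiB);
    for (const card of info.NetworkInfo?.NetworkCards ?? []) {
      console.log(`Card ${card.NetworkCardIndex}: baseline ${card.BaselineBandwidthInGbps} Gbps, peak ${card.PeakBandwidthInGbps} Gbps`);
    }
  }
});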
package/clients/emrserverless.d.ts CHANGED
@@ -44,11 +44,11 @@ declare class EMRServerless extends Service {
  */
  getApplication(callback?: (err: AWSError, data: EMRServerless.Types.GetApplicationResponse) => void): Request<EMRServerless.Types.GetApplicationResponse, AWSError>;
  /**
- * Returns a URL to access the job run dashboard. The generated URL is valid for one hour, after which you must invoke the API again to generate a new URL.
+ * Creates and returns a URL that you can use to access the application UIs for a job run. For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI. The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL.
  */
  getDashboardForJobRun(params: EMRServerless.Types.GetDashboardForJobRunRequest, callback?: (err: AWSError, data: EMRServerless.Types.GetDashboardForJobRunResponse) => void): Request<EMRServerless.Types.GetDashboardForJobRunResponse, AWSError>;
  /**
- * Returns a URL to access the job run dashboard. The generated URL is valid for one hour, after which you must invoke the API again to generate a new URL.
+ * Creates and returns a URL that you can use to access the application UIs for a job run. For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI. The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL.
  */
  getDashboardForJobRun(callback?: (err: AWSError, data: EMRServerless.Types.GetDashboardForJobRunResponse) => void): Request<EMRServerless.Types.GetDashboardForJobRunResponse, AWSError>;
  /**
@@ -295,6 +295,28 @@ declare namespace EMRServerless {
  jobRunId: JobRunId;
  }
  export type ClientToken = string;
+ export interface CloudWatchLoggingConfiguration {
+ /**
+ * Enables CloudWatch logging.
+ */
+ enabled: Boolean;
+ /**
+ * The name of the log group in Amazon CloudWatch Logs where you want to publish your logs.
+ */
+ logGroupName?: LogGroupName;
+ /**
+ * Prefix for the CloudWatch log stream name.
+ */
+ logStreamNamePrefix?: LogStreamNamePrefix;
+ /**
+ * The Key Management Service (KMS) key ARN to encrypt the logs that you store in CloudWatch Logs.
+ */
+ encryptionKeyArn?: EncryptionKeyArn;
+ /**
+ * The types of logs that you want to publish to CloudWatch. If you don't specify any log types, driver STDOUT and STDERR logs will be published to CloudWatch Logs by default. For more information including the supported worker types for Hive and Spark, see Logging for EMR Serverless with CloudWatch. Key Valid Values: SPARK_DRIVER, SPARK_EXECUTOR, HIVE_DRIVER, TEZ_TASK. Array Members Valid Values: STDOUT, STDERR, HIVE_LOG, TEZ_AM, SYSTEM_LOGS
+ */
+ logTypes?: LogTypeMap;
+ }
  export interface Configuration {
  /**
  * The classification within a configuration.
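A sketch of wiring the new CloudWatch logging options into a job run, following the StartJobRun request shape declared in this file. Every identifier here (application ID, client token, role ARN, bucket, log group) is a hypothetical placeholder.

import * as AWS from 'aws-sdk';

const emr = new AWS.EMRServerless({ region: 'us-east-1' });

// All identifiers are placeholders for illustration.
emr.startJobRun({
  applicationId: '00fabcdef1234567',
  clientToken: 'unique-idempotency-token-001',
  executionRoleArn: 'arn:aws:iam::111122223333:role/emr-serverless-job-role',
  jobDriver: {
    sparkSubmit: { entryPoint: 's3://my-bucket/scripts/job.py' },
  },
  configurationOverrides: {
    monitoringConfiguration: {
      cloudWatchLoggingConfiguration: {
        enabled: true,
        logGroupName: '/emr-serverless/my-app',
        logStreamNamePrefix: 'nightly',
        logTypes: { SPARK_DRIVER: ['STDOUT', 'STDERR'] },
      },
    },
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Started job run:', data.jobRunId);
});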
@@ -712,6 +734,11 @@ declare namespace EMRServerless {
  */
  tags?: TagMap;
  }
+ export type LogGroupName = string;
+ export type LogStreamNamePrefix = string;
+ export type LogTypeList = LogTypeString[];
+ export type LogTypeMap = {[key: string]: LogTypeList};
+ export type LogTypeString = string;
  export interface ManagedPersistenceMonitoringConfiguration {
  /**
  * Enables managed logging and defaults to true. If set to false, managed logging will be turned off.
@@ -746,6 +773,10 @@ declare namespace EMRServerless {
  * The managed log persistence configuration for a job run.
  */
  managedPersistenceMonitoringConfiguration?: ManagedPersistenceMonitoringConfiguration;
+ /**
+ * The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.
+ */
+ cloudWatchLoggingConfiguration?: CloudWatchLoggingConfiguration;
  }
  export interface NetworkConfiguration {
  /**
package/clients/lambda.d.ts CHANGED
@@ -2887,7 +2887,7 @@ declare namespace Lambda {
  export type ResourceArn = string;
  export type ResponseStreamingInvocationType = "RequestResponse"|"DryRun"|string;
  export type RoleArn = string;
- export type Runtime = "nodejs"|"nodejs4.3"|"nodejs6.10"|"nodejs8.10"|"nodejs10.x"|"nodejs12.x"|"nodejs14.x"|"nodejs16.x"|"java8"|"java8.al2"|"java11"|"python2.7"|"python3.6"|"python3.7"|"python3.8"|"python3.9"|"dotnetcore1.0"|"dotnetcore2.0"|"dotnetcore2.1"|"dotnetcore3.1"|"dotnet6"|"nodejs4.3-edge"|"go1.x"|"ruby2.5"|"ruby2.7"|"provided"|"provided.al2"|"nodejs18.x"|"python3.10"|"java17"|"ruby3.2"|string;
+ export type Runtime = "nodejs"|"nodejs4.3"|"nodejs6.10"|"nodejs8.10"|"nodejs10.x"|"nodejs12.x"|"nodejs14.x"|"nodejs16.x"|"java8"|"java8.al2"|"java11"|"python2.7"|"python3.6"|"python3.7"|"python3.8"|"python3.9"|"dotnetcore1.0"|"dotnetcore2.0"|"dotnetcore2.1"|"dotnetcore3.1"|"dotnet6"|"nodejs4.3-edge"|"go1.x"|"ruby2.5"|"ruby2.7"|"provided"|"provided.al2"|"nodejs18.x"|"python3.10"|"java17"|"ruby3.2"|"python3.11"|string;
  export type RuntimeVersionArn = string;
  export interface RuntimeVersionConfig {
  /**