aws-sdk 2.1429.0 → 2.1430.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1056,6 +1056,9 @@
  },
  "LogicalUsed": {
  "type": "long"
+ },
+ "ClusterCloudStorageUsed": {
+ "type": "long"
  }
  }
  },
@@ -1219,6 +1222,9 @@
  "RecommendationStatus": {},
  "LunCount": {
  "type": "long"
+ },
+ "ClusterCloudStorageUsed": {
+ "type": "long"
  }
  }
  }
@@ -17073,6 +17073,9 @@
  },
  "Tags": {
  "shape": "S7"
+ },
+ "DataCaptureConfig": {
+ "shape": "Sq4"
  }
  }
  },
@@ -804,7 +804,7 @@ declare namespace ACMPCA {
  */
  ValidityNotBefore?: Validity;
  /**
- * Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after one minute. Therefore, if you call IssueCertificate multiple times with the same idempotency token within one minute, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.
+ * Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after five minutes. Therefore, if you call IssueCertificate multiple times with the same idempotency token within five minutes, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.
  */
  IdempotencyToken?: IdempotencyToken;
  }
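
With the idempotency window now five minutes, a retry wrapper can reuse one token across a longer backoff. A minimal, hedged sketch in TypeScript against the v2 SDK (the CA ARN and CSR are placeholders; the other fields follow the standard IssueCertificate request shape):

import ACMPCA = require('aws-sdk/clients/acmpca');

const acmpca = new ACMPCA({ region: 'us-east-1' });

// Reusing the same IdempotencyToken within five minutes (previously one
// minute) makes retries of IssueCertificate resolve to a single certificate.
async function issueOnce(csr: Buffer): Promise<string | undefined> {
  const res = await acmpca.issueCertificate({
    CertificateAuthorityArn: 'arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/example', // placeholder
    Csr: csr,
    SigningAlgorithm: 'SHA256WITHRSA',
    Validity: { Type: 'DAYS', Value: 365 },
    IdempotencyToken: 'cert-request-001', // safe to reuse on retries within the window
  }).promise();
  return res.CertificateArn;
}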
@@ -1459,6 +1459,14 @@ declare class Connect extends Service {
  * Updates the name and description of a quick connect. The request accepts the following data in JSON format. At least Name or Description must be provided.
  */
  updateQuickConnectName(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
+ */
+ updateRoutingProfileAgentAvailabilityTimer(params: Connect.Types.UpdateRoutingProfileAgentAvailabilityTimerRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
+ */
+ updateRoutingProfileAgentAvailabilityTimer(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
  * Updates the channels that agents can handle in the Contact Control Panel (CCP) for a routing profile.
  */
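
The new operation takes the request shape declared further down in this diff (UpdateRoutingProfileAgentAvailabilityTimerRequest). A short sketch; both IDs are placeholders:

import Connect = require('aws-sdk/clients/connect');

const connect = new Connect({ region: 'us-west-2' });

// Switch an existing routing profile to order agents by time since their
// last inbound contact rather than longest idle time.
connect.updateRoutingProfileAgentAvailabilityTimer({
  InstanceId: 'your-instance-id',
  RoutingProfileId: 'your-routing-profile-id',
  AgentAvailabilityTimer: 'TIME_SINCE_LAST_INBOUND',
}, (err) => {
  if (err) console.error(err);
});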
@@ -1619,6 +1627,7 @@ declare namespace Connect {
  EvaluationFormVersion: VersionNumber;
  }
  export type AfterContactWorkTimeLimit = number;
+ export type AgentAvailabilityTimer = "TIME_SINCE_LAST_ACTIVITY"|"TIME_SINCE_LAST_INBOUND"|string;
  export interface AgentContactReference {
  /**
  * The identifier of the contact in this instance of Amazon Connect.
@@ -2678,6 +2687,10 @@ declare namespace Connect {
  * The tags used to organize, track, or control access for this resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }.
  */
  Tags?: TagMap;
+ /**
+ * Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
+ */
+ AgentAvailabilityTimer?: AgentAvailabilityTimer;
  }
  export interface CreateRoutingProfileResponse {
  /**
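
The timer can also be set when the profile is created. A hedged sketch (the fields besides AgentAvailabilityTimer reflect the existing CreateRoutingProfileRequest shape; IDs are placeholders):

import Connect = require('aws-sdk/clients/connect');

const connect = new Connect({ region: 'us-west-2' });

// Create a routing profile that orders agents by longest idle time.
connect.createRoutingProfile({
  InstanceId: 'your-instance-id',
  Name: 'tier-1-support',
  Description: 'Tier 1 support routing',
  DefaultOutboundQueueId: 'your-queue-id',
  MediaConcurrencies: [{ Channel: 'VOICE', Concurrency: 1 }],
  AgentAvailabilityTimer: 'TIME_SINCE_LAST_ACTIVITY',
}).promise().then((res) => console.log(res.RoutingProfileId));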
@@ -6948,6 +6961,10 @@ declare namespace Connect {
  * The number of associated users in routing profile.
  */
  NumberOfAssociatedUsers?: Long;
+ /**
+ * Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
+ */
+ AgentAvailabilityTimer?: AgentAvailabilityTimer;
  }
  export type RoutingProfileDescription = string;
  export type RoutingProfileId = string;
@@ -8871,6 +8888,20 @@ declare namespace Connect {
  */
  Description?: UpdateQuickConnectDescription;
  }
+ export interface UpdateRoutingProfileAgentAvailabilityTimerRequest {
+ /**
+ * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
+ */
+ InstanceId: InstanceId;
+ /**
+ * The identifier of the routing profile.
+ */
+ RoutingProfileId: RoutingProfileId;
+ /**
+ * Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
+ */
+ AgentAvailabilityTimer: AgentAvailabilityTimer;
+ }
  export interface UpdateRoutingProfileConcurrencyRequest {
  /**
  * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
@@ -92,11 +92,11 @@ declare class DataSync extends Service {
  */
  createLocationHdfs(callback?: (err: AWSError, data: DataSync.Types.CreateLocationHdfsResponse) => void): Request<DataSync.Types.CreateLocationHdfsResponse, AWSError>;
  /**
- * Creates an endpoint for an Network File System (NFS) file server that DataSync can use for a data transfer.
+ * Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer. For more information, see Configuring transfers to or from an NFS file server. If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.
  */
  createLocationNfs(params: DataSync.Types.CreateLocationNfsRequest, callback?: (err: AWSError, data: DataSync.Types.CreateLocationNfsResponse) => void): Request<DataSync.Types.CreateLocationNfsResponse, AWSError>;
  /**
- * Creates an endpoint for an Network File System (NFS) file server that DataSync can use for a data transfer.
+ * Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer. For more information, see Configuring transfers to or from an NFS file server. If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.
  */
  createLocationNfs(callback?: (err: AWSError, data: DataSync.Types.CreateLocationNfsResponse) => void): Request<DataSync.Types.CreateLocationNfsResponse, AWSError>;
  /**
@@ -228,11 +228,11 @@ declare class DataSync extends Service {
  */
  describeLocationHdfs(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationHdfsResponse) => void): Request<DataSync.Types.DescribeLocationHdfsResponse, AWSError>;
  /**
- * Returns metadata, such as the path information, about an NFS location.
+ * Provides details about how an DataSync transfer location for a Network File System (NFS) file server is configured.
  */
  describeLocationNfs(params: DataSync.Types.DescribeLocationNfsRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeLocationNfsResponse) => void): Request<DataSync.Types.DescribeLocationNfsResponse, AWSError>;
  /**
- * Returns metadata, such as the path information, about an NFS location.
+ * Provides details about how an DataSync transfer location for a Network File System (NFS) file server is configured.
  */
  describeLocationNfs(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationNfsResponse) => void): Request<DataSync.Types.DescribeLocationNfsResponse, AWSError>;
  /**
@@ -444,11 +444,11 @@ declare class DataSync extends Service {
  */
  updateLocationHdfs(callback?: (err: AWSError, data: DataSync.Types.UpdateLocationHdfsResponse) => void): Request<DataSync.Types.UpdateLocationHdfsResponse, AWSError>;
  /**
- * Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.
+ * Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server.
  */
  updateLocationNfs(params: DataSync.Types.UpdateLocationNfsRequest, callback?: (err: AWSError, data: DataSync.Types.UpdateLocationNfsResponse) => void): Request<DataSync.Types.UpdateLocationNfsResponse, AWSError>;
  /**
- * Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.
+ * Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server.
  */
  updateLocationNfs(callback?: (err: AWSError, data: DataSync.Types.UpdateLocationNfsResponse) => void): Request<DataSync.Types.UpdateLocationNfsResponse, AWSError>;
  /**
@@ -587,6 +587,10 @@ declare namespace DataSync {
  * The amount of space that's being used in a storage system resource without accounting for compression or deduplication.
  */
  LogicalUsed?: NonNegativeLong;
+ /**
+ * The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).
+ */
+ ClusterCloudStorageUsed?: NonNegativeLong;
  }
  export type CollectionDurationMinutes = number;
  export interface CreateAgentRequest {
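
The new capacity field surfaces through DataSync Discovery results. A hedged sketch of reading it from cluster-level resources (describeStorageSystemResources and its response shape are per the existing Discovery API; the job ARN is a placeholder):

import DataSync = require('aws-sdk/clients/datasync');

const datasync = new DataSync({ region: 'us-east-1' });

// Print how much of each discovered ONTAP cluster's capacity sits in
// cloud storage (for example, via data tiering).
async function printCloudStorageUsed(): Promise<void> {
  const res = await datasync.describeStorageSystemResources({
    DiscoveryJobArn: 'arn:aws:datasync:us-east-1:111122223333:discovery-job/example', // placeholder
    ResourceType: 'CLUSTER',
  }).promise();
  for (const cluster of res.ResourceDetails?.NetAppONTAPClusters ?? []) {
    console.log(cluster.ClusterName, cluster.ClusterCloudStorageUsed);
  }
}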
@@ -872,19 +876,19 @@ declare namespace DataSync {
  }
  export interface CreateLocationNfsRequest {
  /**
- * Specifies the subdirectory in the NFS file server that DataSync transfers to or from. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
+ * Specifies the export path in your NFS file server that you want DataSync to mount. This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.
  */
  Subdirectory: NfsSubdirectory;
  /**
- * Specifies the IP address or domain name of your NFS file server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information. You must specify be an IP version 4 address or Domain Name System (DNS)-compliant name.
+ * Specifies the Domain Name System (DNS) name or IP version 4 address of the NFS file server that your DataSync agent connects to.
  */
  ServerHostname: ServerHostname;
  /**
- * Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to connect to your NFS file server. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
+ * Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server. You can specify more than one agent. For more information, see Using multiple agents for transfers.
  */
  OnPremConfig: OnPremConfig;
  /**
- * Specifies the mount options that DataSync can use to mount your NFS share.
+ * Specifies the options that DataSync can use to mount your NFS file server.
  */
  MountOptions?: NfsMountOptions;
  /**
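
Putting the revised fields together, creating an NFS location looks like this (hostname, export path, and agent ARN are placeholders):

import DataSync = require('aws-sdk/clients/datasync');

const datasync = new DataSync({ region: 'us-east-1' });

// Register an NFS file server as a transfer location. Subdirectory is the
// export path DataSync mounts; OnPremConfig lists the connecting agents.
datasync.createLocationNfs({
  ServerHostname: 'nfs.example.com',
  Subdirectory: '/exports/data',
  OnPremConfig: {
    AgentArns: ['arn:aws:datasync:us-east-1:111122223333:agent/example'],
  },
}, (err, data) => {
  if (!err) console.log(data.LocationArn);
});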
@@ -1405,26 +1409,26 @@ declare namespace DataSync {
  }
  export interface DescribeLocationNfsRequest {
  /**
- * The Amazon Resource Name (ARN) of the NFS location to describe.
+ * Specifies the Amazon Resource Name (ARN) of the NFS location that you want information about.
  */
  LocationArn: LocationArn;
  }
  export interface DescribeLocationNfsResponse {
  /**
- * The Amazon Resource Name (ARN) of the NFS location that was described.
+ * The ARN of the NFS location.
  */
  LocationArn?: LocationArn;
  /**
- * The URL of the source NFS location that was described.
+ * The URL of the NFS location.
  */
  LocationUri?: LocationUri;
  OnPremConfig?: OnPremConfig;
  /**
- * The mount options that DataSync uses to mount your NFS share.
+ * The mount options that DataSync uses to mount your NFS file server.
  */
  MountOptions?: NfsMountOptions;
  /**
- * The time that the NFS location was created.
+ * The time when the NFS location was created.
  */
  CreationTime?: Time;
  }
@@ -2251,6 +2255,10 @@ declare namespace DataSync {
  * The number of LUNs (logical unit numbers) in the cluster.
  */
  LunCount?: NonNegativeLong;
+ /**
+ * The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).
+ */
+ ClusterCloudStorageUsed?: NonNegativeLong;
  }
  export type NetAppONTAPClusters = NetAppONTAPCluster[];
  export interface NetAppONTAPSVM {
@@ -2396,7 +2404,7 @@ declare namespace DataSync {
  export type ObjectTags = "PRESERVE"|"NONE"|string;
  export interface OnPremConfig {
  /**
- * ARNs of the agents to use for an NFS location.
+ * The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.
  */
  AgentArns: AgentArnList;
  }
@@ -2943,11 +2951,11 @@ declare namespace DataSync {
  }
  export interface UpdateLocationNfsRequest {
  /**
- * Specifies the Amazon Resource Name (ARN) of the NFS location that you want to update.
+ * Specifies the Amazon Resource Name (ARN) of the NFS transfer location that you want to update.
  */
  LocationArn: LocationArn;
  /**
- * Specifies the subdirectory in your NFS file system that DataSync uses to read from or write to during a transfer. The NFS path should be exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network. To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication. To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
+ * Specifies the export path in your NFS file server that you want DataSync to mount. This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.
  */
  Subdirectory?: NfsSubdirectory;
  OnPremConfig?: OnPremConfig;
package/clients/ecs.d.ts CHANGED
@@ -165,11 +165,11 @@ declare class ECS extends Service {
  */
  describeTaskSets(callback?: (err: AWSError, data: ECS.Types.DescribeTaskSetsResponse) => void): Request<ECS.Types.DescribeTaskSetsResponse, AWSError>;
  /**
- * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.
  */
  describeTasks(params: ECS.Types.DescribeTasksRequest, callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
- * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.
  */
  describeTasks(callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
@@ -269,11 +269,11 @@ declare class ECS extends Service {
  */
  listTaskDefinitions(callback?: (err: AWSError, data: ECS.Types.ListTaskDefinitionsResponse) => void): Request<ECS.Types.ListTaskDefinitionsResponse, AWSError>;
  /**
- * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results.
  */
  listTasks(params: ECS.Types.ListTasksRequest, callback?: (err: AWSError, data: ECS.Types.ListTasksResponse) => void): Request<ECS.Types.ListTasksResponse, AWSError>;
  /**
- * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results.
  */
  listTasks(callback?: (err: AWSError, data: ECS.Types.ListTasksResponse) => void): Request<ECS.Types.ListTasksResponse, AWSError>;
  /**
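
Since stopped tasks stay listable for at least an hour, a post-mortem sweep can pair listTasks with describeTasks. A sketch (the cluster name is a placeholder):

import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

// Fetch recently stopped tasks and report why each one stopped.
async function reportStoppedTasks(): Promise<void> {
  const listed = await ecs.listTasks({ cluster: 'my-cluster', desiredStatus: 'STOPPED' }).promise();
  if (!listed.taskArns || listed.taskArns.length === 0) return;
  const described = await ecs.describeTasks({ cluster: 'my-cluster', tasks: listed.taskArns }).promise();
  for (const task of described.tasks ?? []) {
    console.log(task.taskArn, task.stoppedReason);
  }
}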
@@ -429,11 +429,11 @@ declare class ECS extends Service {
  */
  updateContainerInstancesState(callback?: (err: AWSError, data: ECS.Types.UpdateContainerInstancesStateResponse) => void): Request<ECS.Types.UpdateContainerInstancesStateResponse, AWSError>;
  /**
- * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide. loadBalancers, serviceRegistries
+ * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
  */
  updateService(params: ECS.Types.UpdateServiceRequest, callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
- * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide. loadBalancers, serviceRegistries
+ * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
  */
  updateService(callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
@@ -549,11 +549,11 @@ declare namespace ECS {
  export type Attributes = Attribute[];
  export interface AutoScalingGroupProvider {
  /**
- * The Amazon Resource Name (ARN) that identifies the Auto Scaling group.
+ * The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.
  */
  autoScalingGroupArn: String;
  /**
- * The managed scaling settings for the Auto Scaling group capacity provider.
+ * he managed scaling settings for the Auto Scaling group capacity provider.
  */
  managedScaling?: ManagedScaling;
  /**
@@ -888,11 +888,11 @@ declare namespace ECS {
  */
  dependsOn?: ContainerDependencies;
  /**
- * Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
+ * Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds.
  */
  startTimeout?: BoxedInteger;
  /**
- * Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
+ * Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds.
  */
  stopTimeout?: BoxedInteger;
  /**
@@ -972,7 +972,7 @@ declare namespace ECS {
  */
  firelensConfiguration?: FirelensConfiguration;
  /**
- * A list of ARNs in SSM or Amazon S3 to a credential spec (credspeccode&gt;) file that configures a container for Active Directory authentication. This parameter is only used with domainless authentication. The format for each ARN is credentialspecdomainless:MyARN. Replace MyARN with the ARN in SSM or Amazon S3. The credspec must provide a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
+ * A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the dockerSecurityOptions. The maximum number of ARNs is 1. There are two formats for each ARN. credentialspecdomainless:MyARN You use credentialspecdomainless:MyARN to provide a CredSpec with an additional section for a secret in Secrets Manager. You provide the login credentials to the domain in the secret. Each task that runs on any container instance can join different domains. You can use this format without joining the container instance to a domain. credentialspec:MyARN You use credentialspec:MyARN to provide a CredSpec for a single domain. You must join the container instance to the domain before you start any tasks that use this task definition. In both formats, replace MyARN with the ARN in SSM or Amazon S3. If you provide a credentialspecdomainless:MyARN, the credspec must provide a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
  */
  credentialSpecs?: StringList;
  }
@@ -1313,7 +1313,7 @@ declare namespace ECS {
  */
  externalId?: String;
  /**
- * The task definition for the tasks in the task set to use.
+ * The task definition for the tasks in the task set to use. If a revision isn't specified, the latest ACTIVE revision is used.
  */
  taskDefinition: String;
  /**
@@ -2518,11 +2518,11 @@ declare namespace ECS {
2518
2518
  }
2519
2519
  export interface LoadBalancer {
2520
2520
  /**
2521
- * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
2521
+ * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
2522
2522
  */
2523
2523
  targetGroupArn?: String;
2524
2524
  /**
2525
- * The name of the load balancer to associate with the Amazon ECS service or task set. A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.
2525
+ * The name of the load balancer to associate with the Amazon ECS service or task set. If you are using an Application Load Balancer or a Network Load Balancer, the load balancer name parameter should be omitted.
2526
2526
  */
2527
2527
  loadBalancerName?: String;
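A hedged sketch of the two mutually exclusive fields (ARN and names are placeholders): with an Application or Network Load Balancer you set targetGroupArn and omit loadBalancerName; with a Classic Load Balancer you do the reverse.

```typescript
import { ECS } from 'aws-sdk';

// ALB/NLB case: target group ARN only (placeholder ARN).
const albLoadBalancer: ECS.LoadBalancer = {
  targetGroupArn: 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/web/0123456789abcdef',
  containerName: 'app',
  containerPort: 80,
};

// Classic Load Balancer case: name only, no target group ARN.
const clbLoadBalancer: ECS.LoadBalancer = {
  loadBalancerName: 'my-classic-lb',
  containerName: 'app',
  containerPort: 80,
};
```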
2528
2528
  /**
@@ -2605,7 +2605,7 @@ declare namespace ECS {
2605
2605
  */
2606
2606
  minimumScalingStepSize?: ManagedScalingStepSize;
2607
2607
  /**
2608
- * The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1 is used.
2608
+ * The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 10000 is used.
2609
2609
  */
2610
2610
  maximumScalingStepSize?: ManagedScalingStepSize;
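A minimal sketch of a managed scaling block reflecting the corrected default: omitting maximumScalingStepSize now means up to 10000 instances per scale-out step, so set it explicitly to cap step size (values here are illustrative).

```typescript
import { ECS } from 'aws-sdk';

const managedScaling: ECS.ManagedScaling = {
  status: 'ENABLED',
  targetCapacity: 100,         // keep the capacity provider at 100% utilization
  minimumScalingStepSize: 1,
  maximumScalingStepSize: 100, // default would be 10000 if omitted
};
```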
2611
2611
  /**
@@ -2726,7 +2726,7 @@ declare namespace ECS {
2726
2726
  */
2727
2727
  containerPort?: BoxedInteger;
2728
2728
  /**
2729
- * The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows: For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy. For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy. If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort. If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
2729
+ * The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows: For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy. For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy. If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort. If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
2730
2730
  */
2731
2731
  hostPort?: BoxedInteger;
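To illustrate the static and dynamic strategies described above, a sketch of two port mappings (port values illustrative):

```typescript
import { ECS } from 'aws-sdk';

// awsvpc or host network mode: static mapping, hostPort mirrors containerPort.
const staticMapping: ECS.PortMapping = {
  containerPort: 8080,
  hostPort: 8080,
  protocol: 'tcp',
};

// bridge network mode: hostPort 0 (or omitted) asks the agent to bind an
// ephemeral host port from the range discussed above.
const dynamicMapping: ECS.PortMapping = {
  containerPort: 8080,
  hostPort: 0,
  protocol: 'tcp',
};
```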
2732
2732
  /**
@@ -3724,7 +3724,7 @@ declare namespace ECS {
3724
3724
  */
3725
3725
  startedBy?: String;
3726
3726
  /**
3727
- * The stop code indicating why a task was stopped. The stoppedReason might contain additional details. The following are valid values: TaskFailedToStart EssentialContainerExited UserInitiated TerminationNotice ServiceSchedulerInitiated SpotInterruption
3727
+ * The stop code indicating why a task was stopped. The stoppedReason might contain additional details. For more information about stop codes, see Stopped tasks error codes in the Amazon ECS User Guide. The following are valid values: TaskFailedToStart EssentialContainerExited UserInitiated TerminationNotice ServiceSchedulerInitiated SpotInterruption
3728
3728
  */
3729
3729
  stopCode?: TaskStopCode;
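A sketch of reading the stop code after the fact (cluster name and task ARN are placeholders); the string values match the list above.

```typescript
import { ECS } from 'aws-sdk';

const ecs = new ECS();

ecs.describeTasks({
  cluster: 'my-cluster',
  tasks: ['arn:aws:ecs:us-east-1:123456789012:task/my-cluster/0123456789abcdef'],
}, (err, data) => {
  if (err) return console.error(err);
  for (const task of data.tasks ?? []) {
    if (task.stopCode === 'SpotInterruption') {
      console.log(`Spot interruption: ${task.stoppedReason}`);
    }
  }
});
```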
3730
3730
  /**
@@ -3736,7 +3736,7 @@ declare namespace ECS {
3736
3736
  */
3737
3737
  stoppedReason?: String;
3738
3738
  /**
3739
- * The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPED.
3739
+ * The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPING.
3740
3740
  */
3741
3741
  stoppingAt?: Timestamp;
3742
3742
  /**
@@ -3814,7 +3814,7 @@ declare namespace ECS {
3814
3814
  */
3815
3815
  runtimePlatform?: RuntimePlatform;
3816
3816
  /**
3817
- * The task launch types the task definition was validated against. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
3817
+ * The task launch types the task definition was validated against. The valid values are EC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
3818
3818
  */
3819
3819
  requiresCompatibilities?: CompatibilityList;
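For example, a task definition validated only against Fargate would be registered roughly as follows (a sketch with hypothetical names; Fargate additionally requires awsvpc network mode and task-level cpu/memory):

```typescript
import { ECS } from 'aws-sdk';

const ecs = new ECS();

ecs.registerTaskDefinition({
  family: 'fargate-web',                // hypothetical family
  requiresCompatibilities: ['FARGATE'], // EC2 | FARGATE | EXTERNAL
  networkMode: 'awsvpc',
  cpu: '256',
  memory: '512',
  containerDefinitions: [{ name: 'web', image: 'public.ecr.aws/nginx/nginx:latest' }],
}, (err) => {
  if (err) console.error(err);
});
```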
3820
3820
  /**
@@ -4319,7 +4319,7 @@ declare namespace ECS {
4319
4319
  }
4320
4320
  export interface Volume {
4321
4321
  /**
4322
- * The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
4322
+ * The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. This is required when you use an Amazon EFS volume.
4323
4323
  */
4324
4324
  name?: String;
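A sketch of the EFS case the added sentence describes (file system ID is a placeholder): the volume name is mandatory here, and it is what a container's mountPoints reference via sourceVolume.

```typescript
import { ECS } from 'aws-sdk';

const volume: ECS.Volume = {
  name: 'shared-data', // required when efsVolumeConfiguration is set
  efsVolumeConfiguration: {
    fileSystemId: 'fs-0123456789abcdef0', // placeholder file system ID
    rootDirectory: '/',
  },
};

// Referenced from a container definition:
const mountPoint: ECS.MountPoint = {
  sourceVolume: 'shared-data',
  containerPath: '/data',
};
```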
4325
4325
  /**
@@ -21202,6 +21202,7 @@ declare namespace SageMaker {
21202
21202
  * A list of tags associated with the transform job.
21203
21203
  */
21204
21204
  Tags?: TagList;
21205
+ DataCaptureConfig?: BatchDataCaptureConfig;
21205
21206
  }
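The new field surfaces batch data capture settings on transform-job output. As a sketch (the bucket is a placeholder), the same shape is supplied when creating a transform job:

```typescript
import { SageMaker } from 'aws-sdk';

const dataCaptureConfig: SageMaker.BatchDataCaptureConfig = {
  DestinationS3Uri: 's3://my-bucket/transform-capture/', // placeholder bucket
  GenerateInferenceId: true, // tag captured records with an inference ID
};
```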
21206
21207
  export type TransformJobArn = string;
21207
21208
  export interface TransformJobDefinition {
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
83
83
  /**
84
84
  * @constant
85
85
  */
86
- VERSION: '2.1429.0',
86
+ VERSION: '2.1430.0',
87
87
 
88
88
  /**
89
89
  * @api private