cdk-comprehend-s3olap 2.0.14 → 2.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +3 -3
- package/lib/cdk-comprehend-s3olap.js +2 -2
- package/lib/comprehend-lambdas.js +2 -2
- package/lib/iam-roles.js +4 -4
- package/node_modules/aws-sdk/CHANGELOG.md +15 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +11 -4
- package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +54 -5
- package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/mgn-2020-02-26.min.json +233 -44
- package/node_modules/aws-sdk/apis/mgn-2020-02-26.paginators.json +6 -0
- package/node_modules/aws-sdk/apis/migration-hub-refactor-spaces-2021-10-26.min.json +75 -25
- package/node_modules/aws-sdk/apis/pricing-2017-10-15.min.json +6 -0
- package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +231 -182
- package/node_modules/aws-sdk/apis/wellarchitected-2020-03-31.min.json +39 -22
- package/node_modules/aws-sdk/clients/apigateway.d.ts +1 -1
- package/node_modules/aws-sdk/clients/ec2.d.ts +25 -5
- package/node_modules/aws-sdk/clients/ecs.d.ts +40 -40
- package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +69 -1
- package/node_modules/aws-sdk/clients/mediaconvert.d.ts +1 -1
- package/node_modules/aws-sdk/clients/mgn.d.ts +218 -3
- package/node_modules/aws-sdk/clients/migrationhubrefactorspaces.d.ts +70 -8
- package/node_modules/aws-sdk/clients/pricing.d.ts +4 -4
- package/node_modules/aws-sdk/clients/sagemaker.d.ts +69 -8
- package/node_modules/aws-sdk/clients/transfer.d.ts +9 -9
- package/node_modules/aws-sdk/clients/wellarchitected.d.ts +28 -7
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +10 -10
- package/node_modules/aws-sdk/dist/aws-sdk.js +20 -7
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +60 -60
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +4 -4
package/node_modules/aws-sdk/clients/ecs.d.ts

@@ -21,27 +21,27 @@ declare class ECS extends Service {
    */
  createCapacityProvider(callback?: (err: AWSError, data: ECS.Types.CreateCapacityProviderResponse) => void): Request<ECS.Types.CreateCapacityProviderResponse, AWSError>;
  /**
-  * Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action. When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the IAM user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using
+  * Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action. When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the IAM user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
   */
  createCluster(params: ECS.Types.CreateClusterRequest, callback?: (err: AWSError, data: ECS.Types.CreateClusterResponse) => void): Request<ECS.Types.CreateClusterResponse, AWSError>;
  /**
-  * Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action. When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the IAM user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using
+  * Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action. When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the IAM user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
   */
  createCluster(callback?: (err: AWSError, data: ECS.Types.CreateClusterResponse) => void): Request<ECS.Types.CreateClusterResponse, AWSError>;
  /**
-  * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service
+  * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy. If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy. The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic: Determine which of the container instances in your cluster can support the task definition of your service. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner. This is the case even if you can choose a different placement strategy with the placementStrategy parameter. Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone based on the previous steps, favoring container instances with the fewest number of running tasks for this service.
   */
  createService(params: ECS.Types.CreateServiceRequest, callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
  /**
-  * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service
+  * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy. If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy. The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic: Determine which of the container instances in your cluster can support the task definition of your service. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner. This is the case even if you can choose a different placement strategy with the placementStrategy parameter. Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone based on the previous steps, favoring container instances with the fewest number of running tasks for this service.
   */
  createService(callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
  /**
-  * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS
+  * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
   */
  createTaskSet(params: ECS.Types.CreateTaskSetRequest, callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
  /**
-  * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS
+  * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
   */
  createTaskSet(callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
  /**
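The expanded createService documentation above is mostly about the two scheduler strategies and the minimumHealthyPercent/maximumPercent deployment knobs. As a rough illustration of how those parameters fit together in this SDK version, here is a minimal sketch; the cluster, service, and task definition names are placeholders, not anything from this package.

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// Sketch: a REPLICA service of four tasks. With minimumHealthyPercent 50%
// the scheduler may stop two tasks to free capacity during a deployment,
// and with maximumPercent 200% it may run up to eight tasks at once.
async function createDemoService(): Promise<void> {
  const res = await ecs.createService({
    cluster: 'demo-cluster',           // placeholder cluster name
    serviceName: 'demo-service',       // placeholder service name
    taskDefinition: 'demo-taskdef:1',  // placeholder family:revision
    desiredCount: 4,
    schedulingStrategy: 'REPLICA',
    deploymentConfiguration: {
      minimumHealthyPercent: 50,
      maximumPercent: 200,
    },
  }).promise();
  console.log('service ARN:', res.service?.serviceArn);
}

createDemoService().catch(console.error);
```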
@@ -85,11 +85,11 @@ declare class ECS extends Service {
   */
  deleteService(callback?: (err: AWSError, data: ECS.Types.DeleteServiceResponse) => void): Request<ECS.Types.DeleteServiceResponse, AWSError>;
  /**
-  * Deletes a specified task set within a service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS
+  * Deletes a specified task set within a service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
   */
  deleteTaskSet(params: ECS.Types.DeleteTaskSetRequest, callback?: (err: AWSError, data: ECS.Types.DeleteTaskSetResponse) => void): Request<ECS.Types.DeleteTaskSetResponse, AWSError>;
  /**
-  * Deletes a specified task set within a service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS
+  * Deletes a specified task set within a service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
   */
  deleteTaskSet(callback?: (err: AWSError, data: ECS.Types.DeleteTaskSetResponse) => void): Request<ECS.Types.DeleteTaskSetResponse, AWSError>;
  /**
@@ -157,11 +157,11 @@ declare class ECS extends Service {
   */
  describeTaskSets(callback?: (err: AWSError, data: ECS.Types.DescribeTaskSetsResponse) => void): Request<ECS.Types.DescribeTaskSetsResponse, AWSError>;
  /**
-  * Describes a specified task or tasks.
+  * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
   */
  describeTasks(params: ECS.Types.DescribeTasksRequest, callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
-  * Describes a specified task or tasks.
+  * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
   */
  describeTasks(callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
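The new sentence about stopped tasks matters when polling: a task returned by describeTasks may have been stopped up to an hour earlier. A minimal sketch (cluster name is a placeholder) that checks lastStatus instead of assuming every returned task is running:

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// Stopped tasks remain visible for at least an hour, so inspect
// lastStatus (and stoppedReason, if set) for each returned task.
async function checkTask(taskArn: string): Promise<void> {
  const res = await ecs.describeTasks({
    cluster: 'demo-cluster', // placeholder
    tasks: [taskArn],
  }).promise();
  for (const task of res.tasks ?? []) {
    console.log(task.taskArn, task.lastStatus, task.stoppedReason ?? '');
  }
}
```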
@@ -173,11 +173,11 @@ declare class ECS extends Service {
   */
  discoverPollEndpoint(callback?: (err: AWSError, data: ECS.Types.DiscoverPollEndpointResponse) => void): Request<ECS.Types.DiscoverPollEndpointResponse, AWSError>;
  /**
-  * Runs a command remotely on a container within a task.
+  * Runs a command remotely on a container within a task. If you use a condition key in your IAM policy to refine the conditions for the policy statement, for example, to limit the actions to a specific cluster, you receive an AccessDeniedException when there is a mismatch between the condition key value and the corresponding parameter value.
   */
  executeCommand(params: ECS.Types.ExecuteCommandRequest, callback?: (err: AWSError, data: ECS.Types.ExecuteCommandResponse) => void): Request<ECS.Types.ExecuteCommandResponse, AWSError>;
  /**
-  * Runs a command remotely on a container within a task.
+  * Runs a command remotely on a container within a task. If you use a condition key in your IAM policy to refine the conditions for the policy statement, for example, to limit the actions to a specific cluster, you receive an AccessDeniedException when there is a mismatch between the condition key value and the corresponding parameter value.
   */
  executeCommand(callback?: (err: AWSError, data: ECS.Types.ExecuteCommandResponse) => void): Request<ECS.Types.ExecuteCommandResponse, AWSError>;
  /**
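The added caveat about IAM condition keys is the operative change here: if a policy pins ecs:ExecuteCommand to one cluster via a condition key, the cluster passed in the request must match it or the call fails with AccessDeniedException. A minimal sketch, with placeholder cluster and container names:

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// If the caller's IAM policy restricts ExecuteCommand with a condition
// key (e.g. limiting it to one cluster), a mismatched cluster parameter
// here surfaces as an AccessDeniedException.
async function runInContainer(taskArn: string): Promise<void> {
  const res = await ecs.executeCommand({
    cluster: 'demo-cluster', // must satisfy any policy condition on the cluster
    task: taskArn,
    container: 'app',        // placeholder container name
    command: 'ls -al',
    interactive: true,
  }).promise();
  console.log('exec session ID:', res.session?.sessionId);
}
```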
@@ -405,11 +405,11 @@ declare class ECS extends Service {
   */
  updateContainerInstancesState(callback?: (err: AWSError, data: ECS.Types.UpdateContainerInstancesStateResponse) => void): Request<ECS.Types.UpdateContainerInstancesStateResponse, AWSError>;
  /**
-  *
+  * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries. If you specified a custom IAM role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
   */
  updateService(params: ECS.Types.UpdateServiceRequest, callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
-  *
+  * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries. If you specified a custom IAM role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
   */
  updateService(callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
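The updateService doc comment was previously empty in these typings; among the behaviors now documented is the forceNewDeployment option for redeploying a same-tag image. A minimal sketch, with placeholder cluster and service names:

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// If the task definition still points at the same image tag (e.g.
// my_image:latest), forceNewDeployment makes the service re-pull the
// image without registering a new task definition revision.
async function redeploy(): Promise<void> {
  await ecs.updateService({
    cluster: 'demo-cluster', // placeholder
    service: 'demo-service', // placeholder
    forceNewDeployment: true,
  }).promise();
}

redeploy().catch(console.error);
```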
@@ -610,7 +610,7 @@ declare namespace ECS {
  export type CapacityProviders = CapacityProvider[];
  export interface Cluster {
    /**
-     * The Amazon Resource Name (ARN) that identifies the cluster.
+     * The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
     */
    clusterArn?: String;
    /**
@@ -662,11 +662,11 @@ declare namespace ECS {
     */
    defaultCapacityProviderStrategy?: CapacityProviderStrategy;
    /**
-     * The resources attached to a cluster. When using a capacity provider with a cluster, the
+     * The resources attached to a cluster. When using a capacity provider with a cluster, the capacity provider and associated resources are returned as cluster attachments.
     */
    attachments?: Attachments;
    /**
-     * The status of the capacity providers associated with the cluster. The following are the states that are returned. UPDATE_IN_PROGRESS The available capacity providers for the cluster are updating.
+     * The status of the capacity providers associated with the cluster. The following are the states that are returned. UPDATE_IN_PROGRESS The available capacity providers for the cluster are updating. UPDATE_COMPLETE The capacity providers have successfully updated. UPDATE_FAILED The capacity provider updates failed.
     */
    attachmentsStatus?: String;
  }
@@ -783,7 +783,7 @@ declare namespace ECS {
     */
    cpu?: Integer;
    /**
-     * The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container,
+     * The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
     */
    memory?: BoxedInteger;
    /**
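The completed memory documentation spells out the hard/soft limit relationship: memory (hard) must exceed memoryReservation (soft), and neither should drop below the Docker daemon minimums quoted above. A rough sketch of a container definition fragment (the name and image are placeholders):

```ts
import { ECS } from 'aws-sdk';

// Fragment of a RegisterTaskDefinitionRequest container definition.
// memory is the hard limit (the container is killed above it);
// memoryReservation is the soft limit subtracted from the instance's
// available memory when the task is placed.
const container: ECS.Types.ContainerDefinition = {
  name: 'app',
  image: 'public.ecr.aws/docker/library/nginx:latest', // placeholder image
  memory: 512,            // hard limit, MiB; must be > memoryReservation
  memoryReservation: 256, // soft limit, MiB; stay above the daemon minimums
};
```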
@@ -937,7 +937,7 @@ declare namespace ECS {
  }
  export interface ContainerInstance {
    /**
-     * The Amazon Resource Name (ARN) of the container instance.
+     * The Amazon Resource Name (ARN) of the container instance. For more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
     */
    containerInstanceArn?: String;
    /**
@@ -965,7 +965,7 @@ declare namespace ECS {
     */
    registeredResources?: Resources;
    /**
-     * The status of the container instance. The valid values are REGISTERING, REGISTRATION_FAILED, ACTIVE, INACTIVE, DEREGISTERING, or DRAINING. If your account has opted in to the awsvpcTrunking account setting, then any newly registered container instance will transition to a REGISTERING status while the trunk elastic network interface is provisioned for the instance. If the registration fails, the instance will transition to a REGISTRATION_FAILED status. You can describe the container instance and see the reason for failure in the statusReason parameter. Once the container instance is terminated, the instance transitions to a DEREGISTERING status while the trunk elastic network interface is deprovisioned. The instance then transitions to an INACTIVE status. The ACTIVE status indicates that the container instance can accept tasks. The DRAINING indicates that new tasks aren't placed on the container instance and any service tasks running on the container instance are removed if possible. For more information, see Container
+     * The status of the container instance. The valid values are REGISTERING, REGISTRATION_FAILED, ACTIVE, INACTIVE, DEREGISTERING, or DRAINING. If your account has opted in to the awsvpcTrunking account setting, then any newly registered container instance will transition to a REGISTERING status while the trunk elastic network interface is provisioned for the instance. If the registration fails, the instance will transition to a REGISTRATION_FAILED status. You can describe the container instance and see the reason for failure in the statusReason parameter. Once the container instance is terminated, the instance transitions to a DEREGISTERING status while the trunk elastic network interface is deprovisioned. The instance then transitions to an INACTIVE status. The ACTIVE status indicates that the container instance can accept tasks. The DRAINING indicates that new tasks aren't placed on the container instance and any service tasks running on the container instance are removed if possible. For more information, see Container instance draining in the Amazon Elastic Container Service Developer Guide.
     */
    status?: String;
    /**
@@ -973,7 +973,7 @@ declare namespace ECS {
     */
    statusReason?: String;
    /**
-     * This parameter returns true if the agent is connected to Amazon ECS.
+     * This parameter returns true if the agent is connected to Amazon ECS. An instance with an agent that may be unhealthy or stopped returns false. Only instances connected to an agent can accept task placement requests.
     */
    agentConnected?: Boolean;
    /**
@@ -997,7 +997,7 @@ declare namespace ECS {
     */
    registeredAt?: Timestamp;
    /**
-     * The resources attached to a container instance, such as elastic network
+     * The resources attached to a container instance, such as an elastic network interface.
     */
    attachments?: Attachments;
    /**
@@ -1156,7 +1156,7 @@ declare namespace ECS {
     */
    taskDefinition?: String;
    /**
-     * A load balancer object representing the load balancers to use with your service. For more information, see Service
+     * A load balancer object representing the load balancers to use with your service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. If the service uses the rolling update (ECS) deployment controller and is using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that use multiple target groups. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. If the service uses the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating a CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, CodeDeploy determines which task set in your service has the status PRIMARY, and it associates one target group with it. Then, it also associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that you can use to perform validation tests with Lambda functions before routing production traffic to it. If you use the CODE_DEPLOY deployment controller, these values can be changed when updating the service. For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name, and the container port to access from the load balancer. The container name must be as it appears in a container definition. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group that's specified here. For Classic Load Balancers, this object must contain the load balancer name, the container name, and the container port to access from the load balancer. The container name must be as it appears in a container definition. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer that's specified here. Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers aren't supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance. This is because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.
     */
    loadBalancers?: LoadBalancers;
    /**
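The expanded loadBalancers documentation distinguishes the target-group shape (ALB/NLB) from the Classic Load Balancer shape. A minimal sketch of the ALB/NLB variant (the ARN and names are placeholders; note the load balancer name is omitted, as the text requires):

```ts
import { ECS } from 'aws-sdk';

// For an Application/Network Load Balancer: target group ARN + container
// name + container port, with loadBalancerName omitted. A Classic Load
// Balancer is the reverse: loadBalancerName instead of targetGroupArn.
const loadBalancers: ECS.Types.LoadBalancers = [
  {
    targetGroupArn:
      'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/demo/0123456789abcdef', // placeholder
    containerName: 'app', // must match a container definition name
    containerPort: 80,
  },
];
```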
@@ -1220,7 +1220,7 @@ declare namespace ECS {
     */
    tags?: Tags;
    /**
-     * Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging
+     * Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.
     */
    enableECSManagedTags?: Boolean;
    /**
@@ -1264,11 +1264,11 @@ declare namespace ECS {
     */
    loadBalancers?: LoadBalancers;
    /**
-     * The details of the service discovery registries to assign to this task set. For more information, see Service
+     * The details of the service discovery registries to assign to this task set. For more information, see Service discovery.
     */
    serviceRegistries?: ServiceRegistries;
    /**
-     * The launch type that new tasks in the task set uses. For more information, see Amazon ECS
+     * The launch type that new tasks in the task set uses. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.
     */
    launchType?: LaunchType;
    /**
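The new launchType note makes launchType and capacityProviderStrategy mutually exclusive on a task set. A hedged sketch of a createTaskSet call honoring that rule (all names are placeholders):

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// launchType and capacityProviderStrategy are mutually exclusive:
// specifying one means omitting the other.
async function createExternalTaskSet(): Promise<void> {
  await ecs.createTaskSet({
    cluster: 'demo-cluster',          // placeholder
    service: 'demo-service',          // placeholder; EXTERNAL deployment controller
    taskDefinition: 'demo-taskdef:1', // placeholder
    launchType: 'EC2',                // so capacityProviderStrategy is omitted
  }).promise();
}
```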
@@ -1480,11 +1480,11 @@ declare namespace ECS {
     */
    deploymentCircuitBreaker?: DeploymentCircuitBreaker;
    /**
-     * If a service is using the rolling update (ECS) deployment type, the
+     * If a service is using the rolling update (ECS) deployment type, the maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the REPLICA service scheduler and has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default maximumPercent value for a service using the REPLICA service scheduler is 200%. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.
     */
    maximumPercent?: BoxedInteger;
    /**
-     * If a service is using the rolling update (ECS) deployment type, the
+     * If a service is using the rolling update (ECS) deployment type, the minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. For services that do not use a load balancer, the following should be noted: A service is considered healthy if all essential containers within the tasks in the service pass their health checks. If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a RUNNING state before the task is counted towards the minimum healthy percent total. If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings. For services that do use a load balancer, the following should be noted: If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.
     */
    minimumHealthyPercent?: BoxedInteger;
  }
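The filled-in maximumPercent/minimumHealthyPercent text turns on rounding direction, which is easy to get backwards. A small worked sketch of the arithmetic for a four-task service:

```ts
import { ECS } from 'aws-sdk';

// For desiredCount = 4: maximum is rounded down, minimum is rounded up.
//   floor(4 * 200 / 100) = 8 tasks may be RUNNING/PENDING during a deployment
//   ceil(4 * 50 / 100)   = 2 tasks must stay RUNNING throughout
const deploymentConfiguration: ECS.Types.DeploymentConfiguration = {
  maximumPercent: 200,
  minimumHealthyPercent: 50,
};
```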
@@ -1503,7 +1503,7 @@ declare namespace ECS {
     */
    cluster?: String;
    /**
-     * The container instance ID or full ARN of the container instance to deregister.
+     * The container instance ID or full ARN of the container instance to deregister. For more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
     */
    containerInstance: String;
    /**
@@ -1721,7 +1721,7 @@ declare namespace ECS {
  export type DevicesList = Device[];
  export interface DiscoverPollEndpointRequest {
    /**
-     * The container instance ID or full ARN of the container instance.
+     * The container instance ID or full ARN of the container instance. For more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
     */
    containerInstance?: String;
    /**
@@ -1765,11 +1765,11 @@ declare namespace ECS {
  export type Double = number;
  export interface EFSAuthorizationConfig {
    /**
-     * The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration must either be omitted or set to / which will enforce the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS
+     * The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration must either be omitted or set to / which will enforce the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS access points in the Amazon Elastic File System User Guide.
     */
    accessPointId?: String;
    /**
-     * Determines whether to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS
+     * Determines whether to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS access points in the Amazon Elastic Container Service Developer Guide.
     */
    iam?: EFSAuthorizationConfigIAM;
  }
@@ -1785,11 +1785,11 @@ declare namespace ECS {
   */
  rootDirectory?: String;
  /**
-  * Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of DISABLED is used. For more information, see Encrypting
+  * Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of DISABLED is used. For more information, see Encrypting data in transit in the Amazon Elastic File System User Guide.
   */
  transitEncryption?: EFSTransitEncryption;
  /**
-  * The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see EFS
+  * The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see EFS mount helper in the Amazon Elastic File System User Guide.
   */
  transitEncryptionPort?: BoxedInteger;
  /**
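Taken together, the EFSAuthorizationConfig and EFSVolumeConfiguration fields above compose like this; a minimal sketch with hypothetical file system and access point IDs:

```ts
// Minimal sketch: an EFS volume using an access point with IAM authorization.
// Transit encryption must be ENABLED whenever an access point or IAM auth is used.
import ECS from 'aws-sdk/clients/ecs';

const ecs = new ECS();

ecs.registerTaskDefinition({
  family: 'efs-example',                  // hypothetical family name
  containerDefinitions: [{
    name: 'app',
    image: 'public.ecr.aws/nginx/nginx:latest',
    memoryReservation: 128,
    mountPoints: [{ sourceVolume: 'shared', containerPath: '/data' }],
  }],
  volumes: [{
    name: 'shared',
    efsVolumeConfiguration: {
      fileSystemId: 'fs-12345678',        // hypothetical file system ID
      transitEncryption: 'ENABLED',
      authorizationConfig: {
        accessPointId: 'fsap-12345678',   // hypothetical; rootDirectory must be omitted or "/"
        iam: 'ENABLED',
      },
    },
  }],
}).promise().then(res => console.log(res.taskDefinition?.taskDefinitionArn));
```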
@@ -1903,7 +1903,7 @@ declare namespace ECS {
 }
 export interface FSxWindowsFileServerAuthorizationConfig {
  /**
-  * The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of a Secrets Manager secret or SSM Parameter Store parameter. The
+  * The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of a Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.
   */
  credentialsParameter: String;
  /**
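A minimal sketch of how credentialsParameter is supplied in practice, via the enclosing fsxWindowsFileServerVolumeConfiguration; the ARN, domain, and IDs are hypothetical:

```ts
// Minimal sketch: an FSx for Windows File Server volume whose credentials are a
// Secrets Manager secret referenced by ARN through credentialsParameter.
import ECS from 'aws-sdk/clients/ecs';

const ecs = new ECS();

ecs.registerTaskDefinition({
  family: 'fsx-example',
  containerDefinitions: [{
    name: 'win-app',
    image: 'mcr.microsoft.com/windows/servercore/iis',
    memoryReservation: 512,
    mountPoints: [{ sourceVolume: 'share', containerPath: 'C:\\fsx' }],
  }],
  volumes: [{
    name: 'share',
    fsxWindowsFileServerVolumeConfiguration: {
      fileSystemId: 'fs-0123456789abcdef0',   // hypothetical FSx file system ID
      rootDirectory: 'share',
      authorizationConfig: {
        credentialsParameter: 'arn:aws:secretsmanager:us-east-1:111122223333:secret:fsx-creds', // hypothetical
        domain: 'corp.example.com',           // hypothetical Active Directory domain
      },
    },
  }],
}).promise();
```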
@@ -1946,7 +1946,7 @@ declare namespace ECS {
   */
  type: FirelensConfigurationType;
  /**
-  * The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is "options":{"enable-ecs-log-metadata":"true|false","config-file-type":"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}. For more information, see Creating a
+  * The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is "options":{"enable-ecs-log-metadata":"true|false","config-file-type":"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}. For more information, see Creating a task definition that uses a FireLens configuration in the Amazon Elastic Container Service Developer Guide. Tasks hosted on Fargate only support the file configuration file type.
   */
  options?: FirelensConfigurationOptionsMap;
 }
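The options map documented above looks like this when registering a FireLens log-router container; the bucket and family names are hypothetical, and on Fargate config-file-type would have to be file:

```ts
// Minimal sketch: a Fluent Bit log router configured through firelensConfiguration.
import ECS from 'aws-sdk/clients/ecs';

const ecs = new ECS();

ecs.registerTaskDefinition({
  family: 'firelens-example',
  containerDefinitions: [{
    name: 'log-router',
    image: 'public.ecr.aws/aws-observability/aws-for-fluent-bit:stable',
    essential: true,
    memoryReservation: 64,
    firelensConfiguration: {
      type: 'fluentbit',
      options: {
        'enable-ecs-log-metadata': 'true',
        'config-file-type': 's3',                                // "file" on Fargate
        'config-file-value': 'arn:aws:s3:::mybucket/fluent.conf', // hypothetical bucket
      },
    },
  }],
}).promise();
```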
@@ -2367,7 +2367,7 @@ declare namespace ECS {
 }
 export interface LoadBalancer {
  /**
-  * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering
+  * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
   */
  targetGroupArn?: String;
  /**
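As the note on awsvpc implies, the target group is wired up at service-creation time; a minimal sketch with hypothetical ARNs, subnets, and security groups:

```ts
// Minimal sketch: a Fargate service behind an ALB target group. Because awsvpc
// tasks register by elastic network interface, the target group's target type
// must be "ip", not "instance".
import ECS from 'aws-sdk/clients/ecs';

const ecs = new ECS();

ecs.createService({
  cluster: 'my-cluster',
  serviceName: 'web',
  taskDefinition: 'web-task',   // hypothetical awsvpc-mode task definition
  desiredCount: 2,
  launchType: 'FARGATE',
  loadBalancers: [{
    targetGroupArn: 'arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/web/abc123',
    containerName: 'web',
    containerPort: 80,
  }],
  networkConfiguration: {
    awsvpcConfiguration: {
      subnets: ['subnet-0123456789abcdef0'],     // hypothetical subnet
      securityGroups: ['sg-0123456789abcdef0'],  // hypothetical security group
      assignPublicIp: 'ENABLED',
    },
  },
}).promise();
```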
@@ -2394,7 +2394,7 @@ declare namespace ECS {
   */
  options?: LogConfigurationOptionsMap;
  /**
-  * The secrets to pass to the log configuration. For more information, see Specifying
+  * The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.
   */
  secretOptions?: SecretList;
 }
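A minimal sketch of secretOptions in use, keeping a log-driver token out of the plain options map (the endpoint and ARN are hypothetical):

```ts
// Minimal sketch: sensitive log-driver settings flow through secretOptions,
// which resolve from Secrets Manager or SSM Parameter Store at runtime.
import ECS from 'aws-sdk/clients/ecs';

const logConfiguration: ECS.LogConfiguration = {
  logDriver: 'splunk',
  options: { 'splunk-url': 'https://splunk.example.com:8088' },  // hypothetical endpoint
  secretOptions: [{
    name: 'splunk-token',
    valueFrom: 'arn:aws:secretsmanager:us-east-1:111122223333:secret:splunk-token', // hypothetical
  }],
};
// Attach under containerDefinitions[].logConfiguration in registerTaskDefinition.
```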
@@ -2917,7 +2917,7 @@ declare namespace ECS {
 }
 export interface RuntimePlatform {
  /**
-  * The CPU architecture. You can run your Linux tasks on an ARM-based platform by setting the value to ARM64. This option is available for tasks that run on
+  * The CPU architecture. You can run your Linux tasks on an ARM-based platform by setting the value to ARM64. This option is available for tasks that run on Linux Amazon EC2 instances or Linux containers on Fargate.
   */
  cpuArchitecture?: CPUArchitecture;
  /**
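A minimal sketch of opting a Fargate task into ARM64 via runtimePlatform (the family and image are hypothetical):

```ts
// Minimal sketch: run Linux containers on Fargate with an ARM-based platform.
import ECS from 'aws-sdk/clients/ecs';

const ecs = new ECS();

ecs.registerTaskDefinition({
  family: 'arm64-example',
  requiresCompatibilities: ['FARGATE'],
  networkMode: 'awsvpc',
  cpu: '256',
  memory: '512',
  runtimePlatform: {
    cpuArchitecture: 'ARM64',
    operatingSystemFamily: 'LINUX',
  },
  containerDefinitions: [{
    name: 'app',
    image: 'public.ecr.aws/docker/library/alpine:latest', // must be an arm64 image
  }],
}).promise();
```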
@@ -2952,7 +2952,7 @@ declare namespace ECS {
 export type SensitiveString = string;
 export interface Service {
  /**
-  * The ARN that identifies the service.
+  * The ARN that identifies the service. For more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
   */
  serviceArn?: String;
  /**
@@ -107,6 +107,14 @@ declare class LookoutEquipment extends Service {
   * Lists all datasets currently available in your account, filtering on the dataset name.
   */
  listDatasets(callback?: (err: AWSError, data: LookoutEquipment.Types.ListDatasetsResponse) => void): Request<LookoutEquipment.Types.ListDatasetsResponse, AWSError>;
+ /**
+  * Lists all inference events that have been found for the specified inference scheduler.
+  */
+ listInferenceEvents(params: LookoutEquipment.Types.ListInferenceEventsRequest, callback?: (err: AWSError, data: LookoutEquipment.Types.ListInferenceEventsResponse) => void): Request<LookoutEquipment.Types.ListInferenceEventsResponse, AWSError>;
+ /**
+  * Lists all inference events that have been found for the specified inference scheduler.
+  */
+ listInferenceEvents(callback?: (err: AWSError, data: LookoutEquipment.Types.ListInferenceEventsResponse) => void): Request<LookoutEquipment.Types.ListInferenceEventsResponse, AWSError>;
  /**
   * Lists all inference executions that have been performed by the specified inference scheduler.
   */
@@ -556,7 +564,7 @@ declare namespace LookoutEquipment {
   */
  DatasetArn?: DatasetArn;
  /**
-  * Specifies the time the dataset was created in
+  * Specifies the time the dataset was created in Lookout for Equipment.
   */
  CreatedAt?: Timestamp;
  /**
@@ -758,10 +766,38 @@ declare namespace LookoutEquipment {
   */
  TotalNumberOfDuplicateTimestamps: Integer;
 }
+export type EventDurationInSeconds = number;
 export type FileNameTimestampFormat = string;
 export type Float = number;
 export type IamRoleArn = string;
 export type IdempotenceToken = string;
+export type InferenceEventSummaries = InferenceEventSummary[];
+export interface InferenceEventSummary {
+  /**
+   * The Amazon Resource Name (ARN) of the inference scheduler being used for the inference event.
+   */
+  InferenceSchedulerArn?: InferenceSchedulerArn;
+  /**
+   * The name of the inference scheduler being used for the inference events.
+   */
+  InferenceSchedulerName?: InferenceSchedulerName;
+  /**
+   * Indicates the starting time of an inference event.
+   */
+  EventStartTime?: Timestamp;
+  /**
+   * Indicates the ending time of an inference event.
+   */
+  EventEndTime?: Timestamp;
+  /**
+   * An array which specifies the names and values of all sensors contributing to an inference event.
+   */
+  Diagnostics?: ModelMetrics;
+  /**
+   * Indicates the size of an inference event in seconds.
+   */
+  EventDurationInSeconds?: EventDurationInSeconds;
+}
 export type InferenceExecutionStatus = "IN_PROGRESS"|"SUCCESS"|"FAILED"|string;
 export type InferenceExecutionSummaries = InferenceExecutionSummary[];
 export interface InferenceExecutionSummary {
@@ -1045,6 +1081,38 @@ declare namespace LookoutEquipment {
   */
  DatasetSummaries?: DatasetSummaries;
 }
+export interface ListInferenceEventsRequest {
+  /**
+   * An opaque pagination token indicating where to continue the listing of inference events.
+   */
+  NextToken?: NextToken;
+  /**
+   * Specifies the maximum number of inference events to list.
+   */
+  MaxResults?: MaxResults;
+  /**
+   * The name of the inference scheduler for the inference events listed.
+   */
+  InferenceSchedulerName: InferenceSchedulerIdentifier;
+  /**
+   * Lookout for Equipment will return all the inference events with start time equal to or greater than the start time given.
+   */
+  IntervalStartTime: Timestamp;
+  /**
+   * Lookout for Equipment will return all the inference events with end time equal to or less than the end time given.
+   */
+  IntervalEndTime: Timestamp;
+}
+export interface ListInferenceEventsResponse {
+  /**
+   * An opaque pagination token indicating where to continue the listing of inference events.
+   */
+  NextToken?: NextToken;
+  /**
+   * Provides an array of information about the individual inference events returned from the ListInferenceEvents operation, including scheduler used, event start time, event end time, diagnostics, and so on.
+   */
+  InferenceEventSummaries?: InferenceEventSummaries;
+}
 export interface ListInferenceExecutionsRequest {
  /**
   * An opaque pagination token indicating where to continue the listing of inference executions.
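Putting the new request and response shapes together, a minimal sketch that pages through ListInferenceEvents for one scheduler (the scheduler name and time window are hypothetical):

```ts
// Minimal sketch: drain all pages of the new ListInferenceEvents operation.
import LookoutEquipment from 'aws-sdk/clients/lookoutequipment';

const le = new LookoutEquipment({ region: 'us-east-1' });

async function listAllInferenceEvents(): Promise<void> {
  let nextToken: string | undefined;
  do {
    const res = await le.listInferenceEvents({
      InferenceSchedulerName: 'my-scheduler',              // hypothetical scheduler
      IntervalStartTime: new Date('2022-05-01T00:00:00Z'), // events starting at/after this
      IntervalEndTime: new Date('2022-05-02T00:00:00Z'),   // events ending at/before this
      MaxResults: 50,
      NextToken: nextToken,
    }).promise();
    for (const ev of res.InferenceEventSummaries ?? []) {
      console.log(ev.EventStartTime, ev.EventEndTime, ev.EventDurationInSeconds);
    }
    nextToken = res.NextToken;
  } while (nextToken);
}

listAllInferenceEvents().catch(console.error);
```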
@@ -1715,7 +1715,7 @@ declare namespace MediaConvert {
   */
  Mapping?: DolbyVisionMapping;
  /**
-  * Required when you
+  * Required when you enable Dolby Vision. Use Profile 5 to include frame-interleaved Dolby Vision metadata in your output. Your input must include Dolby Vision metadata or an HDR10 YUV color space. Use Profile 8.1 to include frame-interleaved Dolby Vision metadata and HDR10 metadata in your output. Your input must include Dolby Vision metadata.
   */
  Profile?: DolbyVisionProfile;
 }
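A minimal sketch of where this Profile setting lives in a job's video preprocessors; the PROFILE_5 / PROFILE_8_1 enum spellings are assumed from the doc text above:

```ts
// Minimal sketch: Dolby Vision preprocessor settings for a MediaConvert output.
import MediaConvert from 'aws-sdk/clients/mediaconvert';

const dolbyVision: MediaConvert.DolbyVision = {
  // Profile 5: frame-interleaved Dolby Vision metadata; the input needs Dolby Vision
  // metadata or an HDR10 YUV color space. Profile 8.1 additionally carries HDR10 metadata.
  Profile: 'PROFILE_5',
};
// Attach under VideoDescription.VideoPreprocessors.DolbyVision in a CreateJob request.
```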