cdk-comprehend-s3olap 2.0.97 → 2.0.98

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,11 +12,11 @@ declare class CodeStarNotifications extends Service {
12
12
  constructor(options?: CodeStarNotifications.Types.ClientConfiguration)
13
13
  config: Config & CodeStarNotifications.Types.ClientConfiguration;
14
14
  /**
15
- * Creates a notification rule for a resource. The rule specifies the events you want notifications about and the targets (such as SNS topics) where you want to receive them.
15
+ * Creates a notification rule for a resource. The rule specifies the events you want notifications about and the targets (such as Chatbot topics or Chatbot clients configured for Slack) where you want to receive them.
16
16
  */
17
17
  createNotificationRule(params: CodeStarNotifications.Types.CreateNotificationRuleRequest, callback?: (err: AWSError, data: CodeStarNotifications.Types.CreateNotificationRuleResult) => void): Request<CodeStarNotifications.Types.CreateNotificationRuleResult, AWSError>;
18
18
  /**
19
- * Creates a notification rule for a resource. The rule specifies the events you want notifications about and the targets (such as SNS topics) where you want to receive them.
19
+ * Creates a notification rule for a resource. The rule specifies the events you want notifications about and the targets (such as Chatbot topics or Chatbot clients configured for Slack) where you want to receive them.
20
20
  */
21
21
  createNotificationRule(callback?: (err: AWSError, data: CodeStarNotifications.Types.CreateNotificationRuleResult) => void): Request<CodeStarNotifications.Types.CreateNotificationRuleResult, AWSError>;
22
22
  /**
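
The hunk above comes from the CodeStarNotifications client typings bundled with this package. As a rough illustration of how `createNotificationRule` is invoked through that interface, here is a minimal sketch using the aws-sdk v2 promise style; the region, rule name, ARNs, and event type ID are placeholder values, not taken from the diff.

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications({ region: 'us-east-1' });

async function createRule() {
  // Placeholder ARNs, name, and event type ID -- substitute your own resources.
  const result = await client.createNotificationRule({
    Name: 'example-pipeline-notifications',
    Resource: 'arn:aws:codepipeline:us-east-1:111111111111:example-pipeline',
    EventTypeIds: ['codepipeline-pipeline-pipeline-execution-failed'],
    Targets: [
      { TargetType: 'SNS', TargetAddress: 'arn:aws:sns:us-east-1:111111111111:example-topic' },
    ],
    DetailType: 'FULL',
  }).promise();
  console.log('Created notification rule:', result.Arn);
}

createRule().catch(console.error);
```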
@@ -52,11 +52,11 @@ declare class CodeStarNotifications extends Service {
52
52
  */
53
53
  listEventTypes(callback?: (err: AWSError, data: CodeStarNotifications.Types.ListEventTypesResult) => void): Request<CodeStarNotifications.Types.ListEventTypesResult, AWSError>;
54
54
  /**
55
- * Returns a list of the notification rules for an AWS account.
55
+ * Returns a list of the notification rules for an Amazon Web Services account.
56
56
  */
57
57
  listNotificationRules(params: CodeStarNotifications.Types.ListNotificationRulesRequest, callback?: (err: AWSError, data: CodeStarNotifications.Types.ListNotificationRulesResult) => void): Request<CodeStarNotifications.Types.ListNotificationRulesResult, AWSError>;
58
58
  /**
59
- * Returns a list of the notification rules for an AWS account.
59
+ * Returns a list of the notification rules for an Amazon Web Services account.
60
60
  */
61
61
  listNotificationRules(callback?: (err: AWSError, data: CodeStarNotifications.Types.ListNotificationRulesResult) => void): Request<CodeStarNotifications.Types.ListNotificationRulesResult, AWSError>;
62
62
  /**
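
Since `listNotificationRules` returns results page by page, a simple way to gather every rule in the account is to follow `NextToken` until it is absent. A minimal sketch against the typings shown above:

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// Collect every notification rule in the account, following NextToken pages.
async function listAllRules() {
  const rules: CodeStarNotifications.NotificationRuleBatch = [];
  let nextToken: string | undefined;
  do {
    const page = await client.listNotificationRules({ NextToken: nextToken }).promise();
    rules.push(...(page.NotificationRules ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return rules;
}
```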
@@ -68,19 +68,19 @@ declare class CodeStarNotifications extends Service {
68
68
  */
69
69
  listTagsForResource(callback?: (err: AWSError, data: CodeStarNotifications.Types.ListTagsForResourceResult) => void): Request<CodeStarNotifications.Types.ListTagsForResourceResult, AWSError>;
70
70
  /**
71
- * Returns a list of the notification rule targets for an AWS account.
71
+ * Returns a list of the notification rule targets for an Amazon Web Services account.
72
72
  */
73
73
  listTargets(params: CodeStarNotifications.Types.ListTargetsRequest, callback?: (err: AWSError, data: CodeStarNotifications.Types.ListTargetsResult) => void): Request<CodeStarNotifications.Types.ListTargetsResult, AWSError>;
74
74
  /**
75
- * Returns a list of the notification rule targets for an AWS account.
75
+ * Returns a list of the notification rule targets for an Amazon Web Services account.
76
76
  */
77
77
  listTargets(callback?: (err: AWSError, data: CodeStarNotifications.Types.ListTargetsResult) => void): Request<CodeStarNotifications.Types.ListTargetsResult, AWSError>;
78
78
  /**
79
- * Creates an association between a notification rule and an SNS topic so that the associated target can receive notifications when the events described in the rule are triggered.
79
+ * Creates an association between a notification rule and an Chatbot topic or Chatbot client so that the associated target can receive notifications when the events described in the rule are triggered.
80
80
  */
81
81
  subscribe(params: CodeStarNotifications.Types.SubscribeRequest, callback?: (err: AWSError, data: CodeStarNotifications.Types.SubscribeResult) => void): Request<CodeStarNotifications.Types.SubscribeResult, AWSError>;
82
82
  /**
83
- * Creates an association between a notification rule and an SNS topic so that the associated target can receive notifications when the events described in the rule are triggered.
83
+ * Creates an association between a notification rule and an Chatbot topic or Chatbot client so that the associated target can receive notifications when the events described in the rule are triggered.
84
84
  */
85
85
  subscribe(callback?: (err: AWSError, data: CodeStarNotifications.Types.SubscribeResult) => void): Request<CodeStarNotifications.Types.SubscribeResult, AWSError>;
86
86
  /**
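
The `subscribe` overloads above attach one more target to an existing rule. A minimal sketch, assuming the rule and topic ARNs are supplied by the caller:

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// Attach one more SNS-addressed target to an existing rule (ARNs are placeholders).
async function addTarget(ruleArn: string, topicArn: string) {
  await client.subscribe({
    Arn: ruleArn,
    Target: { TargetType: 'SNS', TargetAddress: topicArn },
  }).promise();
}
```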
@@ -92,11 +92,11 @@ declare class CodeStarNotifications extends Service {
92
92
  */
93
93
  tagResource(callback?: (err: AWSError, data: CodeStarNotifications.Types.TagResourceResult) => void): Request<CodeStarNotifications.Types.TagResourceResult, AWSError>;
94
94
  /**
95
- * Removes an association between a notification rule and an Amazon SNS topic so that subscribers to that topic stop receiving notifications when the events described in the rule are triggered.
95
+ * Removes an association between a notification rule and an Chatbot topic so that subscribers to that topic stop receiving notifications when the events described in the rule are triggered.
96
96
  */
97
97
  unsubscribe(params: CodeStarNotifications.Types.UnsubscribeRequest, callback?: (err: AWSError, data: CodeStarNotifications.Types.UnsubscribeResult) => void): Request<CodeStarNotifications.Types.UnsubscribeResult, AWSError>;
98
98
  /**
99
- * Removes an association between a notification rule and an Amazon SNS topic so that subscribers to that topic stop receiving notifications when the events described in the rule are triggered.
99
+ * Removes an association between a notification rule and an Chatbot topic so that subscribers to that topic stop receiving notifications when the events described in the rule are triggered.
100
100
  */
101
101
  unsubscribe(callback?: (err: AWSError, data: CodeStarNotifications.Types.UnsubscribeResult) => void): Request<CodeStarNotifications.Types.UnsubscribeResult, AWSError>;
102
102
  /**
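
And the reverse operation, `unsubscribe`, removes that association so the target stops receiving notifications for the rule. A matching sketch:

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// Detach a target so its subscribers stop receiving notifications for this rule.
async function removeTarget(ruleArn: string, targetAddress: string) {
  await client.unsubscribe({ Arn: ruleArn, TargetAddress: targetAddress }).promise();
}
```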
@@ -120,7 +120,7 @@ declare namespace CodeStarNotifications {
120
120
  export type ClientRequestToken = string;
121
121
  export interface CreateNotificationRuleRequest {
122
122
  /**
123
- * The name for the notification rule. Notifictaion rule names must be unique in your AWS account.
123
+ * The name for the notification rule. Notification rule names must be unique in your Amazon Web Services account.
124
124
  */
125
125
  Name: NotificationRuleName;
126
126
  /**
@@ -128,19 +128,19 @@ declare namespace CodeStarNotifications {
128
128
  */
129
129
  EventTypeIds: EventTypeIds;
130
130
  /**
131
- * The Amazon Resource Name (ARN) of the resource to associate with the notification rule. Supported resources include pipelines in AWS CodePipeline, repositories in AWS CodeCommit, and build projects in AWS CodeBuild.
131
+ * The Amazon Resource Name (ARN) of the resource to associate with the notification rule. Supported resources include pipelines in CodePipeline, repositories in CodeCommit, and build projects in CodeBuild.
132
132
  */
133
133
  Resource: NotificationRuleResource;
134
134
  /**
135
- * A list of Amazon Resource Names (ARNs) of SNS topics to associate with the notification rule.
135
+ * A list of Amazon Resource Names (ARNs) of Amazon Simple Notification Service topics and Chatbot clients to associate with the notification rule.
136
136
  */
137
137
  Targets: Targets;
138
138
  /**
139
- * The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
139
+ * The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in Amazon CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
140
140
  */
141
141
  DetailType: DetailType;
142
142
  /**
143
- * A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request with the same parameters is received and a token is included, the request returns information about the initial request that used that token. The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.
143
+ * A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request with the same parameters is received and a token is included, the request returns information about the initial request that used that token. The Amazon Web Services SDKs prepopulate client request tokens. If you are using an Amazon Web Services SDK, an idempotency token is created for you.
144
144
  */
145
145
  ClientRequestToken?: ClientRequestToken;
146
146
  /**
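
Putting the fields of `CreateNotificationRuleRequest` together, a request can mix an SNS-addressed target with a Chatbot Slack client. This is only an illustrative shape: every ARN, name, event type ID, and token below is a placeholder, and the Chatbot configuration ARN format is an assumption rather than something stated in the diff.

```ts
import { CodeStarNotifications } from 'aws-sdk';

// Illustrative request shape only -- all identifiers are placeholders.
const request: CodeStarNotifications.Types.CreateNotificationRuleRequest = {
  Name: 'example-repo-notifications',
  Resource: 'arn:aws:codecommit:us-east-1:111111111111:example-repo',
  EventTypeIds: ['codecommit-repository-pull-request-created'],
  Targets: [
    // Chatbot topics are addressed by an SNS topic ARN...
    { TargetType: 'SNS', TargetAddress: 'arn:aws:sns:us-east-1:111111111111:example-topic' },
    // ...while Chatbot Slack clients use the chat configuration ARN (format assumed).
    { TargetType: 'AWSChatbotSlack', TargetAddress: 'arn:aws:chatbot::111111111111:chat-configuration/slack-channel/example' },
  ],
  DetailType: 'BASIC',
  // Optional: supply your own idempotency token; the SDK generates one otherwise.
  ClientRequestToken: 'example-token-0001',
};
```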
@@ -173,11 +173,11 @@ declare namespace CodeStarNotifications {
173
173
  }
174
174
  export interface DeleteTargetRequest {
175
175
  /**
176
- * The Amazon Resource Name (ARN) of the SNS topic to delete.
176
+ * The Amazon Resource Name (ARN) of the Chatbot topic or Chatbot client to delete.
177
177
  */
178
178
  TargetAddress: TargetAddress;
179
179
  /**
180
- * A Boolean value that can be used to delete all associations with this SNS topic. The default value is FALSE. If set to TRUE, all associations between that target and every notification rule in your AWS account are deleted.
180
+ * A Boolean value that can be used to delete all associations with this Chatbot topic. The default value is FALSE. If set to TRUE, all associations between that target and every notification rule in your Amazon Web Services account are deleted.
181
181
  */
182
182
  ForceUnsubscribeAll?: ForceUnsubscribeAll;
183
183
  }
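
A short sketch of `deleteTarget` with the `ForceUnsubscribeAll` flag described above, which removes the target's association from every rule in the account before deleting it:

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// Remove a target everywhere: ForceUnsubscribeAll drops the association from
// every notification rule in the account before the target itself is deleted.
async function deleteTargetEverywhere(targetAddress: string) {
  await client.deleteTarget({
    TargetAddress: targetAddress,
    ForceUnsubscribeAll: true,
  }).promise();
}
```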
@@ -207,11 +207,11 @@ declare namespace CodeStarNotifications {
207
207
  */
208
208
  Resource?: NotificationRuleResource;
209
209
  /**
210
- * A list of the SNS topics associated with the notification rule.
210
+ * A list of the Chatbot topics and Chatbot clients associated with the notification rule.
211
211
  */
212
212
  Targets?: TargetsBatch;
213
213
  /**
214
- * The level of detail included in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
214
+ * The level of detail included in the notifications for this resource. BASIC will include only the contents of the event as it would appear in Amazon CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
215
215
  */
216
216
  DetailType?: DetailType;
217
217
  /**
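
Assuming the `Targets?: TargetsBatch` field above belongs to the DescribeNotificationRule result, a sketch of reading a rule's targets back and branching on their type; `describeNotificationRule` and the `TargetStatus` field are part of the client but are not shown in this hunk, so treat them as assumptions.

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// Inspect the targets attached to a rule and report their address, type, and status.
async function describeTargets(ruleArn: string) {
  const rule = await client.describeNotificationRule({ Arn: ruleArn }).promise();
  for (const target of rule.Targets ?? []) {
    // TargetType is "SNS" for Chatbot topics and "AWSChatbotSlack" for Slack clients.
    console.log(target.TargetAddress, target.TargetType, target.TargetStatus);
  }
}
```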
@@ -242,7 +242,7 @@ declare namespace CodeStarNotifications {
242
242
  export type EventTypeName = string;
243
243
  export interface EventTypeSummary {
244
244
  /**
245
- * The system-generated ID of the event.
245
+ * The system-generated ID of the event. For a complete list of event types and IDs, see Notification concepts in the Developer Tools Console User Guide.
246
246
  */
247
247
  EventTypeId?: EventTypeId;
248
248
  /**
@@ -303,7 +303,7 @@ declare namespace CodeStarNotifications {
303
303
  */
304
304
  Name: ListNotificationRulesFilterName;
305
305
  /**
306
- * The value of the attribute you want to use to filter the returned notification rules. For example, if you specify filtering by RESOURCE in Name, you might specify the ARN of a pipeline in AWS CodePipeline for the value.
306
+ * The value of the attribute you want to use to filter the returned notification rules. For example, if you specify filtering by RESOURCE in Name, you might specify the ARN of a pipeline in CodePipeline for the value.
307
307
  */
308
308
  Value: ListNotificationRulesFilterValue;
309
309
  }
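
Filtering by RESOURCE, as the docstring above describes, narrows the listing to rules attached to a single resource. A minimal sketch with a placeholder pipeline ARN:

```ts
import { CodeStarNotifications } from 'aws-sdk';

const client = new CodeStarNotifications();

// List only the rules attached to one pipeline (the ARN is supplied by the caller).
async function rulesForPipeline(pipelineArn: string) {
  const result = await client.listNotificationRules({
    Filters: [{ Name: 'RESOURCE', Value: pipelineArn }],
  }).promise();
  return result.NotificationRules ?? [];
}
```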
@@ -330,7 +330,7 @@ declare namespace CodeStarNotifications {
330
330
  */
331
331
  NextToken?: NextToken;
332
332
  /**
333
- * The list of notification rules for the AWS account, by Amazon Resource Name (ARN) and ID.
333
+ * The list of notification rules for the Amazon Web Services account, by Amazon Resource Name (ARN) and ID.
334
334
  */
335
335
  NotificationRules?: NotificationRuleBatch;
336
336
  }
@@ -443,11 +443,11 @@ declare namespace CodeStarNotifications {
443
443
  export type Tags = {[key: string]: TagValue};
444
444
  export interface Target {
445
445
  /**
446
- * The target type. Can be an Amazon SNS topic.
446
+ * The target type. Can be an Chatbot topic or Chatbot client. Chatbot topics are specified as SNS. Chatbot clients are specified as AWSChatbotSlack.
447
447
  */
448
448
  TargetType?: TargetType;
449
449
  /**
450
- * The Amazon Resource Name (ARN) of the SNS topic.
450
+ * The Amazon Resource Name (ARN) of the Chatbot topic or Chatbot client.
451
451
  */
452
452
  TargetAddress?: TargetAddress;
453
453
  }
@@ -455,11 +455,11 @@ declare namespace CodeStarNotifications {
455
455
  export type TargetStatus = "PENDING"|"ACTIVE"|"UNREACHABLE"|"INACTIVE"|"DEACTIVATED"|string;
456
456
  export interface TargetSummary {
457
457
  /**
458
- * The Amazon Resource Name (ARN) of the SNS topic.
458
+ * The Amazon Resource Name (ARN) of the Chatbot topic or Chatbot client.
459
459
  */
460
460
  TargetAddress?: TargetAddress;
461
461
  /**
462
- * The type of the target (for example, SNS).
462
+ * The type of the target (for example, SNS). Chatbot topics are specified as SNS. Chatbot clients are specified as AWSChatbotSlack.
463
463
  */
464
464
  TargetType?: TargetType;
465
465
  /**
@@ -476,7 +476,7 @@ declare namespace CodeStarNotifications {
476
476
  */
477
477
  Arn: NotificationRuleArn;
478
478
  /**
479
- * The ARN of the SNS topic to unsubscribe from the notification rule.
479
+ * The ARN of the Chatbot topic to unsubscribe from the notification rule.
480
480
  */
481
481
  TargetAddress: TargetAddress;
482
482
  }
@@ -512,7 +512,7 @@ declare namespace CodeStarNotifications {
512
512
  */
513
513
  Status?: NotificationRuleStatus;
514
514
  /**
515
- * A list of event types associated with this notification rule.
515
+ * A list of event types associated with this notification rule. For a complete list of event types and IDs, see Notification concepts in the Developer Tools Console User Guide.
516
516
  */
517
517
  EventTypeIds?: EventTypeIds;
518
518
  /**
@@ -520,7 +520,7 @@ declare namespace CodeStarNotifications {
520
520
  */
521
521
  Targets?: Targets;
522
522
  /**
523
- * The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
523
+ * The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in Amazon CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.
524
524
  */
525
525
  DetailType?: DetailType;
526
526
  }
@@ -29,11 +29,11 @@ declare class ECS extends Service {
29
29
  */
30
30
  createCluster(callback?: (err: AWSError, data: ECS.Types.CreateClusterResponse) => void): Request<ECS.Types.CreateClusterResponse, AWSError>;
31
31
  /**
32
- * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. 
If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic: Determine which of the container instances in your cluster can support the task definition of your service. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner. This is the case even if you can choose a different placement strategy with the placementStrategy parameter. Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone based on the previous steps, favoring container instances with the fewest number of running tasks for this service.
32
+ * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. 
If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
33
33
  */
34
34
  createService(params: ECS.Types.CreateServiceRequest, callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
35
35
  /**
36
- * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. 
If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic: Determine which of the container instances in your cluster can support the task definition of your service. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner. This is the case even if you can choose a different placement strategy with the placementStrategy parameter. Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone based on the previous steps, favoring container instances with the fewest number of running tasks for this service.
36
+ * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. 
If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
37
37
  */
38
38
  createService(callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
39
39
  /**
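
To make the `createService` behavior above concrete, here is a minimal sketch of a REPLICA service with the minimum-healthy-percent / maximum-percent deployment settings the docstring discusses; the cluster, service, and task definition names are placeholders.

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS({ region: 'us-east-1' });

// Placeholder cluster, service, and task definition names.
async function createExampleService() {
  const result = await ecs.createService({
    cluster: 'example-cluster',
    serviceName: 'example-service',
    taskDefinition: 'example-task:1',
    desiredCount: 4,
    schedulingStrategy: 'REPLICA',
    // With 4 desired tasks, 50%/200% lets the scheduler stop up to two old
    // tasks and start up to four new ones during a deployment.
    deploymentConfiguration: { minimumHealthyPercent: 50, maximumPercent: 200 },
  }).promise();
  console.log(result.service?.serviceArn);
}
```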
@@ -787,7 +787,7 @@ declare namespace ECS {
787
787
  */
788
788
  memory?: BoxedInteger;
789
789
  /**
790
- * The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used. For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed. The Docker daemon reserves a minimum of 4 MiB of memory for a container. Therefore, we recommend that you specify fewer than 4 MiB of memory for your containers.
790
+ * The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory is used. For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
791
791
  */
792
792
  memoryReservation?: BoxedInteger;
793
793
  /**
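
The memoryReservation docstring's own example (128 MiB reserved, bursting to a 300 MiB hard limit) looks like this in a container definition; the container name and image are placeholders.

```ts
import { ECS } from 'aws-sdk';

// Reserve 128 MiB, allow bursts up to a 300 MiB hard limit.
const container: ECS.ContainerDefinition = {
  name: 'web',
  image: 'public.ecr.aws/nginx/nginx:latest', // placeholder image
  memoryReservation: 128, // soft limit -- always reserved on the container instance
  memory: 300,            // hard limit -- the container is killed if it exceeds this
  essential: true,
};
```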
@@ -835,7 +835,7 @@ declare namespace ECS {
835
835
  */
836
836
  secrets?: SecretList;
837
837
  /**
838
- * The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed. For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later.
838
+ * The dependencies defined for container startup and shutdown. A container can contain multiple dependencies on other containers in a task definition. When a dependency is defined for container startup, for container shutdown it is reversed. For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later.
839
839
  */
840
840
  dependsOn?: ContainerDependencies;
841
841
  /**
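
A small sketch of the `dependsOn` ordering described above: one container waits for another container in the same task definition to exit successfully before starting. Names and image are placeholders.

```ts
import { ECS } from 'aws-sdk';

// "app" waits for the "db-migrate" container to finish successfully before it starts.
const appContainer: ECS.ContainerDefinition = {
  name: 'app',
  image: 'example/app:latest', // placeholder image
  essential: true,
  dependsOn: [{ containerName: 'db-migrate', condition: 'SUCCESS' }],
};
```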
@@ -1955,7 +1955,7 @@ declare namespace ECS {
1955
1955
  export type GpuIds = String[];
1956
1956
  export interface HealthCheck {
1957
1957
  /**
1958
- * A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in brackets. [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't need to include the brackets when you use the Amazon Web Services Management Console. "CMD-SHELL", "curl -f http://localhost/ || exit 1" An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.
1958
+ * A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to run the command arguments directly, or CMD-SHELL to run the command with the container's default shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in brackets. [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't need to include the brackets when you use the Amazon Web Services Management Console. "CMD-SHELL", "curl -f http://localhost/ || exit 1" An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.
1959
1959
  */
1960
1960
  command: StringList;
1961
1961
  /**
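
The CMD-SHELL form from the HealthCheck docstring above, expressed as a container health check; the timing values are illustrative defaults, not values from the diff.

```ts
import { ECS } from 'aws-sdk';

// Health check using the CMD-SHELL form; tune the timing values as needed.
const healthCheck: ECS.HealthCheck = {
  command: ['CMD-SHELL', 'curl -f http://localhost/ || exit 1'],
  interval: 30,    // seconds between checks
  timeout: 5,      // seconds before a single check is considered failed
  retries: 3,      // consecutive failures before the container is marked unhealthy
  startPeriod: 10, // grace period after the container starts
};
```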
@@ -2743,11 +2743,11 @@ declare namespace ECS {
2743
2743
  */
2744
2744
  requiresCompatibilities?: CompatibilityList;
2745
2745
  /**
2746
- * The number of CPU units used by the task. It can be expressed as an integer using CPU units (for example, 1024) or as a string using vCPUs (for example, 1 vCPU or 1 vcpu) in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers. If you're using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If you're using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
2746
+ * The number of CPU units used by the task. It can be expressed as an integer using CPU units (for example, 1024) or as a string using vCPUs (for example, 1 vCPU or 1 vcpu) in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers. If you're using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If you do not specify a value, the parameter is ignored. If you're using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later.
2747
2747
  */
2748
2748
  cpu?: String;
2749
2749
  /**
2750
- * The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB (for example ,1024) or as a string using GB (for example, 1GB or 1 GB) in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers. If using the EC2 launch type, this field is optional. If using the Fargate launch type, this field is required and you must use one of the following values. This determines your range of supported values for the cpu parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)
2750
+ * The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB (for example ,1024) or as a string using GB (for example, 1GB or 1 GB) in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers. If using the EC2 launch type, this field is optional. If using the Fargate launch type, this field is required and you must use one of the following values. This determines your range of supported values for the cpu parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later.
2751
2751
  */
2752
2752
  memory?: String;
2753
2753
  /**
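
One of the new Fargate task sizes described above (8 vCPU with 16 GB, the smallest memory allowed at that CPU value) in a task definition registration; the family, container name, and image are placeholders, and the chosen size assumes Fargate Linux platform 1.4.0 or later as the docstring notes.

```ts
import { ECS } from 'aws-sdk';

const ecs = new ECS();

// 8 vCPU / 16 GB Fargate task with awsvpc networking.
async function registerLargeTask() {
  const result = await ecs.registerTaskDefinition({
    family: 'example-large-task',
    requiresCompatibilities: ['FARGATE'],
    networkMode: 'awsvpc',
    cpu: '8192',     // 8 vCPU -- requires Fargate Linux platform 1.4.0 or later
    memory: '16384', // 16 GB in MiB, the smallest value allowed at 8 vCPU
    containerDefinitions: [
      { name: 'app', image: 'example/app:latest', essential: true },
    ],
  }).promise();
  console.log(result.taskDefinition?.taskDefinitionArn);
}
```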
@@ -3409,7 +3409,7 @@ declare namespace ECS {
3409
3409
  */
3410
3410
  containers?: Containers;
3411
3411
  /**
3412
- * The number of CPU units used by the task as expressed in a task definition. It can be expressed as an integer using CPU units (for example, 1024). It can also be expressed as a string using vCPUs (for example, 1 vCPU or 1 vcpu). String values are converted to an integer that indicates the CPU units when the task definition is registered. If you use the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If you use the Fargate launch type, this field is required. You must use one of the following values. These values determine the range of supported values for the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
3412
+ * The number of CPU units used by the task as expressed in a task definition. It can be expressed as an integer using CPU units (for example, 1024). It can also be expressed as a string using vCPUs (for example, 1 vCPU or 1 vcpu). String values are converted to an integer that indicates the CPU units when the task definition is registered. If you use the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If you use the Fargate launch type, this field is required. You must use one of the following values. These values determine the range of supported values for the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later.
3413
3413
  */
3414
3414
  cpu?: String;
3415
3415
  /**
@@ -3449,7 +3449,7 @@ declare namespace ECS {
3449
3449
  */
3450
3450
  launchType?: LaunchType;
3451
3451
  /**
3452
- * The amount of memory (in MiB) that the task uses as expressed in a task definition. It can be expressed as an integer using MiB (for example, 1024). If it's expressed as a string using GB (for example, 1GB or 1 GB), it's converted to an integer indicating the MiB when the task definition is registered. If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines the range of supported values for the cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)
3452
+ * The amount of memory (in MiB) that the task uses as expressed in a task definition. It can be expressed as an integer using MiB (for example, 1024). If it's expressed as a string using GB (for example, 1GB or 1 GB), it's converted to an integer indicating the MiB when the task definition is registered. If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines the range of supported values for the cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32 GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later.
3453
3453
  */
3454
3454
  memory?: String;
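The Task fields documented above surface these sizes as strings on describe calls. A minimal sketch, assuming aws-sdk v2 with region and credentials already configured; the cluster name and task ARN are placeholders:

```ts
import * as AWS from 'aws-sdk';

const ecs = new AWS.ECS();

async function printTaskSize(): Promise<void> {
  const result = await ecs.describeTasks({
    cluster: 'example-cluster',                                                            // placeholder
    tasks: ['arn:aws:ecs:us-east-1:123456789012:task/example-cluster/0123456789abcdef'],  // placeholder
  }).promise();

  for (const task of result.tasks ?? []) {
    // cpu is expressed in CPU units (for example "16384" for 16 vCPU);
    // memory is expressed in MiB (for example "65536" for 64 GB).
    console.log(`cpu=${task.cpu} memory=${task.memory}`);
  }
}
```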
3455
3455
  /**
@@ -3575,11 +3575,11 @@ declare namespace ECS {
3575
3575
  */
3576
3576
  requiresCompatibilities?: CompatibilityList;
3577
3577
  /**
3578
- * The number of cpu units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
3578
+ * The number of cpu units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: Between 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16 vCPU) - Available memory values: Between 32 GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later.
3579
3579
  */
3580
3580
  cpu?: String;
3581
3581
  /**
3582
- * The amount (in MiB) of memory used by the task. If your tasks runs on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified, the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition. If your tasks runs on Fargate, this field is required. You must use one of the following values. The value you choose determines your range of valid values for the cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)
3582
+ * The amount (in MiB) of memory used by the task. If your tasks run on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified, the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition. If your tasks run on Fargate, this field is required. You must use one of the following values. The value you choose determines your range of valid values for the cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32 GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later.
3583
3583
  */
3584
3584
  memory?: String;
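Taken together, the cpu and memory fields above let registerTaskDefinition request the larger Fargate sizes introduced in this release. A minimal sketch, assuming aws-sdk v2; the family name and container image are placeholders, and the values shown are one valid 16 vCPU pairing:

```ts
import * as AWS from 'aws-sdk';

const ecs = new AWS.ECS();

async function registerLargeFargateTaskDefinition(): Promise<void> {
  const result = await ecs.registerTaskDefinition({
    family: 'example-large-task',          // placeholder
    requiresCompatibilities: ['FARGATE'],
    networkMode: 'awsvpc',
    cpu: '16384',                          // 16 vCPU; requires Linux platform 1.4.0 or later
    memory: '65536',                       // 64 GB, within the 32-120 GB range for 16 vCPU
    containerDefinitions: [
      {
        name: 'app',
        image: 'public.ecr.aws/docker/library/busybox:latest', // placeholder image
        essential: true,
      },
    ],
  }).promise();

  console.log(result.taskDefinition?.taskDefinitionArn);
}
```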
3585
3585
  /**
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
83
83
  /**
84
84
  * @constant
85
85
  */
86
- VERSION: '2.1217.0',
86
+ VERSION: '2.1218.0',
87
87
 
88
88
  /**
89
89
  * @api private
@@ -395,7 +395,7 @@ return /******/ (function(modules) { // webpackBootstrap
395
395
  /**
396
396
  * @constant
397
397
  */
398
- VERSION: '2.1217.0',
398
+ VERSION: '2.1218.0',
399
399
 
400
400
  /**
401
401
  * @api private
@@ -56218,7 +56218,7 @@ return /******/ (function(modules) { // webpackBootstrap
56218
56218
  /* 814 */
56219
56219
  /***/ (function(module, exports) {
56220
56220
 
56221
- module.exports = {"version":"2.0","metadata":{"apiVersion":"2019-10-15","endpointPrefix":"codestar-notifications","jsonVersion":"1.1","protocol":"rest-json","serviceFullName":"AWS CodeStar Notifications","serviceId":"codestar notifications","signatureVersion":"v4","signingName":"codestar-notifications","uid":"codestar-notifications-2019-10-15"},"operations":{"CreateNotificationRule":{"http":{"requestUri":"/createNotificationRule"},"input":{"type":"structure","required":["Name","EventTypeIds","Resource","Targets","DetailType"],"members":{"Name":{"shape":"S2"},"EventTypeIds":{"shape":"S3"},"Resource":{},"Targets":{"shape":"S6"},"DetailType":{},"ClientRequestToken":{"idempotencyToken":true},"Tags":{"shape":"Sc"},"Status":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"DeleteNotificationRule":{"http":{"requestUri":"/deleteNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"DeleteTarget":{"http":{"requestUri":"/deleteTarget"},"input":{"type":"structure","required":["TargetAddress"],"members":{"TargetAddress":{"shape":"S9"},"ForceUnsubscribeAll":{"type":"boolean"}}},"output":{"type":"structure","members":{}}},"DescribeNotificationRule":{"http":{"requestUri":"/describeNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","required":["Arn"],"members":{"Arn":{},"Name":{"shape":"S2"},"EventTypes":{"shape":"Sp"},"Resource":{},"Targets":{"shape":"Su"},"DetailType":{},"CreatedBy":{},"Status":{},"CreatedTimestamp":{"type":"timestamp"},"LastModifiedTimestamp":{"type":"timestamp"},"Tags":{"shape":"Sc"}}}},"ListEventTypes":{"http":{"requestUri":"/listEventTypes"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"EventTypes":{"shape":"Sp"},"NextToken":{}}}},"ListNotificationRules":{"http":{"requestUri":"/listNotificationRules"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"NextToken":{},"NotificationRules":{"type":"list","member":{"type":"structure","members":{"Id":{},"Arn":{}}}}}}},"ListTagsForResource":{"http":{"requestUri":"/listTagsForResource"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","members":{"Tags":{"shape":"Sc"}}}},"ListTargets":{"http":{"requestUri":"/listTargets"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"Targets":{"shape":"Su"},"NextToken":{}}}},"Subscribe":{"http":{"requestUri":"/subscribe"},"input":{"type":"structure","required":["Arn","Target"],"members":{"Arn":{},"Target":{"shape":"S7"},"ClientRequestToken":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"TagResource":{"http":{"requestUri":"/tagResource"},"input":{"type":"structure","required":["Arn","Tags"],"members":{"Arn":{},"Tags":{"shape":"Sc"}}},"output":{"type":"structure","members":{"Tags":{"shape":"Sc"}}}},"Unsubscribe":{"http":{"requestUri":"/unsubscribe"},"input":{"type":"structure","required":["Arn","TargetAddress"]
,"members":{"Arn":{},"TargetAddress":{"shape":"S9"}}},"output":{"type":"structure","required":["Arn"],"members":{"Arn":{}}}},"UntagResource":{"http":{"requestUri":"/untagResource"},"input":{"type":"structure","required":["Arn","TagKeys"],"members":{"Arn":{},"TagKeys":{"type":"list","member":{}}}},"output":{"type":"structure","members":{}}},"UpdateNotificationRule":{"http":{"requestUri":"/updateNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{},"Name":{"shape":"S2"},"Status":{},"EventTypeIds":{"shape":"S3"},"Targets":{"shape":"S6"},"DetailType":{}}},"output":{"type":"structure","members":{}}}},"shapes":{"S2":{"type":"string","sensitive":true},"S3":{"type":"list","member":{}},"S6":{"type":"list","member":{"shape":"S7"}},"S7":{"type":"structure","members":{"TargetType":{},"TargetAddress":{"shape":"S9"}}},"S9":{"type":"string","sensitive":true},"Sc":{"type":"map","key":{},"value":{}},"Sp":{"type":"list","member":{"type":"structure","members":{"EventTypeId":{},"ServiceName":{},"EventTypeName":{},"ResourceType":{}}}},"Su":{"type":"list","member":{"type":"structure","members":{"TargetAddress":{"shape":"S9"},"TargetType":{},"TargetStatus":{}}}}}}
56221
+ module.exports = {"version":"2.0","metadata":{"apiVersion":"2019-10-15","endpointPrefix":"codestar-notifications","jsonVersion":"1.1","protocol":"rest-json","serviceFullName":"AWS CodeStar Notifications","serviceId":"codestar notifications","signatureVersion":"v4","signingName":"codestar-notifications","uid":"codestar-notifications-2019-10-15"},"operations":{"CreateNotificationRule":{"http":{"requestUri":"/createNotificationRule"},"input":{"type":"structure","required":["Name","EventTypeIds","Resource","Targets","DetailType"],"members":{"Name":{"shape":"S2"},"EventTypeIds":{"shape":"S3"},"Resource":{},"Targets":{"shape":"S6"},"DetailType":{},"ClientRequestToken":{"idempotencyToken":true},"Tags":{"shape":"Sc"},"Status":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"DeleteNotificationRule":{"http":{"requestUri":"/deleteNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"DeleteTarget":{"http":{"requestUri":"/deleteTarget"},"input":{"type":"structure","required":["TargetAddress"],"members":{"TargetAddress":{"shape":"S9"},"ForceUnsubscribeAll":{"type":"boolean"}}},"output":{"type":"structure","members":{}}},"DescribeNotificationRule":{"http":{"requestUri":"/describeNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","required":["Arn"],"members":{"Arn":{},"Name":{"shape":"S2"},"EventTypes":{"shape":"Sp"},"Resource":{},"Targets":{"shape":"Su"},"DetailType":{},"CreatedBy":{},"Status":{},"CreatedTimestamp":{"type":"timestamp"},"LastModifiedTimestamp":{"type":"timestamp"},"Tags":{"shape":"Sc"}}}},"ListEventTypes":{"http":{"requestUri":"/listEventTypes"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"EventTypes":{"shape":"Sp"},"NextToken":{}}}},"ListNotificationRules":{"http":{"requestUri":"/listNotificationRules"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"NextToken":{},"NotificationRules":{"type":"list","member":{"type":"structure","members":{"Id":{},"Arn":{}}}}}}},"ListTagsForResource":{"http":{"requestUri":"/listTagsForResource"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{}}},"output":{"type":"structure","members":{"Tags":{"shape":"Sc"}}}},"ListTargets":{"http":{"requestUri":"/listTargets"},"input":{"type":"structure","members":{"Filters":{"type":"list","member":{"type":"structure","required":["Name","Value"],"members":{"Name":{},"Value":{}}}},"NextToken":{},"MaxResults":{"type":"integer"}}},"output":{"type":"structure","members":{"Targets":{"shape":"Su"},"NextToken":{}}}},"Subscribe":{"http":{"requestUri":"/subscribe"},"input":{"type":"structure","required":["Arn","Target"],"members":{"Arn":{},"Target":{"shape":"S7"},"ClientRequestToken":{}}},"output":{"type":"structure","members":{"Arn":{}}}},"TagResource":{"http":{"requestUri":"/tagResource"},"input":{"type":"structure","required":["Arn","Tags"],"members":{"Arn":{},"Tags":{"shape":"Sc"}}},"output":{"type":"structure","members":{"Tags":{"shape":"Sc"}}}},"Unsubscribe":{"http":{"requestUri":"/unsubscribe"},"input":{"type":"structure","required":["Arn","TargetAddress"]
,"members":{"Arn":{},"TargetAddress":{"shape":"S9"}}},"output":{"type":"structure","required":["Arn"],"members":{"Arn":{}}}},"UntagResource":{"http":{"requestUri":"/untagResource/{resourceArn}"},"input":{"type":"structure","required":["Arn","TagKeys"],"members":{"Arn":{"location":"uri","locationName":"resourceArn"},"TagKeys":{"location":"querystring","locationName":"tagKeys","type":"list","member":{}}}},"output":{"type":"structure","members":{}}},"UpdateNotificationRule":{"http":{"requestUri":"/updateNotificationRule"},"input":{"type":"structure","required":["Arn"],"members":{"Arn":{},"Name":{"shape":"S2"},"Status":{},"EventTypeIds":{"shape":"S3"},"Targets":{"shape":"S6"},"DetailType":{}}},"output":{"type":"structure","members":{}}}},"shapes":{"S2":{"type":"string","sensitive":true},"S3":{"type":"list","member":{}},"S6":{"type":"list","member":{"shape":"S7"}},"S7":{"type":"structure","members":{"TargetType":{},"TargetAddress":{"shape":"S9"}}},"S9":{"type":"string","sensitive":true},"Sc":{"type":"map","key":{},"value":{}},"Sp":{"type":"list","member":{"type":"structure","members":{"EventTypeId":{},"ServiceName":{},"EventTypeName":{},"ResourceType":{}}}},"Su":{"type":"list","member":{"type":"structure","members":{"TargetAddress":{"shape":"S9"},"TargetType":{},"TargetStatus":{}}}}}}
56222
56222
 
56223
56223
  /***/ }),
56224
56224
  /* 815 */
@@ -1,4 +1,4 @@
1
- // AWS SDK for JavaScript v2.1217.0
1
+ // AWS SDK for JavaScript v2.1218.0
2
2
  // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
3
  // License at https://sdk.amazonaws.com/js/BUNDLE_LICENSE.txt
4
4
  (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i<t.length;i++)o(t[i]);return o}return r})()({1:[function(require,module,exports){
@@ -243007,7 +243007,7 @@ AWS.util.update(AWS, {
243007
243007
  /**
243008
243008
  * @constant
243009
243009
  */
243010
- VERSION: '2.1217.0',
243010
+ VERSION: '2.1218.0',
243011
243011
 
243012
243012
  /**
243013
243013
  * @api private
@@ -264991,7 +264991,7 @@ var LRUCache = /** @class */ (function () {
264991
264991
  }());
264992
264992
  exports.LRUCache = LRUCache;
264993
264993
  },{}],458:[function(require,module,exports){
264994
- // AWS SDK for JavaScript v2.1217.0
264994
+ // AWS SDK for JavaScript v2.1218.0
264995
264995
  // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
264996
264996
  // License at https://sdk.amazonaws.com/js/BUNDLE_LICENSE.txt
264997
264997
  require('./browser_loader');