cdk-lambda-subminute 2.0.226 → 2.0.228

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/.jsii +15 -15
  2. package/README.md +12 -0
  3. package/lib/cdk-lambda-subminute.js +3 -3
  4. package/node_modules/aws-sdk/CHANGELOG.md +174 -1
  5. package/node_modules/aws-sdk/README.md +1 -1
  6. package/node_modules/aws-sdk/apis/amplifybackend-2020-08-11.min.json +4 -0
  7. package/node_modules/aws-sdk/apis/amplifyuibuilder-2021-08-11.min.json +40 -9
  8. package/node_modules/aws-sdk/apis/apigateway-2015-07-09.min.json +2 -1
  9. package/node_modules/aws-sdk/apis/appflow-2020-08-23.min.json +115 -87
  10. package/node_modules/aws-sdk/apis/application-insights-2018-11-25.min.json +228 -43
  11. package/node_modules/aws-sdk/apis/application-insights-2018-11-25.paginators.json +5 -0
  12. package/node_modules/aws-sdk/apis/auditmanager-2017-07-25.min.json +238 -63
  13. package/node_modules/aws-sdk/apis/autoscaling-2011-01-01.examples.json +74 -11
  14. package/node_modules/aws-sdk/apis/autoscaling-2011-01-01.min.json +61 -52
  15. package/node_modules/aws-sdk/apis/backup-2018-11-15.min.json +217 -104
  16. package/node_modules/aws-sdk/apis/backup-2018-11-15.paginators.json +6 -0
  17. package/node_modules/aws-sdk/apis/ce-2017-10-25.min.json +3 -1
  18. package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.min.json +261 -21
  19. package/node_modules/aws-sdk/apis/chime-sdk-voice-2022-08-03.min.json +198 -182
  20. package/node_modules/aws-sdk/apis/cleanrooms-2022-02-17.min.json +657 -102
  21. package/node_modules/aws-sdk/apis/cleanrooms-2022-02-17.paginators.json +12 -0
  22. package/node_modules/aws-sdk/apis/cloudformation-2010-05-15.min.json +95 -80
  23. package/node_modules/aws-sdk/apis/cloudfront-2020-05-31.min.json +4 -1
  24. package/node_modules/aws-sdk/apis/cloudhsm-2014-05-30.min.json +60 -20
  25. package/node_modules/aws-sdk/apis/codecommit-2015-04-13.min.json +44 -0
  26. package/node_modules/aws-sdk/apis/codecommit-2015-04-13.paginators.json +5 -0
  27. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.examples.json +849 -0
  28. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.min.json +110 -24
  29. package/node_modules/aws-sdk/apis/compute-optimizer-2019-11-01.min.json +200 -57
  30. package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +903 -316
  31. package/node_modules/aws-sdk/apis/connect-2017-08-08.paginators.json +18 -0
  32. package/node_modules/aws-sdk/apis/connectcampaigns-2021-01-30.min.json +103 -90
  33. package/node_modules/aws-sdk/apis/connectparticipant-2018-09-07.min.json +62 -0
  34. package/node_modules/aws-sdk/apis/customer-profiles-2020-08-15.min.json +269 -118
  35. package/node_modules/aws-sdk/apis/datasync-2018-11-09.min.json +103 -14
  36. package/node_modules/aws-sdk/apis/detective-2018-10-26.min.json +14 -3
  37. package/node_modules/aws-sdk/apis/dms-2016-01-01.min.json +1123 -217
  38. package/node_modules/aws-sdk/apis/dms-2016-01-01.paginators.json +50 -0
  39. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +1254 -1191
  40. package/node_modules/aws-sdk/apis/elasticache-2015-02-02.min.json +37 -10
  41. package/node_modules/aws-sdk/apis/elasticloadbalancingv2-2015-12-01.min.json +49 -46
  42. package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +74 -13
  43. package/node_modules/aws-sdk/apis/fsx-2018-03-01.min.json +190 -143
  44. package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +235 -216
  45. package/node_modules/aws-sdk/apis/grafana-2020-08-18.min.json +2 -1
  46. package/node_modules/aws-sdk/apis/health-2016-08-04.min.json +116 -34
  47. package/node_modules/aws-sdk/apis/inspector2-2020-06-08.min.json +291 -192
  48. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.min.json +37 -19
  49. package/node_modules/aws-sdk/apis/kafka-2018-11-14.min.json +237 -52
  50. package/node_modules/aws-sdk/apis/kafka-2018-11-14.paginators.json +6 -0
  51. package/node_modules/aws-sdk/apis/kafkaconnect-2021-09-14.min.json +38 -36
  52. package/node_modules/aws-sdk/apis/kinesis-video-archived-media-2017-09-30.min.json +0 -1
  53. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +337 -23
  54. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.paginators.json +5 -0
  55. package/node_modules/aws-sdk/apis/mediaconvert-2017-08-29.min.json +134 -96
  56. package/node_modules/aws-sdk/apis/medialive-2017-10-14.min.json +249 -225
  57. package/node_modules/aws-sdk/apis/mediapackage-2017-10-12.min.json +93 -87
  58. package/node_modules/aws-sdk/apis/metadata.json +7 -0
  59. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.examples.json +5 -0
  60. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.min.json +1923 -0
  61. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.paginators.json +4 -0
  62. package/node_modules/aws-sdk/apis/omics-2022-11-28.min.json +756 -204
  63. package/node_modules/aws-sdk/apis/omics-2022-11-28.paginators.json +12 -0
  64. package/node_modules/aws-sdk/apis/omics-2022-11-28.waiters2.json +48 -0
  65. package/node_modules/aws-sdk/apis/payment-cryptography-data-2022-02-03.min.json +29 -16
  66. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.examples.json +5 -0
  67. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.min.json +1465 -0
  68. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.paginators.json +34 -0
  69. package/node_modules/aws-sdk/apis/pi-2018-02-27.min.json +304 -11
  70. package/node_modules/aws-sdk/apis/pi-2018-02-27.paginators.json +5 -0
  71. package/node_modules/aws-sdk/apis/pinpoint-2016-12-01.examples.json +6 -12
  72. package/node_modules/aws-sdk/apis/pinpoint-2016-12-01.min.json +286 -243
  73. package/node_modules/aws-sdk/apis/quicksight-2018-04-01.min.json +1194 -1134
  74. package/node_modules/aws-sdk/apis/quicksight-2018-04-01.paginators.json +30 -0
  75. package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +334 -180
  76. package/node_modules/aws-sdk/apis/rds-2014-10-31.paginators.json +6 -0
  77. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.examples.json +501 -1
  78. package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.min.json +354 -125
  79. package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.paginators.json +5 -0
  80. package/node_modules/aws-sdk/apis/route53domains-2014-05-15.min.json +103 -44
  81. package/node_modules/aws-sdk/apis/runtime.sagemaker-2017-05-13.min.json +104 -0
  82. package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +131 -131
  83. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +868 -726
  84. package/node_modules/aws-sdk/apis/scheduler-2021-06-30.min.json +15 -12
  85. package/node_modules/aws-sdk/apis/securityhub-2018-10-26.min.json +100 -61
  86. package/node_modules/aws-sdk/apis/service-quotas-2019-06-24.min.json +45 -23
  87. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.examples.json +244 -0
  88. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.min.json +491 -153
  89. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.paginators.json +5 -0
  90. package/node_modules/aws-sdk/apis/swf-2012-01-25.min.json +12 -3
  91. package/node_modules/aws-sdk/apis/verifiedpermissions-2021-12-01.min.json +127 -33
  92. package/node_modules/aws-sdk/apis/workspaces-web-2020-07-08.min.json +136 -58
  93. package/node_modules/aws-sdk/clients/acmpca.d.ts +1 -1
  94. package/node_modules/aws-sdk/clients/all.d.ts +2 -0
  95. package/node_modules/aws-sdk/clients/all.js +3 -1
  96. package/node_modules/aws-sdk/clients/amplifyuibuilder.d.ts +48 -4
  97. package/node_modules/aws-sdk/clients/apigateway.d.ts +34 -30
  98. package/node_modules/aws-sdk/clients/appflow.d.ts +30 -0
  99. package/node_modules/aws-sdk/clients/applicationinsights.d.ts +342 -2
  100. package/node_modules/aws-sdk/clients/apprunner.d.ts +5 -5
  101. package/node_modules/aws-sdk/clients/autoscaling.d.ts +13 -2
  102. package/node_modules/aws-sdk/clients/backup.d.ts +131 -8
  103. package/node_modules/aws-sdk/clients/batch.d.ts +9 -9
  104. package/node_modules/aws-sdk/clients/budgets.d.ts +1 -1
  105. package/node_modules/aws-sdk/clients/chimesdkmediapipelines.d.ts +228 -2
  106. package/node_modules/aws-sdk/clients/chimesdkvoice.d.ts +17 -0
  107. package/node_modules/aws-sdk/clients/cleanrooms.d.ts +557 -14
  108. package/node_modules/aws-sdk/clients/cloud9.d.ts +1 -1
  109. package/node_modules/aws-sdk/clients/cloudformation.d.ts +21 -0
  110. package/node_modules/aws-sdk/clients/cloudfront.d.ts +14 -10
  111. package/node_modules/aws-sdk/clients/cloudtrail.d.ts +4 -4
  112. package/node_modules/aws-sdk/clients/cloudwatch.d.ts +7 -7
  113. package/node_modules/aws-sdk/clients/cloudwatchevents.d.ts +2 -2
  114. package/node_modules/aws-sdk/clients/codecommit.d.ts +87 -30
  115. package/node_modules/aws-sdk/clients/codestarconnections.d.ts +10 -10
  116. package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +292 -215
  117. package/node_modules/aws-sdk/clients/computeoptimizer.d.ts +214 -2
  118. package/node_modules/aws-sdk/clients/configservice.d.ts +1 -1
  119. package/node_modules/aws-sdk/clients/connect.d.ts +609 -17
  120. package/node_modules/aws-sdk/clients/connectcampaigns.d.ts +30 -23
  121. package/node_modules/aws-sdk/clients/connectparticipant.d.ts +71 -2
  122. package/node_modules/aws-sdk/clients/costexplorer.d.ts +10 -2
  123. package/node_modules/aws-sdk/clients/customerprofiles.d.ts +56 -52
  124. package/node_modules/aws-sdk/clients/datasync.d.ts +170 -39
  125. package/node_modules/aws-sdk/clients/dms.d.ts +1289 -13
  126. package/node_modules/aws-sdk/clients/drs.d.ts +1 -1
  127. package/node_modules/aws-sdk/clients/ec2.d.ts +87 -20
  128. package/node_modules/aws-sdk/clients/ecs.d.ts +32 -32
  129. package/node_modules/aws-sdk/clients/elasticache.d.ts +22 -1
  130. package/node_modules/aws-sdk/clients/elbv2.d.ts +23 -9
  131. package/node_modules/aws-sdk/clients/finspace.d.ts +73 -5
  132. package/node_modules/aws-sdk/clients/fsx.d.ts +108 -33
  133. package/node_modules/aws-sdk/clients/gamelift.d.ts +91 -91
  134. package/node_modules/aws-sdk/clients/globalaccelerator.d.ts +12 -12
  135. package/node_modules/aws-sdk/clients/glue.d.ts +40 -2
  136. package/node_modules/aws-sdk/clients/guardduty.d.ts +4 -4
  137. package/node_modules/aws-sdk/clients/health.d.ts +86 -2
  138. package/node_modules/aws-sdk/clients/identitystore.d.ts +26 -26
  139. package/node_modules/aws-sdk/clients/inspector2.d.ts +101 -2
  140. package/node_modules/aws-sdk/clients/internetmonitor.d.ts +49 -26
  141. package/node_modules/aws-sdk/clients/ivs.d.ts +4 -4
  142. package/node_modules/aws-sdk/clients/ivsrealtime.d.ts +2 -2
  143. package/node_modules/aws-sdk/clients/kafka.d.ts +204 -0
  144. package/node_modules/aws-sdk/clients/kafkaconnect.d.ts +6 -8
  145. package/node_modules/aws-sdk/clients/kinesisvideo.d.ts +2 -2
  146. package/node_modules/aws-sdk/clients/kinesisvideoarchivedmedia.d.ts +16 -16
  147. package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +522 -15
  148. package/node_modules/aws-sdk/clients/mediaconvert.d.ts +52 -6
  149. package/node_modules/aws-sdk/clients/medialive.d.ts +35 -0
  150. package/node_modules/aws-sdk/clients/mediapackage.d.ts +3 -2
  151. package/node_modules/aws-sdk/clients/mediatailor.d.ts +2 -2
  152. package/node_modules/aws-sdk/clients/neptunedata.d.ts +1976 -0
  153. package/node_modules/aws-sdk/clients/neptunedata.js +18 -0
  154. package/node_modules/aws-sdk/clients/networkfirewall.d.ts +9 -9
  155. package/node_modules/aws-sdk/clients/omics.d.ts +619 -21
  156. package/node_modules/aws-sdk/clients/organizations.d.ts +55 -55
  157. package/node_modules/aws-sdk/clients/paymentcryptographydata.d.ts +8 -6
  158. package/node_modules/aws-sdk/clients/pcaconnectorad.d.ts +1606 -0
  159. package/node_modules/aws-sdk/clients/pcaconnectorad.js +18 -0
  160. package/node_modules/aws-sdk/clients/pi.d.ts +382 -5
  161. package/node_modules/aws-sdk/clients/pinpoint.d.ts +69 -5
  162. package/node_modules/aws-sdk/clients/polly.d.ts +2 -2
  163. package/node_modules/aws-sdk/clients/quicksight.d.ts +148 -33
  164. package/node_modules/aws-sdk/clients/rds.d.ts +255 -23
  165. package/node_modules/aws-sdk/clients/rekognition.d.ts +19 -19
  166. package/node_modules/aws-sdk/clients/resiliencehub.d.ts +588 -274
  167. package/node_modules/aws-sdk/clients/route53.d.ts +9 -9
  168. package/node_modules/aws-sdk/clients/route53domains.d.ts +9 -3
  169. package/node_modules/aws-sdk/clients/sagemaker.d.ts +227 -22
  170. package/node_modules/aws-sdk/clients/sagemakerruntime.d.ts +86 -8
  171. package/node_modules/aws-sdk/clients/scheduler.d.ts +16 -3
  172. package/node_modules/aws-sdk/clients/securityhub.d.ts +68 -4
  173. package/node_modules/aws-sdk/clients/servicecatalog.d.ts +16 -16
  174. package/node_modules/aws-sdk/clients/servicequotas.d.ts +138 -80
  175. package/node_modules/aws-sdk/clients/ses.d.ts +158 -158
  176. package/node_modules/aws-sdk/clients/sesv2.d.ts +374 -3
  177. package/node_modules/aws-sdk/clients/sqs.d.ts +9 -9
  178. package/node_modules/aws-sdk/clients/swf.d.ts +18 -1
  179. package/node_modules/aws-sdk/clients/transfer.d.ts +12 -12
  180. package/node_modules/aws-sdk/clients/verifiedpermissions.d.ts +27 -27
  181. package/node_modules/aws-sdk/clients/workspacesweb.d.ts +69 -16
  182. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
  183. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +152 -78
  184. package/node_modules/aws-sdk/dist/aws-sdk.js +3098 -1970
  185. package/node_modules/aws-sdk/dist/aws-sdk.min.js +101 -101
  186. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
  187. package/node_modules/aws-sdk/lib/core.js +1 -1
  188. package/node_modules/aws-sdk/lib/token/sso_token_provider.js +3 -3
  189. package/node_modules/aws-sdk/package.json +1 -1
  190. package/package.json +13 -13
package/node_modules/aws-sdk/clients/ecs.d.ts
@@ -165,11 +165,11 @@ declare class ECS extends Service {
  */
  describeTaskSets(callback?: (err: AWSError, data: ECS.Types.DescribeTaskSetsResponse) => void): Request<ECS.Types.DescribeTaskSetsResponse, AWSError>;
  /**
- * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.
  */
  describeTasks(params: ECS.Types.DescribeTasksRequest, callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
- * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.
  */
  describeTasks(callback?: (err: AWSError, data: ECS.Types.DescribeTasksResponse) => void): Request<ECS.Types.DescribeTasksResponse, AWSError>;
  /**
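
The updated describeTasks note concerns tagged tasks outliving a deleted cluster. As a hedged illustration only, here is a minimal call against the bundled aws-sdk v2 ECS client (the cluster name and task ARN are placeholders); include: ['TAGS'] requests the tags the note refers to:

import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

// Stopped tasks stay visible for at least an hour; per the new note, tagged tasks
// of a deleted cluster can also still be returned here.
ecs.describeTasks(
  { cluster: 'my-cluster', tasks: ['arn:aws:ecs:us-east-1:111122223333:task/my-cluster/…'], include: ['TAGS'] },
  (err, data) => {
    if (err) { console.error(err); return; }
    (data.tasks || []).forEach((t) => console.log(t.taskArn, t.lastStatus, t.tags));
  }
);
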
@@ -269,19 +269,19 @@ declare class ECS extends Service {
  */
  listTaskDefinitions(callback?: (err: AWSError, data: ECS.Types.ListTaskDefinitionsResponse) => void): Request<ECS.Types.ListTaskDefinitionsResponse, AWSError>;
  /**
- * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results.
  */
  listTasks(params: ECS.Types.ListTasksRequest, callback?: (err: AWSError, data: ECS.Types.ListTasksResponse) => void): Request<ECS.Types.ListTasksResponse, AWSError>;
  /**
- * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.
+ * Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task. Recently stopped tasks might appear in the returned results.
  */
  listTasks(callback?: (err: AWSError, data: ECS.Types.ListTasksResponse) => void): Request<ECS.Types.ListTasksResponse, AWSError>;
  /**
- * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
+ * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When you specify serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When you specify awsvpcTrunking, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When you specify containerInsights, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide. When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
  */
  putAccountSetting(params: ECS.Types.PutAccountSettingRequest, callback?: (err: AWSError, data: ECS.Types.PutAccountSettingResponse) => void): Request<ECS.Types.PutAccountSettingResponse, AWSError>;
  /**
- * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
+ * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When you specify serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When you specify awsvpcTrunking, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When you specify containerInsights, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide. When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
  */
  putAccountSetting(callback?: (err: AWSError, data: ECS.Types.PutAccountSettingResponse) => void): Request<ECS.Types.PutAccountSettingResponse, AWSError>;
  /**
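
The new fargateTaskRetirementWaitPeriod setting referenced above is configured through the same putAccountSetting call. A hedged sketch only: it assumes the bundled SDK's SettingName type already accepts the new name and that the value is the wait period in days ('14' is just an example; confirm the accepted values in the ECS documentation):

import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

// How long ECS waits before retiring Fargate tasks that need a security or
// infrastructure update. The value is always passed as a string.
ecs.putAccountSetting(
  { name: 'fargateTaskRetirementWaitPeriod', value: '14' },
  (err, data) => {
    if (err) { console.error(err); return; }
    console.log(data.setting);
  }
);
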
@@ -429,11 +429,11 @@ declare class ECS extends Service {
  */
  updateContainerInstancesState(callback?: (err: AWSError, data: ECS.Types.UpdateContainerInstancesStateResponse) => void): Request<ECS.Types.UpdateContainerInstancesStateResponse, AWSError>;
  /**
- * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. 
After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide. loadBalancers, serviceRegistries
+ * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. 
After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
  */
  updateService(params: ECS.Types.UpdateServiceRequest, callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
- * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. 
After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide. loadBalancers, serviceRegistries
+ * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. 
After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
  */
  updateService(callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
  /**
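
As the comment explains, updateService can roll out a fresh deployment of the same task definition (for example, to pick up a re-pushed my_image:latest). A minimal sketch; cluster and service names are placeholders:

import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

// Force new tasks to be launched with the current image/tag and scale to four tasks;
// minimumHealthyPercent/maximumPercent on the service control the rollout batching.
ecs.updateService(
  { cluster: 'my-cluster', service: 'my-service', desiredCount: 4, forceNewDeployment: true },
  (err, data) => {
    if (err) { console.error(err); return; }
    console.log((data.service && data.service.deployments) || []);
  }
);
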
@@ -549,7 +549,7 @@ declare namespace ECS {
  export type Attributes = Attribute[];
  export interface AutoScalingGroupProvider {
  /**
- * The Amazon Resource Name (ARN) that identifies the Auto Scaling group.
+ * The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.
  */
  autoScalingGroupArn: String;
  /**
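
The relaxed autoScalingGroupArn doc means the field can carry either the full ARN or just the Auto Scaling group name when creating a capacity provider. An illustrative sketch only; the provider and group names are placeholders:

import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

ecs.createCapacityProvider(
  {
    name: 'my-capacity-provider',
    autoScalingGroupProvider: {
      autoScalingGroupArn: 'my-asg', // group name form; the full ARN is still accepted
      managedScaling: { status: 'ENABLED', targetCapacity: 100 },
    },
  },
  (err, data) => {
    if (err) { console.error(err); return; }
    console.log(data.capacityProvider && data.capacityProvider.capacityProviderArn);
  }
);
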
@@ -888,11 +888,11 @@ declare namespace ECS {
  */
  dependsOn?: ContainerDependencies;
  /**
- * Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
+ * Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds.
  */
  startTimeout?: BoxedInteger;
  /**
- * Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
+ * Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds.
896
896
  */
897
897
  stopTimeout?: BoxedInteger;
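A minimal sketch of how these two timeouts might be set together when registering a task definition with the AWS SDK for JavaScript v2. The family name, images, and memory values are hypothetical; the dependency on containerB reaching HEALTHY mirrors the scenario in the startTimeout description above.

```typescript
// Hedged sketch: per-container start/stop timeouts in a task definition.
import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

const params: ECS.Types.RegisterTaskDefinitionRequest = {
  family: 'timeout-demo',                       // hypothetical family name
  containerDefinitions: [
    {
      name: 'containerB',
      image: 'public.ecr.aws/docker/library/redis:latest',
      memory: 128,
      essential: false,
      // A health check is needed for a HEALTHY dependency condition.
      healthCheck: { command: ['CMD-SHELL', 'redis-cli ping || exit 1'] },
    },
    {
      name: 'containerA',
      image: 'public.ecr.aws/docker/library/busybox:latest',
      memory: 128,
      essential: true,
      // containerA waits up to 90 s for containerB to become HEALTHY;
      // if it doesn't, containerA gives up and the task moves to STOPPED.
      dependsOn: [{ containerName: 'containerB', condition: 'HEALTHY' }],
      startTimeout: 90,   // valid range: 2-120 seconds
      stopTimeout: 60,    // max 120 seconds; defaults to 30 if omitted
    },
  ],
};

ecs.registerTaskDefinition(params, (err, data) => {
  if (err) console.error(err);
  else console.log(data.taskDefinition?.taskDefinitionArn);
});
```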
898
898
  /**
@@ -960,7 +960,7 @@ declare namespace ECS {
960
960
  */
961
961
  healthCheck?: HealthCheck;
962
962
  /**
963
- * A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. We don't recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that's started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.
963
+ * A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure the net.ipv4.tcp_keepalive_time setting to maintain longer-lived connections. We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. For tasks that use the awsvpc network mode, the container that's started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers'. This parameter is not supported for Windows containers. It is supported for tasks hosted on Fargate only if the tasks use platform version 1.4.0 or later (Linux); it isn't supported for Windows containers on Fargate.
964
964
  */
965
965
  systemControls?: SystemControls;
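A short, hedged sketch of the systemControls shape described above; the container name and image are placeholders.

```typescript
// Setting a namespaced kernel parameter via systemControls.
import ECS = require('aws-sdk/clients/ecs');

const container: ECS.Types.ContainerDefinition = {
  name: 'app',
  image: 'public.ecr.aws/docker/library/nginx:latest',
  // Maps to: docker run --sysctl net.ipv4.tcp_keepalive_time=120
  systemControls: [
    { namespace: 'net.ipv4.tcp_keepalive_time', value: '120' },
  ],
};
```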
966
966
  /**
@@ -972,7 +972,7 @@ declare namespace ECS {
972
972
  */
973
973
  firelensConfiguration?: FirelensConfiguration;
974
974
  /**
975
- * A list of ARNs in SSM or Amazon S3 to a credential spec (credspeccode&gt;) file that configures a container for Active Directory authentication. This parameter is only used with domainless authentication. The format for each ARN is credentialspecdomainless:MyARN. Replace MyARN with the ARN in SSM or Amazon S3. The credspec must provide a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
975
+ * A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of dockerSecurityOptions. The maximum number of ARNs is 1. There are two formats for each ARN. credentialspecdomainless:MyARN You use credentialspecdomainless:MyARN to provide a CredSpec with an additional section for a secret in Secrets Manager. You provide the login credentials to the domain in the secret. Each task that runs on any container instance can join different domains. You can use this format without joining the container instance to a domain. credentialspec:MyARN You use credentialspec:MyARN to provide a CredSpec for a single domain. You must join the container instance to the domain before you start any tasks that use this task definition. In both formats, replace MyARN with the ARN in SSM or Amazon S3. If you provide a credentialspecdomainless:MyARN, the credspec must provide an ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
976
976
  */
977
977
  credentialSpecs?: StringList;
978
978
  }
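A hedged sketch of the credentialSpecs formats described above; the container name, image, and SSM parameter ARN are hypothetical.

```typescript
// Referencing a domainless CredSpec stored in SSM Parameter Store.
import ECS = require('aws-sdk/clients/ecs');

const container: ECS.Types.ContainerDefinition = {
  name: 'windows-app',
  image: 'mcr.microsoft.com/windows/servercore/iis',   // placeholder image
  // The prefix selects the format: credentialspecdomainless: or credentialspec:
  credentialSpecs: [
    'credentialspecdomainless:arn:aws:ssm:us-east-1:111122223333:parameter/gmsa-credspec',
  ],
};
```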
@@ -1030,7 +1030,7 @@ declare namespace ECS {
1030
1030
  */
1031
1031
  agentConnected?: Boolean;
1032
1032
  /**
1033
- * The number of tasks on the container instance that are in the RUNNING status.
1033
+ * The number of tasks on the container instance that have a desired status (desiredStatus) of RUNNING.
1034
1034
  */
1035
1035
  runningTasksCount?: Integer;
1036
1036
  /**
@@ -1313,7 +1313,7 @@ declare namespace ECS {
1313
1313
  */
1314
1314
  externalId?: String;
1315
1315
  /**
1316
- * The task definition for the tasks in the task set to use.
1316
+ * The task definition for the tasks in the task set to use. If a revision isn't specified, the latest ACTIVE revision is used.
1317
1317
  */
1318
1318
  taskDefinition: String;
1319
1319
  /**
@@ -2518,11 +2518,11 @@ declare namespace ECS {
2518
2518
  }
2519
2519
  export interface LoadBalancer {
2520
2520
  /**
2521
- * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
2521
+ * The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
2522
2522
  */
2523
2523
  targetGroupArn?: String;
2524
2524
  /**
2525
- * The name of the load balancer to associate with the Amazon ECS service or task set. A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.
2525
+ * The name of the load balancer to associate with the Amazon ECS service or task set. If you are using an Application Load Balancer or a Network Load Balancer, the load balancer name parameter should be omitted.
2526
2526
  */
2527
2527
  loadBalancerName?: String;
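A minimal sketch, with hypothetical cluster, service, and target group names, showing where targetGroupArn and the related LoadBalancer fields are passed when creating a service.

```typescript
// Attaching an ALB target group to a service via loadBalancers.
import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

ecs.createService({
  cluster: 'demo-cluster',                 // hypothetical cluster
  serviceName: 'web',
  taskDefinition: 'web-task',              // latest ACTIVE revision is used
  desiredCount: 2,
  loadBalancers: [
    {
      // targetGroupArn is used for ALB/NLB; loadBalancerName would be used
      // for a Classic Load Balancer instead.
      targetGroupArn: 'arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/web/0123456789abcdef',
      containerName: 'web',
      containerPort: 80,
    },
  ],
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.service?.serviceArn);
});
```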
2528
2528
  /**
@@ -2605,7 +2605,7 @@ declare namespace ECS {
2605
2605
  */
2606
2606
  minimumScalingStepSize?: ManagedScalingStepSize;
2607
2607
  /**
2608
- * The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1 is used.
2608
+ * The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale-in process is not affected by this parameter. If this parameter is omitted, the default value of 10000 is used.
2609
2609
  */
2610
2610
  maximumScalingStepSize?: ManagedScalingStepSize;
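A hedged sketch, with a hypothetical name and Auto Scaling group ARN, of a capacity provider whose managed scaling caps each scale-out step via maximumScalingStepSize.

```typescript
// Capacity provider with managed scaling and a bounded scale-out step.
import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

ecs.createCapacityProvider({
  name: 'ec2-capacity',                                  // hypothetical name
  autoScalingGroupProvider: {
    autoScalingGroupArn: 'arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:uuid:autoScalingGroupName/ecs-asg',
    managedScaling: {
      status: 'ENABLED',
      targetCapacity: 100,
      minimumScalingStepSize: 1,
      maximumScalingStepSize: 25,   // at most 25 instances added per scale-out
    },
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.capacityProvider?.capacityProviderArn);
});
```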
2611
2611
  /**
@@ -2726,7 +2726,7 @@ declare namespace ECS {
2726
2726
  */
2727
2727
  containerPort?: BoxedInteger;
2728
2728
  /**
2729
- * The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows: For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy. For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy. If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort. If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
2729
+ * The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows: For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy. For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy. If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort. If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
2730
2730
  */
2731
2731
  hostPort?: BoxedInteger;
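A minimal sketch of the dynamic host-port mapping described above for the bridge network mode; the container port is arbitrary.

```typescript
// Setting hostPort to 0 (or omitting it) in bridge mode lets the agent pick
// a host port from the instance's ephemeral port range.
import ECS = require('aws-sdk/clients/ecs');

const portMappings: ECS.Types.PortMappingList = [
  {
    containerPort: 8080,
    hostPort: 0,            // dynamic assignment on the container instance
    protocol: 'tcp',
  },
];
```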
2732
2732
  /**
@@ -2782,11 +2782,11 @@ declare namespace ECS {
2782
2782
  export type ProxyConfigurationType = "APPMESH"|string;
2783
2783
  export interface PutAccountSettingDefaultRequest {
2784
2784
  /**
2785
- * The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.
2785
+ * The resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the ENI limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the default wait time to retire a Fargate task due to required maintenance is affected. When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide. When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to set the wait time to retire a Fargate task to the default. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
2786
2786
  */
2787
2787
  name: SettingName;
2788
2788
  /**
2789
- * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.
2789
+ * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off. When you specify fargateTaskRetirementWaitPeriod for the name, the following are the valid values: 0 - immediately retire the tasks and patch Fargate. There is no advance notification; your tasks are retired immediately, and Fargate is patched without any notification. 7 - wait 7 calendar days to retire the tasks and patch Fargate. 14 - wait 14 calendar days to retire the tasks and patch Fargate.
2790
2790
  */
2791
2791
  value: String;
2792
2792
  }
@@ -2798,15 +2798,15 @@ declare namespace ECS {
2798
2798
  }
2799
2799
  export interface PutAccountSettingRequest {
2800
2800
  /**
2801
- * The Amazon ECS resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If fargateFIPSMode is specified, Fargate FIPS 140 compliance is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide.
2801
+ * The Amazon ECS resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the wait time to retire a Fargate task is affected.
2802
2802
  */
2803
2803
  name: SettingName;
2804
2804
  /**
2805
- * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.
2805
+ * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off. When you specify fargateTaskRetirementWaitPeriod for the name, the following are the valid values: 0 - immediately retire the tasks and patch Fargate. There is no advance notification; your tasks are retired immediately, and Fargate is patched without any notification. 7 - wait 7 calendar days to retire the tasks and patch Fargate. 14 - wait 14 calendar days to retire the tasks and patch Fargate.
2806
2806
  */
2807
2807
  value: String;
2808
2808
  /**
2809
- * The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it modifies the account setting for all users, roles, and the root user of the account unless a user or role explicitly overrides these settings. If this field is omitted, the setting is changed only for the authenticated user. Federated users assume the account setting of the root user and can't have explicit account settings set for them.
2809
+ * The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it modifies the account setting for all users, roles, and the root user of the account unless a user or role explicitly overrides these settings. If this field is omitted, the setting is changed only for the authenticated user. You must use the root user when you set the Fargate wait time (fargateTaskRetirementWaitPeriod). Federated users assume the account setting of the root user and can't have explicit account settings set for them.
2810
2810
  */
2811
2811
  principalArn?: String;
2812
2812
  }
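A hedged sketch, assuming a hypothetical account ID, of setting fargateTaskRetirementWaitPeriod with PutAccountSetting; per the note above, the root user must be the principal for this particular setting.

```typescript
// Set the Fargate task retirement wait period to 7 calendar days.
import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

ecs.putAccountSetting({
  name: 'fargateTaskRetirementWaitPeriod',
  value: '7',
  // Hypothetical account ID; omit principalArn to change only the caller.
  principalArn: 'arn:aws:iam::111122223333:root',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.setting);
});
```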
@@ -2942,7 +2942,7 @@ declare namespace ECS {
2942
2942
  */
2943
2943
  tags?: Tags;
2944
2944
  /**
2945
- * The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference. If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security. This parameter is not supported for Windows containers or tasks run on Fargate.
2945
+ * The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.
2946
2946
  */
2947
2947
  pidMode?: PidMode;
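A minimal sketch, with placeholder images, of a Fargate task definition that shares one process namespace across containers via pidMode.

```typescript
// pidMode 'task' lets a monitoring sidecar see the main container's processes.
import ECS = require('aws-sdk/clients/ecs');

const taskDef: ECS.Types.RegisterTaskDefinitionRequest = {
  family: 'pid-demo',                       // hypothetical family name
  pidMode: 'task',                          // only valid value on Fargate Linux
  requiresCompatibilities: ['FARGATE'],
  networkMode: 'awsvpc',
  cpu: '256',
  memory: '512',
  containerDefinitions: [
    { name: 'app', image: 'public.ecr.aws/docker/library/nginx:latest', essential: true },
    { name: 'monitor', image: 'public.ecr.aws/docker/library/busybox:latest', essential: false },
  ],
};
```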
2948
2948
  /**
@@ -3384,7 +3384,7 @@ declare namespace ECS {
3384
3384
  */
3385
3385
  principalArn?: String;
3386
3386
  }
3387
- export type SettingName = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights"|"fargateFIPSMode"|"tagResourceAuthorization"|string;
3387
+ export type SettingName = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights"|"fargateFIPSMode"|"tagResourceAuthorization"|"fargateTaskRetirementWaitPeriod"|string;
3388
3388
  export type Settings = Setting[];
3389
3389
  export type SortOrder = "ASC"|"DESC"|string;
3390
3390
  export type StabilityStatus = "STEADY_STATE"|"STABILIZING"|string;
@@ -3582,7 +3582,7 @@ declare namespace ECS {
3582
3582
  */
3583
3583
  namespace?: String;
3584
3584
  /**
3585
- * The value for the namespaced kernel parameter that's specified in namespace.
3585
+ * The namespaced kernel parameter to set a value for. Valid IPC namespace values: "kernel.msgmax" | "kernel.msgmnb" | "kernel.msgmni" | "kernel.sem" | "kernel.shmall" | "kernel.shmmax" | "kernel.shmmni" | "kernel.shm_rmid_forced", and Sysctls that start with "fs.mqueue.*". Valid network namespace values: Sysctls that start with "net.*". All of these values are supported by Fargate.
3586
3586
  */
3587
3587
  value?: String;
3588
3588
  }
@@ -3724,7 +3724,7 @@ declare namespace ECS {
3724
3724
  */
3725
3725
  startedBy?: String;
3726
3726
  /**
3727
- * The stop code indicating why a task was stopped. The stoppedReason might contain additional details. The following are valid values: TaskFailedToStart EssentialContainerExited UserInitiated TerminationNotice ServiceSchedulerInitiated SpotInterruption
3727
+ * The stop code indicating why a task was stopped. The stoppedReason might contain additional details. For more information about stop codes, see Stopped tasks error codes in the Amazon ECS User Guide. The following are valid values: TaskFailedToStart, EssentialContainerExited, UserInitiated, TerminationNotice, ServiceSchedulerInitiated, and SpotInterruption.
3728
3728
  */
3729
3729
  stopCode?: TaskStopCode;
3730
3730
  /**
@@ -3736,7 +3736,7 @@ declare namespace ECS {
3736
3736
  */
3737
3737
  stoppedReason?: String;
3738
3738
  /**
3739
- * The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPED.
3739
+ * The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPING.
3740
3740
  */
3741
3741
  stoppingAt?: Timestamp;
3742
3742
  /**
@@ -3814,7 +3814,7 @@ declare namespace ECS {
3814
3814
  */
3815
3815
  runtimePlatform?: RuntimePlatform;
3816
3816
  /**
3817
- * The task launch types the task definition was validated against. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
3817
+ * The task launch types the task definition was validated against. The valid values are EC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
3818
3818
  */
3819
3819
  requiresCompatibilities?: CompatibilityList;
3820
3820
  /**
@@ -3830,7 +3830,7 @@ declare namespace ECS {
3830
3830
  */
3831
3831
  inferenceAccelerators?: InferenceAccelerators;
3832
3832
  /**
3833
- * The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference. If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security. This parameter is not supported for Windows containers or tasks run on Fargate.
3833
+ * The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.
3834
3834
  */
3835
3835
  pidMode?: PidMode;
3836
3836
  /**
@@ -4319,7 +4319,7 @@ declare namespace ECS {
4319
4319
  }
4320
4320
  export interface Volume {
4321
4321
  /**
4322
- * The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
4322
+ * The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. This is required when you use an Amazon EFS volume.
4323
4323
  */
4324
4324
  name?: String;
4325
4325
  /**
@@ -532,6 +532,14 @@ declare class ElastiCache extends Service {
532
532
  * Represents the input of a TestFailover operation, which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following: A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete, you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence: Replication group message: Test Failover API called for node group <node-group-id> Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed Cache cluster message: Recovering cache nodes <node-id> Cache cluster message: Finished recovery for cache nodes <node-id> For more information, see Viewing ElastiCache Events in the ElastiCache User Guide and DescribeEvents in the ElastiCache API Reference. Also see Testing Multi-AZ in the ElastiCache User Guide.
533
533
  */
534
534
  testFailover(callback?: (err: AWSError, data: ElastiCache.Types.TestFailoverResult) => void): Request<ElastiCache.Types.TestFailoverResult, AWSError>;
535
+ /**
536
+ * Async API that tests the connection between the source and target replication group.
537
+ */
538
+ testMigration(params: ElastiCache.Types.TestMigrationMessage, callback?: (err: AWSError, data: ElastiCache.Types.TestMigrationResponse) => void): Request<ElastiCache.Types.TestMigrationResponse, AWSError>;
539
+ /**
540
+ * Async API that tests the connection between the source and target replication group.
541
+ */
542
+ testMigration(callback?: (err: AWSError, data: ElastiCache.Types.TestMigrationResponse) => void): Request<ElastiCache.Types.TestMigrationResponse, AWSError>;
535
543
  /**
536
544
  * Waits for the cacheClusterAvailable state by periodically calling the underlying ElastiCache.describeCacheClusters operation every 15 seconds (at most 40 times). Waits until the ElastiCache cluster is available.
537
545
  */
@@ -2413,7 +2421,7 @@ declare namespace ElastiCache {
2413
2421
  */
2414
2422
  GlobalReplicationGroupId: String;
2415
2423
  /**
2416
- * The number of node groups you wish to add
2424
+ * The total number of node groups that you want.
2417
2425
  */
2418
2426
  NodeGroupCount: Integer;
2419
2427
  /**
@@ -3872,6 +3880,19 @@ declare namespace ElastiCache {
3872
3880
  export interface TestFailoverResult {
3873
3881
  ReplicationGroup?: ReplicationGroup;
3874
3882
  }
3883
+ export interface TestMigrationMessage {
3884
+ /**
3885
+ * The ID of the replication group to which data is to be migrated.
3886
+ */
3887
+ ReplicationGroupId: String;
3888
+ /**
3889
+ * The list of endpoints from which data should be migrated. The list should have only one element.
3890
+ */
3891
+ CustomerNodeEndpointList: CustomerNodeEndpointList;
3892
+ }
3893
+ export interface TestMigrationResponse {
3894
+ ReplicationGroup?: ReplicationGroup;
3895
+ }
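A hedged sketch of calling the new testMigration API with the TestMigrationMessage shape defined above; the replication group ID and source endpoint are hypothetical.

```typescript
// Verify connectivity from a self-managed Redis endpoint to the target
// replication group before a migration. The endpoint list has one element.
import ElastiCache = require('aws-sdk/clients/elasticache');

const elasticache = new ElastiCache({ region: 'us-east-1' });

const params: ElastiCache.Types.TestMigrationMessage = {
  ReplicationGroupId: 'target-group',                   // hypothetical ID
  CustomerNodeEndpointList: [
    { Address: 'redis.example.internal', Port: 6379 },  // hypothetical source
  ],
};

elasticache.testMigration(params, (err, data) => {
  if (err) console.error(err);
  else console.log(data.ReplicationGroup?.Status);
});
```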
3875
3896
  export interface TimeRangeFilter {
3876
3897
  /**
3877
3898
  * The start time of the time range filter
@@ -93,11 +93,11 @@ declare class ELBv2 extends Service {
93
93
  */
94
94
  deleteTargetGroup(callback?: (err: AWSError, data: ELBv2.Types.DeleteTargetGroupOutput) => void): Request<ELBv2.Types.DeleteTargetGroupOutput, AWSError>;
95
95
  /**
96
- * Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.
96
+ * Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer. Note: If the specified target does not exist, the action returns successfully.
97
97
  */
98
98
  deregisterTargets(params: ELBv2.Types.DeregisterTargetsInput, callback?: (err: AWSError, data: ELBv2.Types.DeregisterTargetsOutput) => void): Request<ELBv2.Types.DeregisterTargetsOutput, AWSError>;
99
99
  /**
100
- * Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.
100
+ * Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer. Note: If the specified target does not exist, the action returns successfully.
101
101
  */
102
102
  deregisterTargets(callback?: (err: AWSError, data: ELBv2.Types.DeregisterTargetsOutput) => void): Request<ELBv2.Types.DeregisterTargetsOutput, AWSError>;
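A minimal sketch, with a hypothetical target group ARN and instance ID, of deregisterTargets; as noted above, the call returns successfully even if the target doesn't exist.

```typescript
// Deregister an EC2 instance target from a target group.
import ELBv2 = require('aws-sdk/clients/elbv2');

const elbv2 = new ELBv2({ region: 'us-east-1' });

elbv2.deregisterTargets({
  TargetGroupArn: 'arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/web/0123456789abcdef',
  Targets: [{ Id: 'i-0abcd1234efgh5678', Port: 80 }],
}, (err) => {
  if (err) console.error(err);
  else console.log('Deregistration initiated');
});
```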
103
103
  /**
@@ -269,11 +269,11 @@ declare class ELBv2 extends Service {
269
269
  */
270
270
  setRulePriorities(callback?: (err: AWSError, data: ELBv2.Types.SetRulePrioritiesOutput) => void): Request<ELBv2.Types.SetRulePrioritiesOutput, AWSError>;
271
271
  /**
272
- * Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups. You can't specify a security group for a Network Load Balancer or Gateway Load Balancer.
272
+ * Associates the specified security groups with the specified Application Load Balancer or Network Load Balancer. The specified security groups override the previously associated security groups. You can't perform this operation on a Network Load Balancer unless you specified a security group for the load balancer when you created it. You can't associate a security group with a Gateway Load Balancer.
273
273
  */
274
274
  setSecurityGroups(params: ELBv2.Types.SetSecurityGroupsInput, callback?: (err: AWSError, data: ELBv2.Types.SetSecurityGroupsOutput) => void): Request<ELBv2.Types.SetSecurityGroupsOutput, AWSError>;
275
275
  /**
276
- * Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups. You can't specify a security group for a Network Load Balancer or Gateway Load Balancer.
276
+ * Associates the specified security groups with the specified Application Load Balancer or Network Load Balancer. The specified security groups override the previously associated security groups. You can't perform this operation on a Network Load Balancer unless you specified a security group for the load balancer when you created it. You can't associate a security group with a Gateway Load Balancer.
277
277
  */
278
278
  setSecurityGroups(callback?: (err: AWSError, data: ELBv2.Types.SetSecurityGroupsOutput) => void): Request<ELBv2.Types.SetSecurityGroupsOutput, AWSError>;
279
279
  /**
@@ -602,7 +602,7 @@ declare namespace ELBv2 {
602
602
  */
603
603
  SubnetMappings?: SubnetMappings;
604
604
  /**
605
- * [Application Load Balancers] The IDs of the security groups for the load balancer.
605
+ * [Application Load Balancers and Network Load Balancers] The IDs of the security groups for the load balancer.
606
606
  */
607
607
  SecurityGroups?: SecurityGroups;
608
608
  /**
@@ -1025,6 +1025,8 @@ declare namespace ELBv2 {
1025
1025
  TargetHealthDescriptions?: TargetHealthDescriptions;
1026
1026
  }
1027
1027
  export type Description = string;
1028
+ export type EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = string;
1029
+ export type EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum = "on"|"off"|string;
1028
1030
  export interface FixedResponseActionConfig {
1029
1031
  /**
1030
1032
  * The message.
@@ -1188,6 +1190,10 @@ declare namespace ELBv2 {
1188
1190
  * [Application Load Balancers on Outposts] The ID of the customer-owned address pool.
1189
1191
  */
1190
1192
  CustomerOwnedIpv4Pool?: CustomerOwnedIpv4Pool;
1193
+ /**
1194
+ * Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.
1195
+ */
1196
+ EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic?: EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic;
1191
1197
  }
1192
1198
  export interface LoadBalancerAddress {
1193
1199
  /**
@@ -1599,12 +1605,20 @@ declare namespace ELBv2 {
1599
1605
  * The IDs of the security groups.
1600
1606
  */
1601
1607
  SecurityGroups: SecurityGroups;
1608
+ /**
1609
+ * Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink. The default is on.
1610
+ */
1611
+ EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic?: EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum;
1602
1612
  }
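A hedged sketch, with hypothetical ARNs and IDs, of SetSecurityGroups on a Network Load Balancer using the new EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic field.

```typescript
// Associate security groups with an NLB and relax inbound-rule evaluation
// for traffic that arrives through AWS PrivateLink.
import ELBv2 = require('aws-sdk/clients/elbv2');

const elbv2 = new ELBv2({ region: 'us-east-1' });

const params: ELBv2.Types.SetSecurityGroupsInput = {
  LoadBalancerArn: 'arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/demo-nlb/0123456789abcdef',
  SecurityGroups: ['sg-0123456789abcdef0'],
  EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic: 'off',  // default is 'on'
};

elbv2.setSecurityGroups(params, (err, data) => {
  if (err) console.error(err);
  else console.log(data.SecurityGroupIds, data.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic);
});
```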
1603
1613
  export interface SetSecurityGroupsOutput {
1604
1614
  /**
1605
1615
  * The IDs of the security groups associated with the load balancer.
1606
1616
  */
1607
1617
  SecurityGroupIds?: SecurityGroups;
1618
+ /**
1619
+ * Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.
1620
+ */
1621
+ EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic?: EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum;
1608
1622
  }
1609
1623
  export interface SetSubnetsInput {
1610
1624
  /**
@@ -1620,7 +1634,7 @@ declare namespace ELBv2 {
1620
1634
  */
1621
1635
  SubnetMappings?: SubnetMappings;
1622
1636
  /**
1623
- * [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. .
1637
+ * [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.
1624
1638
  */
1625
1639
  IpAddressType?: IpAddressType;
1626
1640
  }
@@ -1718,7 +1732,7 @@ declare namespace ELBv2 {
1718
1732
  */
1719
1733
  Id: TargetId;
1720
1734
  /**
1721
- * The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is alb, the targeted Application Load Balancer must have at least one listener whose port matches the target group port. Not used if the target is a Lambda function.
1735
+ * The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is alb, the targeted Application Load Balancer must have at least one listener whose port matches the target group port. This parameter is not used if the target is a Lambda function.
1722
1736
  */
1723
1737
  Port?: Port;
1724
1738
  /**
@@ -1741,7 +1755,7 @@ declare namespace ELBv2 {
1741
1755
  */
1742
1756
  Protocol?: ProtocolEnum;
1743
1757
  /**
1744
- * The port on which the targets are listening. Not used if the target is a Lambda function.
1758
+ * The port on which the targets are listening. This parameter is not used if the target is a Lambda function.
1745
1759
  */
1746
1760
  Port?: Port;
1747
1761
  /**
@@ -1785,7 +1799,7 @@ declare namespace ELBv2 {
1785
1799
  */
1786
1800
  Matcher?: Matcher;
1787
1801
  /**
1788
- * The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
1802
+ * The Amazon Resource Name (ARN) of the load balancer that routes traffic to this target group. You can use each target group with only one load balancer.
1789
1803
  */
1790
1804
  LoadBalancerArns?: LoadBalancerArns;
1791
1805
  /**