cdk-lambda-subminute 2.0.411 → 2.0.413
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +4 -4
- package/lib/cdk-lambda-subminute.js +3 -3
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/codebuild-2016-10-06.min.json +2 -1
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +129 -70
- package/node_modules/aws-sdk/apis/globalaccelerator-2018-08-08.min.json +2 -3
- package/node_modules/aws-sdk/apis/medialive-2017-10-14.min.json +219 -199
- package/node_modules/aws-sdk/apis/securityhub-2018-10-26.min.json +4 -3
- package/node_modules/aws-sdk/clients/codebuild.d.ts +16 -12
- package/node_modules/aws-sdk/clients/ec2.d.ts +85 -7
- package/node_modules/aws-sdk/clients/ecs.d.ts +15 -15
- package/node_modules/aws-sdk/clients/firehose.d.ts +1 -1
- package/node_modules/aws-sdk/clients/globalaccelerator.d.ts +41 -33
- package/node_modules/aws-sdk/clients/kendra.d.ts +33 -33
- package/node_modules/aws-sdk/clients/medialive.d.ts +37 -0
- package/node_modules/aws-sdk/clients/rolesanywhere.d.ts +5 -5
- package/node_modules/aws-sdk/clients/sagemaker.d.ts +1 -1
- package/node_modules/aws-sdk/clients/securityhub.d.ts +9 -5
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +6 -6
- package/node_modules/aws-sdk/dist/aws-sdk.js +134 -74
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +71 -71
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +5 -5
| @@ -2836,14 +2836,14 @@ | |
| 2836 2836 | 
             
                      "Lte": {
         | 
| 2837 2837 | 
             
                        "type": "double"
         | 
| 2838 2838 | 
             
                      },
         | 
| 2839 | 
            -
                      "Eq": {
         | 
| 2840 | 
            -
                        "type": "double"
         | 
| 2841 | 
            -
                      },
         | 
| 2842 2839 | 
             
                      "Gt": {
         | 
| 2843 2840 | 
             
                        "type": "double"
         | 
| 2844 2841 | 
             
                      },
         | 
| 2845 2842 | 
             
                      "Lt": {
         | 
| 2846 2843 | 
             
                        "type": "double"
         | 
| 2844 | 
            +
                      },
         | 
| 2845 | 
            +
                      "Eq": {
         | 
| 2846 | 
            +
                        "type": "double"
         | 
| 2847 2847 | 
             
                      }
         | 
| 2848 2848 | 
             
                    }
         | 
| 2849 2849 | 
             
                  }
         | 
| @@ -9445,6 +9445,7 @@ | |
| 9445 9445 | 
             
                            "type": "double"
         | 
| 9446 9446 | 
             
                          },
         | 
| 9447 9447 | 
             
                          "ExploitAvailable": {},
         | 
| 9448 | 
            +
                          "LastKnownExploitAt": {},
         | 
| 9448 9449 | 
             
                          "CodeVulnerabilities": {
         | 
| 9449 9450 | 
             
                            "type": "list",
         | 
| 9450 9451 | 
             
                            "member": {
         | 
| @@ -416,7 +416,7 @@ declare namespace CodeBuild { | |
| 416 416 | 
             
              export type ArtifactNamespace = "NONE"|"BUILD_ID"|string;
         | 
| 417 417 | 
             
              export type ArtifactPackaging = "NONE"|"ZIP"|string;
         | 
| 418 418 | 
             
              export type ArtifactsType = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS"|string;
         | 
| 419 | 
            -
              export type AuthType = "OAUTH"|"BASIC_AUTH"|"PERSONAL_ACCESS_TOKEN"|string;
         | 
| 419 | 
            +
              export type AuthType = "OAUTH"|"BASIC_AUTH"|"PERSONAL_ACCESS_TOKEN"|"CODECONNECTIONS"|string;
         | 
| 420 420 | 
             
              export interface BatchDeleteBuildsInput {
         | 
| 421 421 | 
             
                /**
         | 
| 422 422 | 
             
                 * The IDs of the builds to delete.
         | 
| @@ -1493,7 +1493,7 @@ declare namespace CodeBuild { | |
| 1493 1493 | 
             
              export type FleetSortByType = "NAME"|"CREATED_TIME"|"LAST_MODIFIED_TIME"|string;
         | 
| 1494 1494 | 
             
              export interface FleetStatus {
         | 
| 1495 1495 | 
             
                /**
         | 
| 1496 | 
            -
                 * The status code of the compute fleet. Valid values include:    CREATING: The compute fleet is being created.    UPDATING: The compute fleet is being updated.    ROTATING: The compute fleet is being rotated.    DELETING: The compute fleet is being deleted.    CREATE_FAILED: The compute fleet has failed to create.    UPDATE_ROLLBACK_FAILED: The compute fleet has failed to update and could not rollback to previous state.    ACTIVE: The compute fleet has succeeded and is active.  
         | 
| 1496 | 
            +
                 * The status code of the compute fleet. Valid values include:    CREATING: The compute fleet is being created.    UPDATING: The compute fleet is being updated.    ROTATING: The compute fleet is being rotated.    PENDING_DELETION: The compute fleet is pending deletion.    DELETING: The compute fleet is being deleted.    CREATE_FAILED: The compute fleet has failed to create.    UPDATE_ROLLBACK_FAILED: The compute fleet has failed to update and could not rollback to previous state.    ACTIVE: The compute fleet has succeeded and is active.  
         | 
| 1497 1497 | 
             
                 */
         | 
| 1498 1498 | 
             
                statusCode?: FleetStatusCode;
         | 
| 1499 1499 | 
             
                /**
         | 
| @@ -2251,11 +2251,11 @@ declare namespace CodeBuild { | |
| 2251 2251 | 
             
              export type ProjectSortByType = "NAME"|"CREATED_TIME"|"LAST_MODIFIED_TIME"|string;
         | 
| 2252 2252 | 
             
              export interface ProjectSource {
         | 
| 2253 2253 | 
             
                /**
         | 
| 2254 | 
            -
                 * The type of repository that contains the source code to be built. Valid values include:    BITBUCKET: The source code is in a Bitbucket repository.    CODECOMMIT: The source code is in an CodeCommit repository.    CODEPIPELINE: The source code settings are specified in the source action of a pipeline in CodePipeline.    GITHUB: The source code is in a GitHub  | 
| 2254 | 
            +
                 * The type of repository that contains the source code to be built. Valid values include:    BITBUCKET: The source code is in a Bitbucket repository.    CODECOMMIT: The source code is in an CodeCommit repository.    CODEPIPELINE: The source code settings are specified in the source action of a pipeline in CodePipeline.    GITHUB: The source code is in a GitHub repository.    GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository.    GITLAB: The source code is in a GitLab repository.    GITLAB_SELF_MANAGED: The source code is in a self-managed GitLab repository.    NO_SOURCE: The project does not have input source code.    S3: The source code is in an Amazon S3 bucket.  
         | 
| 2255 2255 | 
             
                 */
         | 
| 2256 2256 | 
             
                type: SourceType;
         | 
| 2257 2257 | 
             
                /**
         | 
| 2258 | 
            -
                 * Information about the location of the source code to be built. Valid values include:   For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.   For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>).   For source code in an Amazon S3 input bucket, one of the following.    The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip).    The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/).      For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.   For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.    If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location. 
         | 
| 2258 | 
            +
                 * Information about the location of the source code to be built. Valid values include:   For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.   For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>).   For source code in an Amazon S3 input bucket, one of the following.    The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip).    The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/).      For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.   For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitLab account. Use the CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with GitLab, on the Connections Authorize application page, choose Authorize. Then on the CodeStar Connections Create GitLab connection page, choose Connect to GitLab. (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to override the default connection and use this connection instead, set the auth object's type value to CODECONNECTIONS in the source object.   For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH.    If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location. 
         | 
| 2259 2259 | 
             
                 */
         | 
| 2260 2260 | 
             
                location?: String;
         | 
| 2261 2261 | 
             
                /**
         | 
| @@ -2275,7 +2275,7 @@ declare namespace CodeBuild { | |
| 2275 2275 | 
             
                 */
         | 
| 2276 2276 | 
             
                auth?: SourceAuth;
         | 
| 2277 2277 | 
             
                /**
         | 
| 2278 | 
            -
                 *  Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.  To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider.  If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
         | 
| 2278 | 
            +
                 *  Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.  To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider.  If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
         | 
| 2279 2279 | 
             
                 */
         | 
| 2280 2280 | 
             
                reportBuildStatus?: WrapperBoolean;
         | 
| 2281 2281 | 
             
                /**
         | 
| @@ -2297,7 +2297,7 @@ declare namespace CodeBuild { | |
| 2297 2297 | 
             
                 */
         | 
| 2298 2298 | 
             
                sourceIdentifier: String;
         | 
| 2299 2299 | 
             
                /**
         | 
| 2300 | 
            -
                 * The source version for the corresponding source identifier. If specified, must be one of:   For CodeCommit: the commit ID, branch, or Git tag to use.   For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.   For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.   For Amazon S3: the version ID of the object that represents the build input ZIP file to use.    For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. 
         | 
| 2300 | 
            +
                 * The source version for the corresponding source identifier. If specified, must be one of:   For CodeCommit: the commit ID, branch, or Git tag to use.   For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.   For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.   For Amazon S3: the version ID of the object that represents the build input ZIP file to use.    For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. 
         | 
| 2301 2301 | 
             
                 */
         | 
| 2302 2302 | 
             
                sourceVersion: String;
         | 
| 2303 2303 | 
             
              }
         | 
| @@ -2591,12 +2591,12 @@ declare namespace CodeBuild { | |
| 2591 2591 | 
             
              export type SecurityGroupIds = NonEmptyString[];
         | 
| 2592 2592 | 
             
              export type SensitiveNonEmptyString = string;
         | 
| 2593 2593 | 
             
              export type SensitiveString = string;
         | 
| 2594 | 
            -
              export type ServerType = "GITHUB"|"BITBUCKET"|"GITHUB_ENTERPRISE"|string;
         | 
| 2594 | 
            +
              export type ServerType = "GITHUB"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"GITLAB"|"GITLAB_SELF_MANAGED"|string;
         | 
| 2595 2595 | 
             
              export type SharedResourceSortByType = "ARN"|"MODIFIED_TIME"|string;
         | 
| 2596 2596 | 
             
              export type SortOrderType = "ASCENDING"|"DESCENDING"|string;
         | 
| 2597 2597 | 
             
              export interface SourceAuth {
         | 
| 2598 2598 | 
             
                /**
         | 
| 2599 | 
            -
                 * | 
| 2599 | 
            +
                 * The authorization type to use. Valid options are OAUTH or CODECONNECTIONS.
         | 
| 2600 2600 | 
             
                 */
         | 
| 2601 2601 | 
             
                type: SourceAuthType;
         | 
| 2602 2602 | 
             
                /**
         | 
| @@ -2604,23 +2604,27 @@ declare namespace CodeBuild { | |
| 2604 2604 | 
             
                 */
         | 
| 2605 2605 | 
             
                resource?: String;
         | 
| 2606 2606 | 
             
              }
         | 
| 2607 | 
            -
              export type SourceAuthType = "OAUTH"|string;
         | 
| 2607 | 
            +
              export type SourceAuthType = "OAUTH"|"CODECONNECTIONS"|string;
         | 
| 2608 2608 | 
             
              export interface SourceCredentialsInfo {
         | 
| 2609 2609 | 
             
                /**
         | 
| 2610 2610 | 
             
                 *  The Amazon Resource Name (ARN) of the token. 
         | 
| 2611 2611 | 
             
                 */
         | 
| 2612 2612 | 
             
                arn?: NonEmptyString;
         | 
| 2613 2613 | 
             
                /**
         | 
| 2614 | 
            -
                 *  The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET. 
         | 
| 2614 | 
            +
                 *  The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET. 
         | 
| 2615 2615 | 
             
                 */
         | 
| 2616 2616 | 
             
                serverType?: ServerType;
         | 
| 2617 2617 | 
             
                /**
         | 
| 2618 | 
            -
                 *  The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or  | 
| 2618 | 
            +
                 *  The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS. 
         | 
| 2619 2619 | 
             
                 */
         | 
| 2620 2620 | 
             
                authType?: AuthType;
         | 
| 2621 | 
            +
                /**
         | 
| 2622 | 
            +
                 * The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS.
         | 
| 2623 | 
            +
                 */
         | 
| 2624 | 
            +
                resource?: String;
         | 
| 2621 2625 | 
             
              }
         | 
| 2622 2626 | 
             
              export type SourceCredentialsInfos = SourceCredentialsInfo[];
         | 
| 2623 | 
            -
              export type SourceType = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE"|string;
         | 
| 2627 | 
            +
              export type SourceType = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"GITLAB"|"GITLAB_SELF_MANAGED"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE"|string;
         | 
| 2624 2628 | 
             
              export interface StartBuildBatchInput {
         | 
| 2625 2629 | 
             
                /**
         | 
| 2626 2630 | 
             
                 * The name of the project.
         | 
| @@ -3532,6 +3532,14 @@ declare class EC2 extends Service { | |
| 3532 3532 | 
             
               * Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.
         | 
| 3533 3533 | 
             
               */
         | 
| 3534 3534 | 
             
              getImageBlockPublicAccessState(callback?: (err: AWSError, data: EC2.Types.GetImageBlockPublicAccessStateResult) => void): Request<EC2.Types.GetImageBlockPublicAccessStateResult, AWSError>;
         | 
| 3535 | 
            +
              /**
         | 
| 3536 | 
            +
               * Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services
 Region. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
         | 
| 3537 | 
            +
               */
         | 
| 3538 | 
            +
              getInstanceMetadataDefaults(params: EC2.Types.GetInstanceMetadataDefaultsRequest, callback?: (err: AWSError, data: EC2.Types.GetInstanceMetadataDefaultsResult) => void): Request<EC2.Types.GetInstanceMetadataDefaultsResult, AWSError>;
         | 
| 3539 | 
            +
              /**
         | 
| 3540 | 
            +
               * Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services
 Region. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
         | 
| 3541 | 
            +
               */
         | 
| 3542 | 
            +
              getInstanceMetadataDefaults(callback?: (err: AWSError, data: EC2.Types.GetInstanceMetadataDefaultsResult) => void): Request<EC2.Types.GetInstanceMetadataDefaultsResult, AWSError>;
         | 
| 3535 3543 | 
             
              /**
         | 
| 3536 3544 | 
             
               * Returns a list of instance types with the specified instance attributes. You can use the response to preview the instance types without launching instances. Note that the response does not consider capacity. When you specify multiple parameters, you get instance types that satisfy all of the specified parameters. If you specify multiple values for a parameter, you get instance types that satisfy any of the specified values. For more information, see Preview instance types with specified attributes, Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide, and Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide.
         | 
| 3537 3545 | 
             
               */
         | 
| @@ -4020,6 +4028,14 @@ declare class EC2 extends Service { | |
| 4020 4028 | 
             
               * Modifies the recovery behavior of your instance to disable simplified automatic recovery or set the recovery behavior to default. The default configuration will not enable simplified automatic recovery for an unsupported instance type. For more information, see Simplified automatic recovery.
         | 
| 4021 4029 | 
             
               */
         | 
| 4022 4030 | 
             
              modifyInstanceMaintenanceOptions(callback?: (err: AWSError, data: EC2.Types.ModifyInstanceMaintenanceOptionsResult) => void): Request<EC2.Types.ModifyInstanceMaintenanceOptionsResult, AWSError>;
         | 
| 4031 | 
            +
              /**
         | 
| 4032 | 
            +
               * Modifies the default instance metadata service (IMDS) settings at the account level in the specified Amazon Web Services
 Region.  To remove a parameter's account-level default setting, specify no-preference. At instance launch, the value will come from the AMI, or from the launch parameter if specified. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide. 
         | 
| 4033 | 
            +
               */
         | 
| 4034 | 
            +
              modifyInstanceMetadataDefaults(params: EC2.Types.ModifyInstanceMetadataDefaultsRequest, callback?: (err: AWSError, data: EC2.Types.ModifyInstanceMetadataDefaultsResult) => void): Request<EC2.Types.ModifyInstanceMetadataDefaultsResult, AWSError>;
         | 
| 4035 | 
            +
              /**
         | 
| 4036 | 
            +
               * Modifies the default instance metadata service (IMDS) settings at the account level in the specified Amazon Web Services
 Region.  To remove a parameter's account-level default setting, specify no-preference. At instance launch, the value will come from the AMI, or from the launch parameter if specified. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide. 
         | 
| 4037 | 
            +
               */
         | 
| 4038 | 
            +
              modifyInstanceMetadataDefaults(callback?: (err: AWSError, data: EC2.Types.ModifyInstanceMetadataDefaultsResult) => void): Request<EC2.Types.ModifyInstanceMetadataDefaultsResult, AWSError>;
         | 
| 4023 4039 | 
             
              /**
         | 
| 4024 4040 | 
             
               * Modify the instance metadata parameters on a running or stopped instance. When you modify the parameters on a stopped instance, they are applied when the instance is started. When you modify the parameters on a running instance, the API responds with a state of “pending”. After the parameter modifications are successfully applied to the instance, the state of the modifications changes from “pending” to “applied” in subsequent describe-instances API calls. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.
         | 
| 4025 4041 | 
             
               */
         | 
| @@ -7221,6 +7237,7 @@ declare namespace EC2 { | |
| 7221 7237 | 
             
              export type BootModeTypeList = BootModeType[];
         | 
| 7222 7238 | 
             
              export type BootModeValues = "legacy-bios"|"uefi"|"uefi-preferred"|string;
         | 
| 7223 7239 | 
             
              export type BoxedDouble = number;
         | 
| 7240 | 
            +
              export type BoxedInteger = number;
         | 
| 7224 7241 | 
             
              export type BundleId = string;
         | 
| 7225 7242 | 
             
              export type BundleIdStringList = BundleId[];
         | 
| 7226 7243 | 
             
              export interface BundleInstanceRequest {
         | 
| @@ -12061,6 +12078,8 @@ declare namespace EC2 { | |
| 12061 12078 | 
             
              export type DedicatedHostFlag = boolean;
         | 
| 12062 12079 | 
             
              export type DedicatedHostId = string;
         | 
| 12063 12080 | 
             
              export type DedicatedHostIdList = DedicatedHostId[];
         | 
| 12081 | 
            +
              export type DefaultInstanceMetadataEndpointState = "disabled"|"enabled"|"no-preference"|string;
         | 
| 12082 | 
            +
              export type DefaultInstanceMetadataTagsState = "disabled"|"enabled"|"no-preference"|string;
         | 
| 12064 12083 | 
             
              export type DefaultNetworkCardIndex = number;
         | 
| 12065 12084 | 
             
              export type DefaultRouteTableAssociationValue = "enable"|"disable"|string;
         | 
| 12066 12085 | 
             
              export type DefaultRouteTablePropagationValue = "enable"|"disable"|string;
         | 
| @@ -21627,6 +21646,18 @@ declare namespace EC2 { | |
| 21627 21646 | 
             
                 */
         | 
| 21628 21647 | 
             
                ImageBlockPublicAccessState?: String;
         | 
| 21629 21648 | 
             
              }
         | 
| 21649 | 
            +
              export interface GetInstanceMetadataDefaultsRequest {
         | 
| 21650 | 
            +
                /**
         | 
| 21651 | 
            +
                 * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
         | 
| 21652 | 
            +
                 */
         | 
| 21653 | 
            +
                DryRun?: Boolean;
         | 
| 21654 | 
            +
              }
         | 
| 21655 | 
            +
              export interface GetInstanceMetadataDefaultsResult {
         | 
| 21656 | 
            +
                /**
         | 
| 21657 | 
            +
                 * The account-level default IMDS settings.
         | 
| 21658 | 
            +
                 */
         | 
| 21659 | 
            +
                AccountLevel?: InstanceMetadataDefaultsResponse;
         | 
| 21660 | 
            +
              }
         | 
| 21630 21661 | 
             
              export interface GetInstanceTypesFromInstanceRequirementsRequest {
         | 
| 21631 21662 | 
             
                /**
         | 
| 21632 21663 | 
             
                 * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
         | 
| @@ -23131,7 +23162,7 @@ declare namespace EC2 { | |
| 23131 23162 | 
             
                 */
         | 
| 23132 23163 | 
             
                Hypervisor?: HypervisorType;
         | 
| 23133 23164 | 
             
                /**
         | 
| 23134 | 
            -
                 * The  | 
| 23165 | 
            +
                 * The owner alias (amazon | aws-marketplace).
         | 
| 23135 23166 | 
             
                 */
         | 
| 23136 23167 | 
             
                ImageOwnerAlias?: String;
         | 
| 23137 23168 | 
             
                /**
         | 
| @@ -24478,14 +24509,32 @@ declare namespace EC2 { | |
| 24478 24509 | 
             
                SpotOptions?: SpotMarketOptions;
         | 
| 24479 24510 | 
             
              }
         | 
| 24480 24511 | 
             
              export type InstanceMatchCriteria = "open"|"targeted"|string;
         | 
| 24512 | 
            +
              export interface InstanceMetadataDefaultsResponse {
         | 
| 24513 | 
            +
                /**
         | 
| 24514 | 
            +
                 * Indicates whether IMDSv2 is required.    optional – IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.    required – IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.  
         | 
| 24515 | 
            +
                 */
         | 
| 24516 | 
            +
                HttpTokens?: HttpTokensState;
         | 
| 24517 | 
            +
                /**
         | 
| 24518 | 
            +
                 * The maximum number of hops that the metadata token can travel.
         | 
| 24519 | 
            +
                 */
         | 
| 24520 | 
            +
                HttpPutResponseHopLimit?: BoxedInteger;
         | 
| 24521 | 
            +
                /**
         | 
| 24522 | 
            +
                 * Indicates whether the IMDS endpoint for an instance is enabled or disabled. When disabled, the instance metadata can't be accessed.
         | 
| 24523 | 
            +
                 */
         | 
| 24524 | 
            +
                HttpEndpoint?: InstanceMetadataEndpointState;
         | 
| 24525 | 
            +
                /**
         | 
| 24526 | 
            +
                 * Indicates whether access to instance tags from the instance metadata is enabled or disabled. For more information, see Work with instance tags using the instance metadata in the Amazon EC2 User Guide.
         | 
| 24527 | 
            +
                 */
         | 
| 24528 | 
            +
                InstanceMetadataTags?: InstanceMetadataTagsState;
         | 
| 24529 | 
            +
              }
         | 
| 24481 24530 | 
             
              export type InstanceMetadataEndpointState = "disabled"|"enabled"|string;
         | 
| 24482 24531 | 
             
              export interface InstanceMetadataOptionsRequest {
         | 
| 24483 24532 | 
             
                /**
         | 
| 24484 | 
            -
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional | 
| 24533 | 
            +
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.    required - IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.   Default:   If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0 and the account level default is set to no-preference, the default is required.   If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0, but the account level default is set to V1 or V2, the default is optional.   The default value can also be affected by other combinations of parameters. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
         | 
| 24485 24534 | 
             
                 */
         | 
| 24486 24535 | 
             
                HttpTokens?: HttpTokensState;
         | 
| 24487 24536 | 
             
                /**
         | 
| 24488 | 
            -
                 * The  | 
| 24537 | 
            +
                 * The maximum number of hops that the metadata token can travel. Possible values: Integers from 1 to 64
         | 
| 24489 24538 | 
             
                 */
         | 
| 24490 24539 | 
             
                HttpPutResponseHopLimit?: Integer;
         | 
| 24491 24540 | 
             
                /**
         | 
| @@ -24507,11 +24556,11 @@ declare namespace EC2 { | |
| 24507 24556 | 
             
                 */
         | 
| 24508 24557 | 
             
                State?: InstanceMetadataOptionsState;
         | 
| 24509 24558 | 
             
                /**
         | 
| 24510 | 
            -
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional | 
| 24559 | 
            +
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.    required - IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.  
         | 
| 24511 24560 | 
             
                 */
         | 
| 24512 24561 | 
             
                HttpTokens?: HttpTokensState;
         | 
| 24513 24562 | 
             
                /**
         | 
| 24514 | 
            -
                 * The  | 
| 24563 | 
            +
                 * The maximum number of hops that the metadata token can travel. Possible values: Integers from 1 to 64 
         | 
| 24515 24564 | 
             
                 */
         | 
| 24516 24565 | 
             
                HttpPutResponseHopLimit?: Integer;
         | 
| 24517 24566 | 
             
                /**
         | 
| @@ -27922,6 +27971,7 @@ declare namespace EC2 { | |
| 27922 27971 | 
             
                Max?: Integer;
         | 
| 27923 27972 | 
             
              }
         | 
| 27924 27973 | 
             
              export type MemorySize = number;
         | 
| 27974 | 
            +
              export type MetadataDefaultHttpTokensState = "optional"|"required"|"no-preference"|string;
         | 
| 27925 27975 | 
             
              export interface MetricPoint {
         | 
| 27926 27976 | 
             
                /**
         | 
| 27927 27977 | 
             
                 * The start date for the metric point. The starting date for the metric point. The starting time must be formatted as yyyy-mm-ddThh:mm:ss. For example, 2022-06-10T12:00:00.000Z.
         | 
| @@ -28529,13 +28579,41 @@ declare namespace EC2 { | |
| 28529 28579 | 
             
                 */
         | 
| 28530 28580 | 
             
                AutoRecovery?: InstanceAutoRecoveryState;
         | 
| 28531 28581 | 
             
              }
         | 
| 28582 | 
            +
              export interface ModifyInstanceMetadataDefaultsRequest {
         | 
| 28583 | 
            +
                /**
         | 
| 28584 | 
            +
                 * Indicates whether IMDSv2 is required.    optional – IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.    required – IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.  
         | 
| 28585 | 
            +
                 */
         | 
| 28586 | 
            +
                HttpTokens?: MetadataDefaultHttpTokensState;
         | 
| 28587 | 
            +
                /**
         | 
| 28588 | 
            +
                 * The maximum number of hops that the metadata token can travel. Minimum: 1  Maximum: 64 
         | 
| 28589 | 
            +
                 */
         | 
| 28590 | 
            +
                HttpPutResponseHopLimit?: BoxedInteger;
         | 
| 28591 | 
            +
                /**
         | 
| 28592 | 
            +
                 * Enables or disables the IMDS endpoint on an instance. When disabled, the instance metadata can't be accessed.
         | 
| 28593 | 
            +
                 */
         | 
| 28594 | 
            +
                HttpEndpoint?: DefaultInstanceMetadataEndpointState;
         | 
| 28595 | 
            +
                /**
         | 
| 28596 | 
            +
                 * Enables or disables access to an instance's tags from the instance metadata. For more information, see Work with instance tags using the instance metadata in the Amazon EC2 User Guide.
         | 
| 28597 | 
            +
                 */
         | 
| 28598 | 
            +
                InstanceMetadataTags?: DefaultInstanceMetadataTagsState;
         | 
| 28599 | 
            +
                /**
         | 
| 28600 | 
            +
                 * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
         | 
| 28601 | 
            +
                 */
         | 
| 28602 | 
            +
                DryRun?: Boolean;
         | 
| 28603 | 
            +
              }
         | 
| 28604 | 
            +
              export interface ModifyInstanceMetadataDefaultsResult {
         | 
| 28605 | 
            +
                /**
         | 
| 28606 | 
            +
                 * If the request succeeds, the response returns true. If the request fails, no response is returned, and instead an error message is returned.
         | 
| 28607 | 
            +
                 */
         | 
| 28608 | 
            +
                Return?: Boolean;
         | 
| 28609 | 
            +
              }
         | 
| 28532 28610 | 
             
              export interface ModifyInstanceMetadataOptionsRequest {
         | 
| 28533 28611 | 
             
                /**
         | 
| 28534 28612 | 
             
                 * The ID of the instance.
         | 
| 28535 28613 | 
             
                 */
         | 
| 28536 28614 | 
             
                InstanceId: InstanceId;
         | 
| 28537 28615 | 
             
                /**
         | 
| 28538 | 
            -
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.    required - IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.   Default: | 
| 28616 | 
            +
                 * Indicates whether IMDSv2 is required.    optional - IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.    required - IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.   Default:   If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0 and the account level default is set to no-preference, the default is required.   If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0, but the account level default is set to V1 or V2, the default is optional.   The default value can also be affected by other combinations of parameters. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
         | 
| 28539 28617 | 
             
                 */
         | 
| 28540 28618 | 
             
                HttpTokens?: HttpTokensState;
         | 
| 28541 28619 | 
             
                /**
         | 
| @@ -35026,7 +35104,7 @@ declare namespace EC2 { | |
| 35026 35104 | 
             
                 */
         | 
| 35027 35105 | 
             
                Filters: FilterList;
         | 
| 35028 35106 | 
             
                /**
         | 
| 35029 | 
            -
                 * The maximum number of routes to return.
         | 
| 35107 | 
            +
                 * The maximum number of routes to return. If a value is not provided, the default is 1000.
         | 
| 35030 35108 | 
             
                 */
         | 
| 35031 35109 | 
             
                MaxResults?: TransitGatewayMaxResults;
         | 
| 35032 35110 | 
             
                /**
         | 
| @@ -29,19 +29,19 @@ declare class ECS extends Service { | |
| 29 29 | 
             
               */
         | 
| 30 30 | 
             
              createCluster(callback?: (err: AWSError, data: ECS.Types.CreateClusterResponse) => void): Request<ECS.Types.CreateClusterResponse, AWSError>;
         | 
| 31 31 | 
             
              /**
         | 
| 32 | 
            -
               * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.   | 
| 32 | 
            +
               * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available:    REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.    
DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.   You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . 
If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. 
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. 
         | 
| 33 33 | 
             
               */
         | 
| 34 34 | 
             
              createService(params: ECS.Types.CreateServiceRequest, callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
         | 
| 35 35 | 
             
              /**
         | 
| 36 | 
            -
               * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.   | 
| 36 | 
            +
               * Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available:    REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.    
DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.   You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . 
If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. 
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. 
         | 
| 37 37 | 
             
               */
         | 
| 38 38 | 
             
              createService(callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
         | 
| 39 39 | 
             
              /**
         | 
| 40 | 
            -
               * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
         | 
| 40 | 
            +
               * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
         | 
| 41 41 | 
             
               */
         | 
| 42 42 | 
             
              createTaskSet(params: ECS.Types.CreateTaskSetRequest, callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
         | 
| 43 43 | 
             
              /**
         | 
| 44 | 
            -
               * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
         | 
| 44 | 
            +
               * Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
         | 
| 45 45 | 
             
               */
         | 
| 46 46 | 
             
              createTaskSet(callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
         | 
| 47 47 | 
             
              /**
         | 
| @@ -325,19 +325,19 @@ declare class ECS extends Service { | |
| 325 325 | 
             
               */
         | 
| 326 326 | 
             
              registerTaskDefinition(callback?: (err: AWSError, data: ECS.Types.RegisterTaskDefinitionResponse) => void): Request<ECS.Types.RegisterTaskDefinitionResponse, AWSError>;
         | 
| 327 327 | 
             
              /**
         | 
| 328 | 
            -
               * Starts a new task using the specified task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. | 
| 328 | 
            +
               * Starts a new task using the specified task definition.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following:   Confirm the state of the resource before you run a command to modify it. 
Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.   Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.  
         | 
| 329 329 | 
             
               */
         | 
| 330 330 | 
             
              runTask(params: ECS.Types.RunTaskRequest, callback?: (err: AWSError, data: ECS.Types.RunTaskResponse) => void): Request<ECS.Types.RunTaskResponse, AWSError>;
         | 
| 331 331 | 
             
              /**
         | 
| 332 | 
            -
               * Starts a new task using the specified task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. | 
| 332 | 
            +
               * Starts a new task using the specified task definition.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following:   Confirm the state of the resource before you run a command to modify it. 
Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.   Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.  
         | 
| 333 333 | 
             
               */
         | 
| 334 334 | 
             
              runTask(callback?: (err: AWSError, data: ECS.Types.RunTaskResponse) => void): Request<ECS.Types.RunTaskResponse, AWSError>;
         | 
| 335 335 | 
             
              /**
         | 
| 336 | 
            -
               * Starts a new task from the specified task definition on the specified container instance or instances.  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. | 
| 336 | 
            +
   * Starts a new task from the specified task definition on the specified container instance or instances.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.  Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
         | 
| 337 337 | 
             
               */
         | 
| 338 338 | 
             
              startTask(params: ECS.Types.StartTaskRequest, callback?: (err: AWSError, data: ECS.Types.StartTaskResponse) => void): Request<ECS.Types.StartTaskResponse, AWSError>;
         | 
| 339 339 | 
             
              /**
         | 
| 340 | 
            -
               * Starts a new task from the specified task definition on the specified container instance or instances.  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. | 
| 340 | 
            +
   * Starts a new task from the specified task definition on the specified container instance or instances.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.  Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
         | 
| 341 341 | 
             
               */
         | 
| 342 342 | 
             
              startTask(callback?: (err: AWSError, data: ECS.Types.StartTaskResponse) => void): Request<ECS.Types.StartTaskResponse, AWSError>;
         | 
| 343 343 | 
             
              /**
         | 
| @@ -429,11 +429,11 @@ declare class ECS extends Service { | |
| 429 429 | 
             
               */
         | 
| 430 430 | 
             
              updateContainerInstancesState(callback?: (err: AWSError, data: ECS.Types.UpdateContainerInstancesStateResponse) => void): Request<ECS.Types.UpdateContainerInstancesStateResponse, AWSError>;
         | 
| 431 431 | 
             
              /**
         | 
| 432 | 
            -
               * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. 
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.  If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.  You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.   If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.   
The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).   When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.   Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.   By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.   Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.   Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.     
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:    Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.   Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.    You must have a service-linked role when you update any of the following service properties:    loadBalancers,    serviceRegistries    For more information about the role see the CreateService request parameter  role .  
         | 
| 432 | 
            +
               * Modifies the parameters of a service.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. 
If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.  If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.  You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.   If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.   The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).   When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.   Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.   By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.   Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.   Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.     
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:    Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.   Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.    You must have a service-linked role when you update any of the following service properties:    loadBalancers,    serviceRegistries    For more information about the role see the CreateService request parameter  role .  
         | 
| 433 433 | 
             
               */
         | 
| 434 434 | 
             
              updateService(params: ECS.Types.UpdateServiceRequest, callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
         | 
| 435 435 | 
             
              /**
         | 
| 436 | 
            -
               * Modifies the parameters of a service. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. 
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.  If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.  You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.   If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.   
The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).   When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.   Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.   By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.   Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.   Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.     
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:    Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.   Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.    You must have a service-linked role when you update any of the following service properties:    loadBalancers,    serviceRegistries    For more information about the role see the CreateService request parameter  role .  
         | 
| 436 | 
            +
               * Modifies the parameters of a service.  The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.  For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.  You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. 
If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.  If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.  You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.   If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.   The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).   When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.   Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.   By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.   Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.   Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.     
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:    Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.   Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.    You must have a service-linked role when you update any of the following service properties:    loadBalancers,    serviceRegistries    For more information about the role see the CreateService request parameter  role .  
         | 
| 437 437 | 
             
               */
         | 
| 438 438 | 
             
              updateService(callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
         | 
| 439 439 | 
             
              /**
         | 
| @@ -511,7 +511,7 @@ declare namespace ECS { | |
| 511 511 | 
             
                 */
         | 
| 512 512 | 
             
                status?: String;
         | 
| 513 513 | 
             
                /**
         | 
| 514 | 
            -
                 * Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address. For Service Connect services, this includes portName, clientAliases, discoveryName, and ingressPortOverride. For  | 
| 514 | 
            +
                 * Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address. For Service Connect services, this includes portName, clientAliases, discoveryName, and ingressPortOverride. For Elastic Block Storage, this includes roleArn, deleteOnTermination, volumeName, volumeId, and statusReason (only when the attachment fails to create or attach).
         | 
| 515 515 | 
             
                 */
         | 
| 516 516 | 
             
                details?: AttachmentDetails;
         | 
| 517 517 | 
             
              }
         | 
| @@ -1237,7 +1237,7 @@ declare namespace ECS { | |
| 1237 1237 | 
             
                 */
         | 
| 1238 1238 | 
             
                clientToken?: String;
         | 
| 1239 1239 | 
             
                /**
         | 
| 1240 | 
            -
                 * The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.  Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS  | 
| 1240 | 
            +
                 * The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.  Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS Developer Guide.  The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster. The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster. A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.
         | 
| 1241 1241 | 
             
                 */
         | 
| 1242 1242 | 
             
                launchType?: LaunchType;
         | 
| 1243 1243 | 
             
                /**
         | 
| @@ -1289,7 +1289,7 @@ declare namespace ECS { | |
| 1289 1289 | 
             
                 */
         | 
| 1290 1290 | 
             
                enableECSManagedTags?: Boolean;
         | 
| 1291 1291 | 
             
                /**
         | 
| 1292 | 
            -
                 * Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action. The default is NONE.
         | 
| 1292 | 
            +
                 * Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action. You must set this to a value other than NONE when you use Cost Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide. The default is NONE.
         | 
| 1293 1293 | 
             
                 */
         | 
| 1294 1294 | 
             
                propagateTags?: PropagateTags;
         | 
| 1295 1295 | 
             
                /**
         | 
| @@ -1947,7 +1947,7 @@ declare namespace ECS { | |
| 1947 1947 | 
             
                 */
         | 
| 1948 1948 | 
             
                value: String;
         | 
| 1949 1949 | 
             
                /**
         | 
| 1950 | 
            -
                 * The file type to use. The only supported value is s3.
         | 
| 1950 | 
            +
                 * The file type to use. Environment files are objects in Amazon S3. The only supported value is s3.
         | 
| 1951 1951 | 
             
                 */
         | 
| 1952 1952 | 
             
                type: EnvironmentFileType;
         | 
| 1953 1953 | 
             
              }
         | 
| @@ -1956,7 +1956,7 @@ declare namespace ECS { | |
| 1956 1956 | 
             
              export type EnvironmentVariables = KeyValuePair[];
         | 
| 1957 1957 | 
             
              export interface EphemeralStorage {
         | 
| 1958 1958 | 
             
                /**
         | 
| 1959 | 
            -
                 * The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is  | 
| 1959 | 
            +
                 * The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 20 GiB and the maximum supported value is 200 GiB.
         | 
| 1960 1960 | 
             
                 */
         | 
| 1961 1961 | 
             
                sizeInGiB: Integer;
         | 
| 1962 1962 | 
             
              }
         | 
| @@ -478,7 +478,7 @@ declare namespace Firehose { | |
| 478 478 | 
             
                 */
         | 
| 479 479 | 
             
                HttpEndpointDestinationConfiguration?: HttpEndpointDestinationConfiguration;
         | 
| 480 480 | 
             
                /**
         | 
| 481 | 
            -
                 * A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream.
         | 
| 481 | 
            +
                 * A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream. If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an AccessDeniedException such as the following.  AccessDeniedException  User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy. For an example IAM policy, see Tag example. 
         | 
| 482 482 | 
             
                 */
         | 
| 483 483 | 
             
                Tags?: TagDeliveryStreamInputTagList;
         | 
| 484 484 | 
             
                /**
         |