aws-sdk 2.1624.0 → 2.1626.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +1 -1
  2. package/apis/chatbot-2017-10-11.min.json +138 -24
  3. package/apis/cloudformation-2010-05-15.min.json +48 -43
  4. package/apis/cloudfront-2020-05-31.min.json +3 -0
  5. package/apis/glue-2017-03-31.min.json +350 -343
  6. package/apis/kms-2014-11-01.min.json +3 -0
  7. package/apis/lightsail-2016-11-28.min.json +7 -1
  8. package/apis/mailmanager-2023-10-17.examples.json +5 -0
  9. package/apis/mailmanager-2023-10-17.min.json +1836 -0
  10. package/apis/mailmanager-2023-10-17.paginators.json +58 -0
  11. package/apis/metadata.json +3 -0
  12. package/apis/opensearch-2021-01-01.min.json +18 -12
  13. package/apis/pi-2018-02-27.min.json +24 -17
  14. package/apis/wafv2-2019-07-29.min.json +42 -32
  15. package/clients/all.d.ts +1 -0
  16. package/clients/all.js +2 -1
  17. package/clients/chatbot.d.ts +100 -0
  18. package/clients/cloudformation.d.ts +11 -2
  19. package/clients/glue.d.ts +20 -3
  20. package/clients/kms.d.ts +6 -6
  21. package/clients/lightsail.d.ts +29 -25
  22. package/clients/mailmanager.d.ts +2320 -0
  23. package/clients/mailmanager.js +18 -0
  24. package/clients/opensearch.d.ts +14 -1
  25. package/clients/pi.d.ts +15 -8
  26. package/clients/rds.d.ts +4 -4
  27. package/clients/storagegateway.d.ts +7 -7
  28. package/clients/wafv2.d.ts +34 -4
  29. package/dist/aws-sdk-core-react-native.js +5 -3
  30. package/dist/aws-sdk-react-native.js +53 -14
  31. package/dist/aws-sdk.js +63 -47
  32. package/dist/aws-sdk.min.js +53 -53
  33. package/lib/config_service_placeholders.d.ts +2 -0
  34. package/lib/core.js +1 -1
  35. package/lib/query/query_param_serializer.js +3 -1
  36. package/package.json +1 -1
package/clients/chatbot.d.ts CHANGED
@@ -163,6 +163,30 @@ declare class Chatbot extends Service {
  * Lists all Microsoft Teams user identities with a mapped role.
  */
  listMicrosoftTeamsUserIdentities(callback?: (err: AWSError, data: Chatbot.Types.ListMicrosoftTeamsUserIdentitiesResult) => void): Request<Chatbot.Types.ListMicrosoftTeamsUserIdentitiesResult, AWSError>;
+ /**
+ * Retrieves the list of tags applied to a configuration.
+ */
+ listTagsForResource(params: Chatbot.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: Chatbot.Types.ListTagsForResourceResponse) => void): Request<Chatbot.Types.ListTagsForResourceResponse, AWSError>;
+ /**
+ * Retrieves the list of tags applied to a configuration.
+ */
+ listTagsForResource(callback?: (err: AWSError, data: Chatbot.Types.ListTagsForResourceResponse) => void): Request<Chatbot.Types.ListTagsForResourceResponse, AWSError>;
+ /**
+ * Applies the supplied tags to a configuration.
+ */
+ tagResource(params: Chatbot.Types.TagResourceRequest, callback?: (err: AWSError, data: Chatbot.Types.TagResourceResponse) => void): Request<Chatbot.Types.TagResourceResponse, AWSError>;
+ /**
+ * Applies the supplied tags to a configuration.
+ */
+ tagResource(callback?: (err: AWSError, data: Chatbot.Types.TagResourceResponse) => void): Request<Chatbot.Types.TagResourceResponse, AWSError>;
+ /**
+ * Removes the supplied tags from a configuration
+ */
+ untagResource(params: Chatbot.Types.UntagResourceRequest, callback?: (err: AWSError, data: Chatbot.Types.UntagResourceResponse) => void): Request<Chatbot.Types.UntagResourceResponse, AWSError>;
+ /**
+ * Removes the supplied tags from a configuration
+ */
+ untagResource(callback?: (err: AWSError, data: Chatbot.Types.UntagResourceResponse) => void): Request<Chatbot.Types.UntagResourceResponse, AWSError>;
  /**
  * Update Chatbot account level preferences
  */
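The tagging operations added above follow the usual aws-sdk v2 callback/promise pattern. A minimal TypeScript sketch; the configuration ARN is a placeholder, not taken from this diff:

// Minimal sketch (aws-sdk v2, TypeScript): tagging a Chatbot configuration.
import AWS = require('aws-sdk');

const chatbot = new AWS.Chatbot({ region: 'us-east-1' });
const configurationArn = 'arn:aws:chatbot::123456789012:chat-configuration/slack-channel/example'; // placeholder

async function demoTagging(): Promise<void> {
  // Apply a tag; note Chatbot uses TagKey/TagValue rather than Key/Value.
  await chatbot.tagResource({
    ResourceARN: configurationArn,
    Tags: [{ TagKey: 'team', TagValue: 'ops' }],
  }).promise();

  // List the tags now applied to the configuration.
  const { Tags } = await chatbot.listTagsForResource({ ResourceARN: configurationArn }).promise();
  console.log(Tags);

  // Remove the tag again by key.
  await chatbot.untagResource({ ResourceARN: configurationArn, TagKeys: ['team'] }).promise();
}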
@@ -207,6 +231,7 @@ declare namespace Chatbot {
  */
  TrainingDataCollectionEnabled?: BooleanAccountPreference;
  }
+ export type AmazonResourceName = string;
  export type Arn = string;
  export type AwsUserIdentity = string;
  export type BooleanAccountPreference = boolean;
@@ -236,6 +261,10 @@ declare namespace Chatbot {
  * Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.Logging levels include ERROR, INFO, or NONE.
  */
  LoggingLevel?: CustomerCwLogLevel;
+ /**
+ * A list of tags applied to the configuration.
+ */
+ Tags?: Tags;
  }
  export type ChimeWebhookConfigurationList = ChimeWebhookConfiguration[];
  export type ChimeWebhookDescription = string;
@@ -281,6 +310,10 @@ declare namespace Chatbot {
  * Logging levels include ERROR, INFO, or NONE.
  */
  LoggingLevel?: CustomerCwLogLevel;
+ /**
+ * A list of tags to apply to the configuration.
+ */
+ Tags?: Tags;
  }
  export interface CreateChimeWebhookConfigurationResult {
  /**
@@ -325,6 +358,10 @@ declare namespace Chatbot {
  * Enables use of a user role requirement in your chat configuration.
  */
  UserAuthorizationRequired?: BooleanAccountPreference;
+ /**
+ * A list of tags to apply to the configuration.
+ */
+ Tags?: Tags;
  }
  export interface CreateSlackChannelConfigurationResult {
  /**
@@ -377,6 +414,10 @@ declare namespace Chatbot {
  * Enables use of a user role requirement in your chat configuration.
  */
  UserAuthorizationRequired?: BooleanAccountPreference;
+ /**
+ * A list of tags to apply to the configuration.
+ */
+ Tags?: Tags;
  }
  export interface CreateTeamsChannelConfigurationResult {
  /**
@@ -611,6 +652,18 @@ declare namespace Chatbot {
  */
  NextToken?: PaginationToken;
  }
+ export interface ListTagsForResourceRequest {
+ /**
+ * The ARN of the configuration.
+ */
+ ResourceARN: AmazonResourceName;
+ }
+ export interface ListTagsForResourceResponse {
+ /**
+ * A list of tags applied to the configuration.
+ */
+ Tags?: TagList;
+ }
  export interface ListTeamsChannelConfigurationsRequest {
  /**
  * The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
@@ -682,6 +735,10 @@ declare namespace Chatbot {
  * Enables use of a user role requirement in your chat configuration.
  */
  UserAuthorizationRequired?: BooleanAccountPreference;
+ /**
+ * A list of tags applied to the configuration.
+ */
+ Tags?: Tags;
  }
  export type SlackChannelConfigurationList = SlackChannelConfiguration[];
  export type SlackChannelDisplayName = string;
@@ -725,6 +782,33 @@ declare namespace Chatbot {
  export type SlackWorkspacesList = SlackWorkspace[];
  export type SnsTopicArnList = Arn[];
  export type String = string;
+ export interface Tag {
+ /**
+ * The tag key.
+ */
+ TagKey: TagKey;
+ /**
+ * The tag value.
+ */
+ TagValue: TagValue;
+ }
+ export type TagKey = string;
+ export type TagKeyList = TagKey[];
+ export type TagList = Tag[];
+ export interface TagResourceRequest {
+ /**
+ * The ARN of the configuration.
+ */
+ ResourceARN: AmazonResourceName;
+ /**
+ * A list of tags to apply to the configuration.
+ */
+ Tags: TagList;
+ }
+ export interface TagResourceResponse {
+ }
+ export type TagValue = string;
+ export type Tags = Tag[];
  export type TeamChannelConfigurationsList = TeamsChannelConfiguration[];
  export type TeamName = string;
  export interface TeamsChannelConfiguration {
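Note that Chatbot's Tag shape uses TagKey/TagValue rather than the Key/Value pair used by many other services, and the same Tags member can be supplied at creation time. A sketch of doing so; every request field other than Tags is an assumption for illustration, since those members are not shown in this diff:

// Sketch: passing the new Tags member to createSlackChannelConfiguration.
import AWS = require('aws-sdk');

const chatbot = new AWS.Chatbot();

const tags: AWS.Chatbot.Tags = [
  { TagKey: 'stage', TagValue: 'prod' },
  { TagKey: 'owner', TagValue: 'platform-team' },
];

chatbot.createSlackChannelConfiguration(
  {
    ConfigurationName: 'example-config',                          // assumed field, not in this diff
    IamRoleArn: 'arn:aws:iam::123456789012:role/chatbot-role',    // assumed field, placeholder ARN
    SlackTeamId: 'T0123456789',                                   // assumed field, placeholder
    SlackChannelId: 'C0123456789',                                // assumed field, placeholder
    Tags: tags,                                                   // added in 2.1626.0
  },
  (err, data) => {
    if (err) console.error(err);
    else console.log(data);
  }
);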
@@ -776,6 +860,10 @@ declare namespace Chatbot {
  * Enables use of a user role requirement in your chat configuration.
  */
  UserAuthorizationRequired?: BooleanAccountPreference;
+ /**
+ * A list of tags applied to the configuration.
+ */
+ Tags?: Tags;
  }
  export type TeamsChannelId = string;
  export type TeamsChannelName = string;
@@ -811,6 +899,18 @@ declare namespace Chatbot {
  TeamsTenantId?: UUID;
  }
  export type UUID = string;
+ export interface UntagResourceRequest {
+ /**
+ * The ARN of the configuration.
+ */
+ ResourceARN: AmazonResourceName;
+ /**
+ * A list of tag keys to remove from the configuration.
+ */
+ TagKeys: TagKeyList;
+ }
+ export interface UntagResourceResponse {
+ }
  export interface UpdateAccountPreferencesRequest {
  /**
  * Enables use of a user role requirement in your chat configuration.
package/clients/cloudformation.d.ts CHANGED
@@ -1399,6 +1399,10 @@ declare namespace CloudFormation {
  * A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that CloudFormation successfully received them. All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
  */
  ClientRequestToken?: ClientRequestToken;
+ /**
+ * Specifies the deletion mode for the stack. Possible values are: STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.
+ */
+ DeletionMode?: DeletionMode;
  }
  export interface DeleteStackInstancesInput {
  /**
@@ -1452,6 +1456,7 @@ declare namespace CloudFormation {
  }
  export interface DeleteStackSetOutput {
  }
+ export type DeletionMode = "STANDARD"|"FORCE_DELETE_STACK"|string;
  export type DeletionTime = Date;
  export interface DeploymentTargets {
  /**
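The new DeletionMode member on DeleteStackInput lets you force-delete a stack that is stuck in DELETE_FAILED. A minimal sketch; the stack name is a placeholder:

// Sketch: retrying a failed stack deletion with DeletionMode.
import AWS = require('aws-sdk');

const cloudformation = new AWS.CloudFormation({ region: 'us-east-1' });

async function forceDelete(stackName: string): Promise<void> {
  // FORCE_DELETE_STACK applies to stacks stuck in DELETE_FAILED after a normal
  // deletion attempt; omitting DeletionMode is equivalent to STANDARD.
  await cloudformation.deleteStack({
    StackName: stackName,
    DeletionMode: 'FORCE_DELETE_STACK',
  }).promise();

  // Wait until the stack is gone using the existing stackDeleteComplete waiter.
  await cloudformation.waitFor('stackDeleteComplete', { StackName: stackName }).promise();
}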
@@ -2680,7 +2685,7 @@ declare namespace CloudFormation {
  }
  export interface ListStackInstanceResourceDriftsOutput {
  /**
- * A list of StackInstanceResourceDriftSummary structures that contain information about the specified stack instances.
+ * A list of StackInstanceResourceDriftsSummary structures that contain information about the specified stack instances.
  */
  Summaries?: StackInstanceResourceDriftsSummaries;
  /**
@@ -3806,6 +3811,10 @@ declare namespace CloudFormation {
  * When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false
  */
  RetainExceptOnCreate?: RetainExceptOnCreate;
+ /**
+ * Specifies the deletion mode for the stack. Possible values are: STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.
+ */
+ DeletionMode?: DeletionMode;
  /**
  * The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide.
  */
@@ -4475,7 +4484,7 @@ declare namespace CloudFormation {
  */
  MaxConcurrentPercentage?: MaxConcurrentPercentage;
  /**
- * Specifies how the concurrency level behaves during the operation execution. STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of MaxConcurrentCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior. If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar. SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures.
+ * Specifies how the concurrency level behaves during the operation execution. STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of FailureToleranceCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior. If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar. SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures.
  */
  ConcurrencyMode?: ConcurrencyMode;
  }
package/clients/glue.d.ts CHANGED
@@ -4344,6 +4344,10 @@ declare namespace Glue {
  * The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.
  */
  SourceControlDetails?: SourceControlDetails;
+ /**
+ * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
+ */
+ MaintenanceWindow?: MaintenanceWindow;
  }
  export interface CreateJobResponse {
  /**
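A sketch of creating a streaming job with the new MaintenanceWindow member. The role, script location, and the exact 'day:hour' string format are assumptions; the typings below only constrain the field to a string:

// Sketch: Glue createJob with MaintenanceWindow (streaming job).
import AWS = require('aws-sdk');

const glue = new AWS.Glue({ region: 'us-east-1' });

glue.createJob({
  Name: 'example-streaming-job',
  Role: 'arn:aws:iam::123456789012:role/GlueJobRole',        // placeholder
  Command: {
    Name: 'gluestreaming',                                    // streaming command type
    ScriptLocation: 's3://example-bucket/scripts/stream.py',  // placeholder
  },
  GlueVersion: '4.0',
  NumberOfWorkers: 2,
  WorkerType: 'G.1X',
  MaintenanceWindow: 'Mon:10',  // assumed "day:hour" (GMT) format, per the description above
}).promise()
  .then((res) => console.log('created job', res.Name))
  .catch(console.error);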
@@ -8928,6 +8932,10 @@ declare namespace Glue {
  * The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.
  */
  SourceControlDetails?: SourceControlDetails;
+ /**
+ * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
+ */
+ MaintenanceWindow?: MaintenanceWindow;
  }
  export interface JobBookmarkEntry {
  /**
@@ -9055,7 +9063,7 @@ declare namespace Glue {
  */
  ExecutionTime?: ExecutionTime;
  /**
- * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
+ * The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. The maximum value for timeout for batch jobs is 7 days or 10080 minutes. The default is 2880 minutes (48 hours) for batch jobs. Any existing Glue jobs that have a greater timeout value are defaulted to 7 days. For instance you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.
  */
  Timeout?: Timeout;
  /**
@@ -9087,16 +9095,20 @@ declare namespace Glue {
  */
  GlueVersion?: GlueVersionString;
  /**
- * This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.
+ * This field can be set for either job runs with execution class FLEX or when Auto Scaling is enabled, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.
  */
  DPUSeconds?: NullableDouble;
  /**
  * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
  */
  ExecutionClass?: ExecutionClass;
+ /**
+ * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
+ */
+ MaintenanceWindow?: MaintenanceWindow;
  }
  export type JobRunList = JobRun[];
- export type JobRunState = "STARTING"|"RUNNING"|"STOPPING"|"STOPPED"|"SUCCEEDED"|"FAILED"|"TIMEOUT"|"ERROR"|"WAITING"|string;
+ export type JobRunState = "STARTING"|"RUNNING"|"STOPPING"|"STOPPED"|"SUCCEEDED"|"FAILED"|"TIMEOUT"|"ERROR"|"WAITING"|"EXPIRED"|string;
  export interface JobUpdate {
  /**
  * Description of the job being defined.
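A sketch of reading the new JobRun members; the job name and run ID are placeholders, and the meaning of EXPIRED is inferred from the Timeout description above:

// Sketch: handling the new 'EXPIRED' JobRunState and the MaintenanceWindow field.
import AWS = require('aws-sdk');

const glue = new AWS.Glue();

async function checkRun(jobName: string, runId: string): Promise<void> {
  const { JobRun } = await glue.getJobRun({ JobName: jobName, RunId: runId }).promise();
  if (!JobRun) return;

  if (JobRun.JobRunState === 'EXPIRED') {
    // New state in the 2.1626.0 typings; see the Timeout description above.
    console.log('Run expired after exceeding the timeout limit');
  }
  console.log('Maintenance window:', JobRun.MaintenanceWindow);
}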
@@ -9178,6 +9190,10 @@ declare namespace Glue {
  * The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.
  */
  SourceControlDetails?: SourceControlDetails;
+ /**
+ * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
+ */
+ MaintenanceWindow?: MaintenanceWindow;
  }
  export interface Join {
  /**
@@ -10138,6 +10154,7 @@ declare namespace Glue {
  KmsKeyId?: NameString;
  }
  export type MLUserDataEncryptionModeString = "DISABLED"|"SSE-KMS"|string;
+ export type MaintenanceWindow = string;
  export type ManyInputs = NodeId[];
  export type MapValue = {[key: string]: GenericString};
  export interface Mapping {
package/clients/kms.d.ts CHANGED
@@ -220,11 +220,11 @@ declare class KMS extends Service {
  */
  getKeyRotationStatus(callback?: (err: AWSError, data: KMS.Types.GetKeyRotationStatusResponse) => void): Request<KMS.Types.GetKeyRotationStatusResponse, AWSError>;
  /**
- * Returns the public key and an import token you need to import or reimport key material for a KMS key. By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide. Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted. GetParametersForImport returns the items that you need to import your key material. The public key (or "wrapping key") of an RSA key pair that KMS generates. You will use this public key to encrypt ("wrap") your key material while it's in transit to KMS. A import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key. The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request. GetParametersForImport requires the following information: The key ID of the KMS key for which you are importing the key material. The key spec of the public key ("wrapping key") that you will use to encrypt your key material during import. The wrapping algorithm that you will use with the public key to encrypt your key material. You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material. The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:GetParametersForImport (key policy) Related operations: ImportKeyMaterial DeleteImportedKeyMaterial Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
+ * Returns the public key and an import token you need to import or reimport key material for a KMS key. By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide. Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted. GetParametersForImport returns the items that you need to import your key material. The public key (or "wrapping key") of an asymmetric key pair that KMS generates. You will use this public key to encrypt ("wrap") your key material while it's in transit to KMS. A import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key. The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request. GetParametersForImport requires the following information: The key ID of the KMS key for which you are importing the key material. The key spec of the public key ("wrapping key") that you will use to encrypt your key material during import. The wrapping algorithm that you will use with the public key to encrypt your key material. You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material. The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:GetParametersForImport (key policy) Related operations: ImportKeyMaterial DeleteImportedKeyMaterial Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
  */
  getParametersForImport(params: KMS.Types.GetParametersForImportRequest, callback?: (err: AWSError, data: KMS.Types.GetParametersForImportResponse) => void): Request<KMS.Types.GetParametersForImportResponse, AWSError>;
  /**
- * Returns the public key and an import token you need to import or reimport key material for a KMS key. By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide. Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted. GetParametersForImport returns the items that you need to import your key material. The public key (or "wrapping key") of an RSA key pair that KMS generates. You will use this public key to encrypt ("wrap") your key material while it's in transit to KMS. A import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key. The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request. GetParametersForImport requires the following information: The key ID of the KMS key for which you are importing the key material. The key spec of the public key ("wrapping key") that you will use to encrypt your key material during import. The wrapping algorithm that you will use with the public key to encrypt your key material. You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material. The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:GetParametersForImport (key policy) Related operations: ImportKeyMaterial DeleteImportedKeyMaterial Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
+ * Returns the public key and an import token you need to import or reimport key material for a KMS key. By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide. Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted. GetParametersForImport returns the items that you need to import your key material. The public key (or "wrapping key") of an asymmetric key pair that KMS generates. You will use this public key to encrypt ("wrap") your key material while it's in transit to KMS. A import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key. The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request. GetParametersForImport requires the following information: The key ID of the KMS key for which you are importing the key material. The key spec of the public key ("wrapping key") that you will use to encrypt your key material during import. The wrapping algorithm that you will use with the public key to encrypt your key material. You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material. The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. Required permissions: kms:GetParametersForImport (key policy) Related operations: ImportKeyMaterial DeleteImportedKeyMaterial Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.
  */
  getParametersForImport(callback?: (err: AWSError, data: KMS.Types.GetParametersForImportResponse) => void): Request<KMS.Types.GetParametersForImportResponse, AWSError>;
  /**
@@ -430,7 +430,7 @@ declare class KMS extends Service {
  }
  declare namespace KMS {
  export type AWSAccountIdType = string;
- export type AlgorithmSpec = "RSAES_PKCS1_V1_5"|"RSAES_OAEP_SHA_1"|"RSAES_OAEP_SHA_256"|"RSA_AES_KEY_WRAP_SHA_1"|"RSA_AES_KEY_WRAP_SHA_256"|string;
+ export type AlgorithmSpec = "RSAES_PKCS1_V1_5"|"RSAES_OAEP_SHA_1"|"RSAES_OAEP_SHA_256"|"RSA_AES_KEY_WRAP_SHA_1"|"RSA_AES_KEY_WRAP_SHA_256"|"SM2PKE"|string;
  export type AliasList = AliasListEntry[];
  export interface AliasListEntry {
  /**
@@ -1171,11 +1171,11 @@ declare namespace KMS {
  */
  KeyId: KeyIdType;
  /**
- * The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide. For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS. The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm. RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material. RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material. RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key). You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm.
+ * The algorithm you will use with the asymmetric public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide. For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS. For SM2PKE wrapping algorithms, you encrypt your key material directly with the SM2 public key from KMS. The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm, except in China Regions, where you must use the SM2PKE wrapping algorithm to import an RSA private key. The SM2PKE wrapping algorithm is available only in China Regions. The RSA_AES_KEY_WRAP_SHA_256 and RSA_AES_KEY_WRAP_SHA_1 wrapping algorithms are not supported in China Regions. RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material. RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material. RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key). You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm. SM2PKE (China Regions only) — supported for wrapping RSA, ECC, and SM2 key material.
  */
  WrappingAlgorithm: AlgorithmSpec;
  /**
- * The type of RSA public key to return in the response. You will use this wrapping key with the specified wrapping algorithm to protect your key material during import. Use the longest RSA wrapping key that is practical. You cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private key. Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public key.
+ * The type of public key to return in the response. You will use this wrapping key with the specified wrapping algorithm to protect your key material during import. Use the longest wrapping key that is practical. You cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private key. Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public key. The SM2 wrapping key spec is available only in China Regions.
  */
  WrappingKeySpec: WrappingKeySpec;
  }
@@ -2108,7 +2108,7 @@ declare namespace KMS {
  */
  SigningAlgorithm?: SigningAlgorithmSpec;
  }
- export type WrappingKeySpec = "RSA_2048"|"RSA_3072"|"RSA_4096"|string;
+ export type WrappingKeySpec = "RSA_2048"|"RSA_3072"|"RSA_4096"|"SM2"|string;
  export interface XksKeyConfigurationType {
  /**
  * The ID of the external key in its external key manager. This is the ID that the external key store proxy uses to identify the external key.
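A sketch of requesting SM2 import parameters with the new enum values (China Regions only, per the descriptions above); the key ID is a placeholder:

// Sketch: getParametersForImport with the SM2PKE wrapping algorithm and SM2 key spec.
import AWS = require('aws-sdk');

const kms = new AWS.KMS({ region: 'cn-north-1' });

kms.getParametersForImport({
  KeyId: '1234abcd-12ab-34cd-56ef-1234567890ab',  // placeholder KMS key ID
  WrappingAlgorithm: 'SM2PKE',                    // new AlgorithmSpec value
  WrappingKeySpec: 'SM2',                         // new WrappingKeySpec value
}).promise()
  .then((res) => {
    // res.PublicKey and res.ImportToken are used together in a later
    // importKeyMaterial call, before res.ParametersValidTo expires.
    console.log('parameters valid until', res.ParametersValidTo);
  })
  .catch(console.error);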
package/clients/lightsail.d.ts CHANGED
@@ -1655,7 +1655,7 @@ declare namespace Lightsail {
  export type BehaviorEnum = "dont-cache"|"cache"|string;
  export interface Blueprint {
  /**
- * The ID for the virtual private server image (app_wordpress_4_4 or app_lamp_7_0).
+ * The ID for the virtual private server image (app_wordpress_x_x or app_lamp_x_x).
  */
  blueprintId?: NonEmptyString;
  /**
@@ -1844,11 +1844,11 @@ declare namespace Lightsail {
  */
  diskSizeInGb?: integer;
  /**
- * The bundle ID (micro_1_0).
+ * The bundle ID (micro_x_x).
  */
  bundleId?: NonEmptyString;
  /**
- * The Amazon EC2 instance type (t2.micro).
+ * The instance type (micro).
  */
  instanceType?: string;
  /**
@@ -2912,7 +2912,7 @@ declare namespace Lightsail {
  */
  instanceSnapshotName?: ResourceName;
  /**
- * The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0).
+ * The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_x_x).
  */
  bundleId: NonEmptyString;
  /**
@@ -2932,7 +2932,7 @@ declare namespace Lightsail {
  */
  addOns?: AddOnRequestList;
  /**
- * The IP address type for the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
+ * The IP address type for the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
  */
  ipAddressType?: IpAddressType;
  /**
@@ -2968,11 +2968,11 @@ declare namespace Lightsail {
  */
  customImageName?: ResourceName;
  /**
- * The ID for a virtual private server image (app_wordpress_4_4 or app_lamp_7_0). Use the get blueprints operation to return a list of available images (or blueprints). Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
+ * The ID for a virtual private server image (app_wordpress_x_x or app_lamp_x_x). Use the get blueprints operation to return a list of available images (or blueprints). Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
  */
  blueprintId: NonEmptyString;
  /**
- * The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0).
+ * The bundle of specification information for your virtual private server (or instance), including the pricing plan (medium_x_x).
  */
  bundleId: NonEmptyString;
  /**
@@ -2992,7 +2992,7 @@ declare namespace Lightsail {
  */
  addOns?: AddOnRequestList;
  /**
- * The IP address type for the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
+ * The IP address type for the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
  */
  ipAddressType?: IpAddressType;
  }
@@ -3060,7 +3060,7 @@ declare namespace Lightsail {
  */
  tags?: TagList;
  /**
- * The IP address type for the load balancer. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
+ * The IP address type for the load balancer. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack.
  */
  ipAddressType?: IpAddressType;
  /**
@@ -5337,7 +5337,7 @@ declare namespace Lightsail {
  }
  export interface Instance {
  /**
- * The name the user gave the instance (Amazon_Linux-1GB-Ohio-1).
+ * The name the user gave the instance (Amazon_Linux_2023-1).
  */
  name?: ResourceName;
  /**
@@ -5365,15 +5365,15 @@ declare namespace Lightsail {
  */
  tags?: TagList;
  /**
- * The blueprint ID (os_amlinux_2016_03).
+ * The blueprint ID (amazon_linux_2023).
  */
  blueprintId?: NonEmptyString;
  /**
- * The friendly name of the blueprint (Amazon Linux).
+ * The friendly name of the blueprint (Amazon Linux 2023).
  */
  blueprintName?: NonEmptyString;
  /**
- * The bundle for the instance (micro_1_0).
+ * The bundle for the instance (micro_x_x).
  */
  bundleId?: NonEmptyString;
  /**
@@ -5397,7 +5397,7 @@ declare namespace Lightsail {
  */
  ipv6Addresses?: Ipv6AddressList;
  /**
- * The IP address type of the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ * The IP address type of the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.
  */
  ipAddressType?: IpAddressType;
  /**
@@ -5573,7 +5573,7 @@ declare namespace Lightsail {
  */
  toPort?: Port;
  /**
- * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
+ * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
  */
  protocol?: NetworkProtocol;
  /**
@@ -5616,7 +5616,7 @@ declare namespace Lightsail {
  */
  toPort?: Port;
  /**
- * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
+ * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
  */
  protocol?: NetworkProtocol;
  /**
@@ -5687,11 +5687,11 @@ declare namespace Lightsail {
  */
  fromInstanceArn?: NonEmptyString;
  /**
- * The blueprint ID from which you created the snapshot (os_debian_8_3). A blueprint is a virtual private server (or instance) image used to create instances quickly.
+ * The blueprint ID from which you created the snapshot (amazon_linux_2023). A blueprint is a virtual private server (or instance) image used to create instances quickly.
  */
  fromBlueprintId?: string;
  /**
- * The bundle ID from which you created the snapshot (micro_1_0).
+ * The bundle ID from which you created the snapshot (micro_x_x).
  */
  fromBundleId?: string;
  /**
@@ -5705,11 +5705,11 @@ declare namespace Lightsail {
  }
  export interface InstanceSnapshotInfo {
  /**
- * The bundle ID from which the source instance was created (micro_1_0).
+ * The bundle ID from which the source instance was created (micro_x_x).
  */
  fromBundleId?: NonEmptyString;
  /**
- * The blueprint ID from which the source instance (os_debian_8_3).
+ * The blueprint ID from which the source instance (amazon_linux_2023).
  */
  fromBlueprintId?: NonEmptyString;
  /**
@@ -5730,7 +5730,7 @@ declare namespace Lightsail {
  name?: string;
  }
  export type IpAddress = string;
- export type IpAddressType = "dualstack"|"ipv4"|string;
+ export type IpAddressType = "dualstack"|"ipv4"|"ipv6"|string;
  export type Ipv6Address = string;
  export type Ipv6AddressList = Ipv6Address[];
  export interface IsVpcPeeredRequest {
@@ -5931,7 +5931,7 @@ declare namespace Lightsail {
  */
  configurationOptions?: LoadBalancerConfigurationOptions;
  /**
- * The IP address type of the load balancer. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ * The IP address type of the load balancer. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.
  */
  ipAddressType?: IpAddressType;
  /**
@@ -6228,7 +6228,7 @@ declare namespace Lightsail {
  message?: string;
  }
  export type NameServersUpdateStateCode = "SUCCEEDED"|"PENDING"|"FAILED"|"STARTED"|string;
- export type NetworkProtocol = "tcp"|"all"|"udp"|"icmp"|string;
+ export type NetworkProtocol = "tcp"|"all"|"udp"|"icmp"|"icmpv6"|string;
  export type NonEmptyString = string;
  export type NotificationTriggerList = AlarmState[];
  export interface OpenInstancePublicPortsRequest {
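Note that the value added to the NetworkProtocol union is icmpv6, while the doc text above refers to it as icmp6. A sketch of opening ICMPv6 ping on an instance; the instance name and the type/code values are illustrative assumptions:

// Sketch: allowing ICMPv6 echo requests on a Lightsail instance.
import AWS = require('aws-sdk');

const lightsail = new AWS.Lightsail({ region: 'us-east-1' });

lightsail.openInstancePublicPorts({
  instanceName: 'example-instance',  // placeholder
  portInfo: {
    protocol: 'icmpv6',  // new NetworkProtocol value
    fromPort: 128,       // for ICMP, fromPort carries the ICMP type (128 = echo request)
    toPort: 0,           // and toPort carries the ICMP code
  },
}).promise()
  .then((res) => console.log(res.operation?.status))
  .catch(console.error);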
@@ -6383,7 +6383,7 @@ declare namespace Lightsail {
  */
  toPort?: Port;
  /**
- * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
+ * The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.
  */
  protocol?: NetworkProtocol;
  /**
@@ -7062,9 +7062,13 @@ declare namespace Lightsail {
  */
  resourceName: ResourceName;
  /**
- * The IP address type to set for the specified resource. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ * The IP address type to set for the specified resource. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6.
  */
  ipAddressType: IpAddressType;
+ /**
+ * Required parameter to accept the instance bundle update when changing to, and from, IPv6-only. An instance bundle will change when switching from dual-stack or ipv4, to ipv6. It also changes when switching from ipv6, to dual-stack or ipv4. You must include this parameter in the command to update the bundle. For example, if you switch from dual-stack to ipv6, the bundle will be updated, and billing for the IPv6-only instance bundle begins immediately.
+ */
+ acceptBundleUpdate?: boolean;
  }
  export interface SetIpAddressTypeResult {
  /**
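A sketch of switching a resource to IPv6-only with the new acceptBundleUpdate flag described above; the resource name is a placeholder:

// Sketch: setIpAddressType with the new 'ipv6' value and acceptBundleUpdate.
import AWS = require('aws-sdk');

const lightsail = new AWS.Lightsail();

lightsail.setIpAddressType({
  resourceType: 'Instance',
  resourceName: 'example-instance',  // placeholder
  ipAddressType: 'ipv6',             // new IpAddressType value
  acceptBundleUpdate: true,          // required when changing to or from IPv6-only
}).promise()
  .then((res) => console.log(res.operations))
  .catch(console.error);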