aws-sdk 2.1662.0 → 2.1664.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +1 -1
  2. package/apis/application-signals-2024-04-15.min.json +15 -0
  3. package/apis/bedrock-runtime-2023-09-30.min.json +27 -5
  4. package/apis/cleanrooms-2022-02-17.min.json +444 -156
  5. package/apis/codecommit-2015-04-13.min.json +7 -1
  6. package/apis/datazone-2018-05-10.min.json +68 -32
  7. package/apis/ecr-2015-09-21.examples.json +186 -0
  8. package/apis/ecr-2015-09-21.min.json +177 -20
  9. package/apis/ecr-2015-09-21.paginators.json +9 -0
  10. package/apis/eks-2017-11-01.min.json +81 -63
  11. package/apis/elasticloadbalancingv2-2015-12-01.examples.json +33 -0
  12. package/apis/elasticloadbalancingv2-2015-12-01.min.json +113 -73
  13. package/apis/iotsitewise-2019-12-02.min.json +113 -98
  14. package/apis/mediapackagev2-2022-12-25.min.json +0 -3
  15. package/apis/medical-imaging-2023-07-19.min.json +75 -35
  16. package/apis/network-firewall-2020-11-12.min.json +7 -1
  17. package/apis/outposts-2019-12-03.min.json +11 -2
  18. package/apis/pinpoint-sms-voice-v2-2022-03-31.min.json +6 -0
  19. package/apis/states-2016-11-23.min.json +163 -128
  20. package/clients/applicationautoscaling.d.ts +1 -1
  21. package/clients/applicationsignals.d.ts +29 -20
  22. package/clients/bedrockruntime.d.ts +11 -8
  23. package/clients/cleanrooms.d.ts +328 -63
  24. package/clients/datazone.d.ts +36 -0
  25. package/clients/dynamodb.d.ts +6 -6
  26. package/clients/ec2.d.ts +4 -4
  27. package/clients/ecr.d.ts +242 -8
  28. package/clients/eks.d.ts +27 -2
  29. package/clients/elbv2.d.ts +54 -8
  30. package/clients/iotsitewise.d.ts +37 -25
  31. package/clients/mediapackagev2.d.ts +1 -1
  32. package/clients/medicalimaging.d.ts +38 -0
  33. package/clients/networkfirewall.d.ts +7 -7
  34. package/clients/outposts.d.ts +5 -0
  35. package/clients/pinpointsmsvoicev2.d.ts +56 -56
  36. package/clients/stepfunctions.d.ts +67 -9
  37. package/dist/aws-sdk-core-react-native.js +1 -1
  38. package/dist/aws-sdk-react-native.js +17 -17
  39. package/dist/aws-sdk.js +309 -97
  40. package/dist/aws-sdk.min.js +80 -80
  41. package/lib/core.js +1 -1
  42. package/package.json +1 -1
@@ -451,6 +451,14 @@ declare class DataZone extends Service {
451
451
  * Gets the blueprint configuration in Amazon DataZone.
452
452
  */
453
453
  getEnvironmentBlueprintConfiguration(callback?: (err: AWSError, data: DataZone.Types.GetEnvironmentBlueprintConfigurationOutput) => void): Request<DataZone.Types.GetEnvironmentBlueprintConfigurationOutput, AWSError>;
454
+ /**
455
+ * Gets the credentials of an environment in Amazon DataZone.
456
+ */
457
+ getEnvironmentCredentials(params: DataZone.Types.GetEnvironmentCredentialsInput, callback?: (err: AWSError, data: DataZone.Types.GetEnvironmentCredentialsOutput) => void): Request<DataZone.Types.GetEnvironmentCredentialsOutput, AWSError>;
458
+ /**
459
+ * Gets the credentials of an environment in Amazon DataZone.
460
+ */
461
+ getEnvironmentCredentials(callback?: (err: AWSError, data: DataZone.Types.GetEnvironmentCredentialsOutput) => void): Request<DataZone.Types.GetEnvironmentCredentialsOutput, AWSError>;
454
462
  /**
455
463
  * Gets an environment profile in Amazon DataZone.
456
464
  */
@@ -4765,6 +4773,34 @@ declare namespace DataZone {
4765
4773
  */
4766
4774
  userParameters?: CustomParameterList;
4767
4775
  }
4776
+ export interface GetEnvironmentCredentialsInput {
4777
+ /**
4778
+ * The ID of the Amazon DataZone domain in which this environment and its credentials exist.
4779
+ */
4780
+ domainIdentifier: DomainId;
4781
+ /**
4782
+ * The ID of the environment whose credentials this operation gets.
4783
+ */
4784
+ environmentIdentifier: EnvironmentId;
4785
+ }
4786
+ export interface GetEnvironmentCredentialsOutput {
4787
+ /**
4788
+ * The access key ID of the environment.
4789
+ */
4790
+ accessKeyId?: String;
4791
+ /**
4792
+ * The expiration timestamp of the environment credentials.
4793
+ */
4794
+ expiration?: SyntheticTimestamp_date_time;
4795
+ /**
4796
+ * The secret access key of the environment credentials.
4797
+ */
4798
+ secretAccessKey?: String;
4799
+ /**
4800
+ * The session token of the environment credentials.
4801
+ */
4802
+ sessionToken?: String;
4803
+ }
4768
4804
  export interface GetEnvironmentInput {
4769
4805
  /**
4770
4806
  * The ID of the Amazon DataZone domain where the environment exists.
@@ -15,11 +15,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
15
15
  constructor(options?: DynamoDB.Types.ClientConfiguration)
16
16
  config: Config & DynamoDB.Types.ClientConfiguration;
17
17
  /**
18
- * This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.
18
+ * This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB . The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.
19
19
  */
20
20
  batchExecuteStatement(params: DynamoDB.Types.BatchExecuteStatementInput, callback?: (err: AWSError, data: DynamoDB.Types.BatchExecuteStatementOutput) => void): Request<DynamoDB.Types.BatchExecuteStatementOutput, AWSError>;
21
21
  /**
22
- * This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.
22
+ * This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB . The entire batch must consist of either read statements or write statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all statements in the BatchExecuteStatement succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each statement.
23
23
  */
24
24
  batchExecuteStatement(callback?: (err: AWSError, data: DynamoDB.Types.BatchExecuteStatementOutput) => void): Request<DynamoDB.Types.BatchExecuteStatementOutput, AWSError>;
25
25
  /**
@@ -31,11 +31,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
31
31
  */
32
32
  batchGetItem(callback?: (err: AWSError, data: DynamoDB.Types.BatchGetItemOutput) => void): Request<DynamoDB.Types.BatchGetItemOutput, AWSError>;
33
33
  /**
34
- * The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. 
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. 
The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.
34
+ * The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. 
If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. 
For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.
35
35
  */
36
36
  batchWriteItem(params: DynamoDB.Types.BatchWriteItemInput, callback?: (err: AWSError, data: DynamoDB.Types.BatchWriteItemOutput) => void): Request<DynamoDB.Types.BatchWriteItemOutput, AWSError>;
37
37
  /**
38
- * The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. 
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. 
The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.
38
+ * The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. 
If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. 
For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.
39
39
  */
40
40
  batchWriteItem(callback?: (err: AWSError, data: DynamoDB.Types.BatchWriteItemOutput) => void): Request<DynamoDB.Types.BatchWriteItemOutput, AWSError>;
41
41
  /**
@@ -87,11 +87,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
87
87
  */
88
88
  deleteResourcePolicy(callback?: (err: AWSError, data: DynamoDB.Types.DeleteResourcePolicyOutput) => void): Request<DynamoDB.Types.DeleteResourcePolicyOutput, AWSError>;
89
89
  /**
90
- * The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table.
90
+ * The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table.
91
91
  */
92
92
  deleteTable(params: DynamoDB.Types.DeleteTableInput, callback?: (err: AWSError, data: DynamoDB.Types.DeleteTableOutput) => void): Request<DynamoDB.Types.DeleteTableOutput, AWSError>;
93
93
  /**
94
- * The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table.
94
+ * The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table.
95
95
  */
96
96
  deleteTable(callback?: (err: AWSError, data: DynamoDB.Types.DeleteTableOutput) => void): Request<DynamoDB.Types.DeleteTableOutput, AWSError>;
97
97
  /**
package/clients/ec2.d.ts CHANGED
@@ -21188,7 +21188,7 @@ declare namespace EC2 {
21188
21188
  */
21189
21189
  InstanceRequirements?: InstanceRequirements;
21190
21190
  /**
21191
- * The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.
21191
+ * The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.
21192
21192
  */
21193
21193
  ImageId?: ImageId;
21194
21194
  }
@@ -21228,7 +21228,7 @@ declare namespace EC2 {
21228
21228
  */
21229
21229
  InstanceRequirements?: InstanceRequirementsRequest;
21230
21230
  /**
21231
- * The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.
21231
+ * The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.
21232
21232
  */
21233
21233
  ImageId?: ImageId;
21234
21234
  }
@@ -33444,7 +33444,7 @@ declare namespace EC2 {
33444
33444
  */
33445
33445
  NetworkInterfaces?: LaunchTemplateInstanceNetworkInterfaceSpecificationRequestList;
33446
33446
  /**
33447
- * The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch. Valid formats: ami-17characters00000 resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label resolve:ssm:public-parameter Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.
33447
+ * The ID of the AMI in the format ami-17characters00000. Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch. To reference a public parameter: resolve:ssm:public-parameter To reference a parameter stored in the same account: resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label To reference a parameter shared from another Amazon Web Services account: resolve:ssm:parameter-ARN resolve:ssm:parameter-ARN:version-number resolve:ssm:parameter-ARN:label For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. If the launch template will be used for an EC2 Fleet or Spot Fleet, note the following: Only EC2 Fleets of type instant support specifying a Systems Manager parameter. For EC2 Fleets of type maintain or request, or for Spot Fleets, you must specify the AMI ID.
33448
33448
  */
33449
33449
  ImageId?: ImageId;
33450
33450
  /**
@@ -34185,7 +34185,7 @@ declare namespace EC2 {
34185
34185
  */
34186
34186
  ResourceTypes?: ValueStringList;
34187
34187
  }
34188
- export type ResourceType = "capacity-reservation"|"client-vpn-endpoint"|"customer-gateway"|"carrier-gateway"|"coip-pool"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"instance-event-window"|"internet-gateway"|"ipam"|"ipam-pool"|"ipam-scope"|"ipv4pool-ec2"|"ipv6pool-ec2"|"key-pair"|"launch-template"|"local-gateway"|"local-gateway-route-table"|"local-gateway-virtual-interface"|"local-gateway-virtual-interface-group"|"local-gateway-route-table-vpc-association"|"local-gateway-route-table-virtual-interface-group-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"network-insights-access-scope"|"network-insights-access-scope-analysis"|"placement-group"|"prefix-list"|"replace-root-volume-task"|"reserved-instances"|"route-table"|"security-group"|"security-group-rule"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"subnet-cidr-reservation"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-policy-table"|"transit-gateway-route-table"|"transit-gateway-route-table-announcement"|"volume"|"vpc"|"vpc-endpoint"|"vpc-endpoint-connection"|"vpc-endpoint-service"|"vpc-endpoint-service-permission"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log"|"capacity-reservation-fleet"|"traffic-mirror-filter-rule"|"vpc-endpoint-connection-device-type"|"verified-access-instance"|"verified-access-group"|"verified-access-endpoint"|"verified-access-policy"|"verified-access-trust-provider"|"vpn-connection-device-type"|"vpc-block-public-access-exclusion"|"vpc-encryption-control"|"ipam-resource-discovery"|"ipam-resource-discovery-association"|"instance-connect-endpoin
t"|"ipam-external-resource-verification-token"|string;
34188
+ export type ResourceType = "capacity-reservation"|"client-vpn-endpoint"|"customer-gateway"|"carrier-gateway"|"coip-pool"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"instance-event-window"|"internet-gateway"|"ipam"|"ipam-pool"|"ipam-scope"|"ipv4pool-ec2"|"ipv6pool-ec2"|"key-pair"|"launch-template"|"local-gateway"|"local-gateway-route-table"|"local-gateway-virtual-interface"|"local-gateway-virtual-interface-group"|"local-gateway-route-table-vpc-association"|"local-gateway-route-table-virtual-interface-group-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"network-insights-access-scope"|"network-insights-access-scope-analysis"|"placement-group"|"prefix-list"|"replace-root-volume-task"|"reserved-instances"|"route-table"|"security-group"|"security-group-rule"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"subnet-cidr-reservation"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-policy-table"|"transit-gateway-route-table"|"transit-gateway-route-table-announcement"|"volume"|"vpc"|"vpc-endpoint"|"vpc-endpoint-connection"|"vpc-endpoint-service"|"vpc-endpoint-service-permission"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log"|"capacity-reservation-fleet"|"traffic-mirror-filter-rule"|"vpc-endpoint-connection-device-type"|"verified-access-instance"|"verified-access-group"|"verified-access-endpoint"|"verified-access-policy"|"verified-access-trust-provider"|"vpn-connection-device-type"|"vpc-block-public-access-exclusion"|"ipam-resource-discovery"|"ipam-resource-discovery-association"|"instance-connect-endpoint"|"ipam-external-resourc
e-verification-token"|string;
34189
34189
  export interface ResponseError {
34190
34190
  /**
34191
34191
  * The error code.
package/clients/ecr.d.ts CHANGED
@@ -68,6 +68,14 @@ declare class ECR extends Service {
68
68
  * Creates a repository. For more information, see Amazon ECR repositories in the Amazon Elastic Container Registry User Guide.
69
69
  */
70
70
  createRepository(callback?: (err: AWSError, data: ECR.Types.CreateRepositoryResponse) => void): Request<ECR.Types.CreateRepositoryResponse, AWSError>;
71
+ /**
72
+ * Creates a repository creation template. This template is used to define the settings for repositories created by Amazon ECR on your behalf. For example, repositories created through pull through cache actions. For more information, see Private repository creation templates in the Amazon Elastic Container Registry User Guide.
73
+ */
74
+ createRepositoryCreationTemplate(params: ECR.Types.CreateRepositoryCreationTemplateRequest, callback?: (err: AWSError, data: ECR.Types.CreateRepositoryCreationTemplateResponse) => void): Request<ECR.Types.CreateRepositoryCreationTemplateResponse, AWSError>;
75
+ /**
76
+ * Creates a repository creation template. This template is used to define the settings for repositories created by Amazon ECR on your behalf. For example, repositories created through pull through cache actions. For more information, see Private repository creation templates in the Amazon Elastic Container Registry User Guide.
77
+ */
78
+ createRepositoryCreationTemplate(callback?: (err: AWSError, data: ECR.Types.CreateRepositoryCreationTemplateResponse) => void): Request<ECR.Types.CreateRepositoryCreationTemplateResponse, AWSError>;
71
79
  /**
72
80
  * Deletes the lifecycle policy associated with the specified repository.
73
81
  */
@@ -100,6 +108,14 @@ declare class ECR extends Service {
100
108
  * Deletes a repository. If the repository isn't empty, you must either delete the contents of the repository or use the force option to delete the repository and have Amazon ECR delete all of its contents on your behalf.
101
109
  */
102
110
  deleteRepository(callback?: (err: AWSError, data: ECR.Types.DeleteRepositoryResponse) => void): Request<ECR.Types.DeleteRepositoryResponse, AWSError>;
111
+ /**
112
+ * Deletes a repository creation template.
113
+ */
114
+ deleteRepositoryCreationTemplate(params: ECR.Types.DeleteRepositoryCreationTemplateRequest, callback?: (err: AWSError, data: ECR.Types.DeleteRepositoryCreationTemplateResponse) => void): Request<ECR.Types.DeleteRepositoryCreationTemplateResponse, AWSError>;
115
+ /**
116
+ * Deletes a repository creation template.
117
+ */
118
+ deleteRepositoryCreationTemplate(callback?: (err: AWSError, data: ECR.Types.DeleteRepositoryCreationTemplateResponse) => void): Request<ECR.Types.DeleteRepositoryCreationTemplateResponse, AWSError>;
103
119
  /**
104
120
  * Deletes the repository policy associated with the specified repository.
105
121
  */
@@ -156,6 +172,14 @@ declare class ECR extends Service {
156
172
  * Describes image repositories in a registry.
157
173
  */
158
174
  describeRepositories(callback?: (err: AWSError, data: ECR.Types.DescribeRepositoriesResponse) => void): Request<ECR.Types.DescribeRepositoriesResponse, AWSError>;
175
+ /**
176
+ * Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template.
177
+ */
178
+ describeRepositoryCreationTemplates(params: ECR.Types.DescribeRepositoryCreationTemplatesRequest, callback?: (err: AWSError, data: ECR.Types.DescribeRepositoryCreationTemplatesResponse) => void): Request<ECR.Types.DescribeRepositoryCreationTemplatesResponse, AWSError>;
179
+ /**
180
+ * Returns details about the repository creation templates in a registry. The prefixes request parameter can be used to return the details for a specific repository creation template.
181
+ */
182
+ describeRepositoryCreationTemplates(callback?: (err: AWSError, data: ECR.Types.DescribeRepositoryCreationTemplatesResponse) => void): Request<ECR.Types.DescribeRepositoryCreationTemplatesResponse, AWSError>;
159
183
  /**
160
184
  * Retrieves an authorization token. An authorization token represents your IAM authentication credentials and can be used to access any Amazon ECR registry that your IAM principal has access to. The authorization token is valid for 12 hours. The authorizationToken returned is a base64 encoded string that can be decoded and used in a docker login command to authenticate to a registry. The CLI offers an get-login-password command that simplifies the login process. For more information, see Registry authentication in the Amazon Elastic Container Registry User Guide.
161
185
  */
@@ -285,11 +309,11 @@ declare class ECR extends Service {
285
309
  */
286
310
  putRegistryScanningConfiguration(callback?: (err: AWSError, data: ECR.Types.PutRegistryScanningConfigurationResponse) => void): Request<ECR.Types.PutRegistryScanningConfigurationResponse, AWSError>;
287
311
  /**
288
- * Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.
312
+ * Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. For more information on the custom role for replication, see Creating an IAM role for replication. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.
289
313
  */
290
314
  putReplicationConfiguration(params: ECR.Types.PutReplicationConfigurationRequest, callback?: (err: AWSError, data: ECR.Types.PutReplicationConfigurationResponse) => void): Request<ECR.Types.PutReplicationConfigurationResponse, AWSError>;
291
315
  /**
292
- * Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.
316
+ * Creates or updates the replication configuration for a registry. The existing replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. For more information on the custom role for replication, see Creating an IAM role for replication. When configuring cross-account replication, the destination account must grant the source account permission to replicate. This permission is controlled using a registry permissions policy. For more information, see PutRegistryPolicy.
293
317
  */
294
318
  putReplicationConfiguration(callback?: (err: AWSError, data: ECR.Types.PutReplicationConfigurationResponse) => void): Request<ECR.Types.PutReplicationConfigurationResponse, AWSError>;
295
319
  /**
@@ -340,6 +364,14 @@ declare class ECR extends Service {
340
364
  * Updates an existing pull through cache rule.
341
365
  */
342
366
  updatePullThroughCacheRule(callback?: (err: AWSError, data: ECR.Types.UpdatePullThroughCacheRuleResponse) => void): Request<ECR.Types.UpdatePullThroughCacheRuleResponse, AWSError>;
367
+ /**
368
+ * Updates an existing repository creation template.
369
+ */
370
+ updateRepositoryCreationTemplate(params: ECR.Types.UpdateRepositoryCreationTemplateRequest, callback?: (err: AWSError, data: ECR.Types.UpdateRepositoryCreationTemplateResponse) => void): Request<ECR.Types.UpdateRepositoryCreationTemplateResponse, AWSError>;
371
+ /**
372
+ * Updates an existing repository creation template.
373
+ */
374
+ updateRepositoryCreationTemplate(callback?: (err: AWSError, data: ECR.Types.UpdateRepositoryCreationTemplateResponse) => void): Request<ECR.Types.UpdateRepositoryCreationTemplateResponse, AWSError>;
343
375
  /**
344
376
  * Uploads an image layer part to Amazon ECR. When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part. This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.
345
377
  */
@@ -577,7 +609,7 @@ declare namespace ECR {
577
609
  */
578
610
  ecrRepositoryPrefix: PullThroughCacheRuleRepositoryPrefix;
579
611
  /**
580
- * The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - &lt;custom&gt;.azurecr.io GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com
612
+ * The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - &lt;custom&gt;.azurecr.io
581
613
  */
582
614
  upstreamRegistryUrl: Url;
583
615
  /**
@@ -619,6 +651,54 @@ declare namespace ECR {
619
651
  */
620
652
  credentialArn?: CredentialArn;
621
653
  }
654
+ export interface CreateRepositoryCreationTemplateRequest {
655
+ /**
656
+ * The repository namespace prefix to associate with the template. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with prod/team/. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix. There is always an assumed / applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR treats that as ecr-public/. When using a pull through cache rule, the repository prefix you specify during rule creation is what you should specify as your repository creation template prefix as well.
657
+ */
658
+ prefix: Prefix;
659
+ /**
660
+ * A description for the repository creation template.
661
+ */
662
+ description?: RepositoryTemplateDescription;
663
+ /**
664
+ * The encryption configuration to use for repositories created using the template.
665
+ */
666
+ encryptionConfiguration?: EncryptionConfigurationForRepositoryCreationTemplate;
667
+ /**
668
+ * The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
669
+ */
670
+ resourceTags?: TagList;
671
+ /**
672
+ * The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.
673
+ */
674
+ imageTagMutability?: ImageTagMutability;
675
+ /**
676
+ * The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.
677
+ */
678
+ repositoryPolicy?: RepositoryPolicyText;
679
+ /**
680
+ * The lifecycle policy to use for repositories created using the template.
681
+ */
682
+ lifecyclePolicy?: LifecyclePolicyTextForRepositoryCreationTemplate;
683
+ /**
684
+ * A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION
685
+ */
686
+ appliedFor: RCTAppliedForList;
687
+ /**
688
+ * The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring.
689
+ */
690
+ customRoleArn?: CustomRoleArn;
691
+ }
692
+ export interface CreateRepositoryCreationTemplateResponse {
693
+ /**
694
+ * The registry ID associated with the request.
695
+ */
696
+ registryId?: RegistryId;
697
+ /**
698
+ * The details of the repository creation template associated with the request.
699
+ */
700
+ repositoryCreationTemplate?: RepositoryCreationTemplate;
701
+ }
622
702
  export interface CreateRepositoryRequest {
623
703
  /**
624
704
  * The Amazon Web Services account ID associated with the registry to create the repository. If you do not specify a registry, the default registry is assumed.
@@ -653,6 +733,7 @@ declare namespace ECR {
653
733
  }
654
734
  export type CreationTimestamp = Date;
655
735
  export type CredentialArn = string;
736
+ export type CustomRoleArn = string;
656
737
  export interface CvssScore {
657
738
  /**
658
739
  * The base CVSS score used for the finding.
@@ -778,6 +859,22 @@ declare namespace ECR {
778
859
  */
779
860
  policyText?: RegistryPolicyText;
780
861
  }
862
+ export interface DeleteRepositoryCreationTemplateRequest {
863
+ /**
864
+ * The repository namespace prefix associated with the repository creation template.
865
+ */
866
+ prefix: Prefix;
867
+ }
868
+ export interface DeleteRepositoryCreationTemplateResponse {
869
+ /**
870
+ * The registry ID associated with the request.
871
+ */
872
+ registryId?: RegistryId;
873
+ /**
874
+ * The details of the repository creation template that was deleted.
875
+ */
876
+ repositoryCreationTemplate?: RepositoryCreationTemplate;
877
+ }
781
878
  export interface DeleteRepositoryPolicyRequest {
782
879
  /**
783
880
  * The Amazon Web Services account ID associated with the registry that contains the repository policy to delete. If you do not specify a registry, the default registry is assumed.
@@ -960,7 +1057,7 @@ declare namespace ECR {
960
1057
  }
961
1058
  export interface DescribeRegistryResponse {
962
1059
  /**
963
- * The ID of the registry.
1060
+ * The registry ID associated with the request.
964
1061
  */
965
1062
  registryId?: RegistryId;
966
1063
  /**
@@ -996,9 +1093,37 @@ declare namespace ECR {
996
1093
  */
997
1094
  nextToken?: NextToken;
998
1095
  }
1096
+ export interface DescribeRepositoryCreationTemplatesRequest {
1097
+ /**
1098
+ * The repository namespace prefixes associated with the repository creation templates to describe. If this value is not specified, all repository creation templates are returned.
1099
+ */
1100
+ prefixes?: PrefixList;
1101
+ /**
1102
+ * The nextToken value returned from a previous paginated DescribeRepositoryCreationTemplates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
1103
+ */
1104
+ nextToken?: NextToken;
1105
+ /**
1106
+ * The maximum number of repository results returned by DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and a nextToken value, if applicable.
1107
+ */
1108
+ maxResults?: MaxResults;
1109
+ }
1110
+ export interface DescribeRepositoryCreationTemplatesResponse {
1111
+ /**
1112
+ * The registry ID associated with the request.
1113
+ */
1114
+ registryId?: RegistryId;
1115
+ /**
1116
+ * The details of the repository creation templates.
1117
+ */
1118
+ repositoryCreationTemplates?: RepositoryCreationTemplateList;
1119
+ /**
1120
+ * The nextToken value to include in a future DescribeRepositoryCreationTemplates request. When the results of a DescribeRepositoryCreationTemplates request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.
1121
+ */
1122
+ nextToken?: NextToken;
1123
+ }
999
1124
  export interface EncryptionConfiguration {
1000
1125
  /**
1001
- * The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.
1126
+ * The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.
1002
1127
  */
1003
1128
  encryptionType: EncryptionType;
1004
1129
  /**
@@ -1006,6 +1131,16 @@ declare namespace ECR {
1006
1131
  */
1007
1132
  kmsKey?: KmsKey;
1008
1133
  }
1134
+ export interface EncryptionConfigurationForRepositoryCreationTemplate {
1135
+ /**
1136
+ * The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. For more information, see Protecting data using server-side encryption with an KMS key stored in Key Management Service (SSE-KMS) in the Amazon Simple Storage Service Console Developer Guide. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.
1137
+ */
1138
+ encryptionType: EncryptionType;
1139
+ /**
1140
+ * If you use the KMS encryption type, specify the KMS key to use for encryption. The full ARN of the KMS key must be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used.
1141
+ */
1142
+ kmsKey?: KmsKeyForRepositoryCreationTemplate;
1143
+ }
1009
1144
  export type EncryptionType = "AES256"|"KMS"|string;
1010
1145
  export interface EnhancedImageScanFinding {
1011
1146
  /**
@@ -1205,7 +1340,7 @@ declare namespace ECR {
1205
1340
  }
1206
1341
  export interface GetRegistryPolicyResponse {
1207
1342
  /**
1208
- * The ID of the registry.
1343
+ * The registry ID associated with the request.
1209
1344
  */
1210
1345
  registryId?: RegistryId;
1211
1346
  /**
@@ -1217,7 +1352,7 @@ declare namespace ECR {
1217
1352
  }
1218
1353
  export interface GetRegistryScanningConfigurationResponse {
1219
1354
  /**
1220
- * The ID of the registry.
1355
+ * The registry ID associated with the request.
1221
1356
  */
1222
1357
  registryId?: RegistryId;
1223
1358
  /**
@@ -1472,6 +1607,7 @@ declare namespace ECR {
1472
1607
  }
1473
1608
  export type IsPTCRuleValid = boolean;
1474
1609
  export type KmsKey = string;
1610
+ export type KmsKeyForRepositoryCreationTemplate = string;
1475
1611
  export interface Layer {
1476
1612
  /**
1477
1613
  * The sha256 digest of the image layer.
@@ -1557,6 +1693,7 @@ declare namespace ECR {
1557
1693
  }
1558
1694
  export type LifecyclePolicyRulePriority = number;
1559
1695
  export type LifecyclePolicyText = string;
1696
+ export type LifecyclePolicyTextForRepositoryCreationTemplate = string;
1560
1697
  export type LifecyclePreviewMaxResults = number;
1561
1698
  export interface ListImagesFilter {
1562
1699
  /**
@@ -1659,6 +1796,8 @@ declare namespace ECR {
1659
1796
  }
1660
1797
  export type PartSize = number;
1661
1798
  export type Platform = string;
1799
+ export type Prefix = string;
1800
+ export type PrefixList = Prefix[];
1662
1801
  export type ProxyEndpoint = string;
1663
1802
  export interface PullThroughCacheRule {
1664
1803
  /**
@@ -1818,7 +1957,7 @@ declare namespace ECR {
1818
1957
  }
1819
1958
  export interface PutRegistryPolicyResponse {
1820
1959
  /**
1821
- * The registry ID.
1960
+ * The registry ID associated with the request.
1822
1961
  */
1823
1962
  registryId?: RegistryId;
1824
1963
  /**
@@ -1854,6 +1993,8 @@ declare namespace ECR {
1854
1993
  */
1855
1994
  replicationConfiguration?: ReplicationConfiguration;
1856
1995
  }
1996
+ export type RCTAppliedFor = "REPLICATION"|"PULL_THROUGH_CACHE"|string;
1997
+ export type RCTAppliedForList = RCTAppliedFor[];
1857
1998
  export type Reason = string;
1858
1999
  export interface Recommendation {
1859
2000
  /**
@@ -1962,6 +2103,53 @@ declare namespace ECR {
1962
2103
  */
1963
2104
  encryptionConfiguration?: EncryptionConfiguration;
1964
2105
  }
2106
+ export interface RepositoryCreationTemplate {
2107
+ /**
2108
+ * The repository namespace prefix associated with the repository creation template.
2109
+ */
2110
+ prefix?: Prefix;
2111
+ /**
2112
+ * The description associated with the repository creation template.
2113
+ */
2114
+ description?: RepositoryTemplateDescription;
2115
+ /**
2116
+ * The encryption configuration associated with the repository creation template.
2117
+ */
2118
+ encryptionConfiguration?: EncryptionConfigurationForRepositoryCreationTemplate;
2119
+ /**
2120
+ * The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
2121
+ */
2122
+ resourceTags?: TagList;
2123
+ /**
2124
+ * The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.
2125
+ */
2126
+ imageTagMutability?: ImageTagMutability;
2127
+ /**
2128
+ * The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.
2129
+ */
2130
+ repositoryPolicy?: RepositoryPolicyText;
2131
+ /**
2132
+ * The lifecycle policy to use for repositories created using the template.
2133
+ */
2134
+ lifecyclePolicy?: LifecyclePolicyTextForRepositoryCreationTemplate;
2135
+ /**
2136
+ * A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.
2137
+ */
2138
+ appliedFor?: RCTAppliedForList;
2139
+ /**
2140
+ * The ARN of the role to be assumed by Amazon ECR.
2141
+ */
2142
+ customRoleArn?: CustomRoleArn;
2143
+ /**
2144
+ * The date and time, in JavaScript date format, when the repository creation template was created.
2145
+ */
2146
+ createdAt?: _Date;
2147
+ /**
2148
+ * The date and time, in JavaScript date format, when the repository creation template was last updated.
2149
+ */
2150
+ updatedAt?: _Date;
2151
+ }
2152
+ export type RepositoryCreationTemplateList = RepositoryCreationTemplate[];
1965
2153
  export interface RepositoryFilter {
1966
2154
  /**
1967
2155
  * The repository filter details. When the PREFIX_MATCH filter type is specified, this value is required and should be the repository name prefix to configure replication for.
@@ -2017,6 +2205,7 @@ declare namespace ECR {
2017
2205
  }
2018
2206
  export type RepositoryScanningConfigurationFailureList = RepositoryScanningConfigurationFailure[];
2019
2207
  export type RepositoryScanningConfigurationList = RepositoryScanningConfiguration[];
2208
+ export type RepositoryTemplateDescription = string;
2020
2209
  export interface Resource {
2021
2210
  /**
2022
2211
  * An object that contains details about the resource involved in a finding.
@@ -2242,6 +2431,51 @@ declare namespace ECR {
2242
2431
  */
2243
2432
  credentialArn?: CredentialArn;
2244
2433
  }
2434
+ export interface UpdateRepositoryCreationTemplateRequest {
2435
+ /**
2436
+ * The repository namespace prefix that matches an existing repository creation template in the registry. All repositories created using this namespace prefix will have the settings defined in this template applied. For example, a prefix of prod would apply to all repositories beginning with prod/. This includes a repository named prod/team1 as well as a repository named prod/repository1. To apply a template to all repositories in your registry that don't have an associated creation template, you can use ROOT as the prefix.
2437
+ */
2438
+ prefix: Prefix;
2439
+ /**
2440
+ * A description for the repository creation template.
2441
+ */
2442
+ description?: RepositoryTemplateDescription;
2443
+ encryptionConfiguration?: EncryptionConfigurationForRepositoryCreationTemplate;
2444
+ /**
2445
+ * The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
2446
+ */
2447
+ resourceTags?: TagList;
2448
+ /**
2449
+ * Updates the tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.
2450
+ */
2451
+ imageTagMutability?: ImageTagMutability;
2452
+ /**
2453
+ * Updates the repository policy created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.
2454
+ */
2455
+ repositoryPolicy?: RepositoryPolicyText;
2456
+ /**
2457
+ * Updates the lifecycle policy associated with the specified repository creation template.
2458
+ */
2459
+ lifecyclePolicy?: LifecyclePolicyTextForRepositoryCreationTemplate;
2460
+ /**
2461
+ * Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.
2462
+ */
2463
+ appliedFor?: RCTAppliedForList;
2464
+ /**
2465
+ * The ARN of the role to be assumed by Amazon ECR. This role must be in the same account as the registry that you are configuring.
2466
+ */
2467
+ customRoleArn?: CustomRoleArn;
2468
+ }
2469
+ export interface UpdateRepositoryCreationTemplateResponse {
2470
+ /**
2471
+ * The registry ID associated with the request.
2472
+ */
2473
+ registryId?: RegistryId;
2474
+ /**
2475
+ * The details of the repository creation template associated with the request.
2476
+ */
2477
+ repositoryCreationTemplate?: RepositoryCreationTemplate;
2478
+ }
2245
2479
  export type UpdatedTimestamp = Date;
2246
2480
  export type UploadId = string;
2247
2481
  export interface UploadLayerPartRequest {