cdk-lambda-subminute 2.0.413 → 2.0.415
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +3 -3
- package/lib/cdk-lambda-subminute.js +3 -3
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/batch-2016-08-10.min.json +66 -45
- package/node_modules/aws-sdk/apis/bedrock-agent-2023-06-05.min.json +392 -385
- package/node_modules/aws-sdk/apis/bedrock-agent-runtime-2023-07-26.min.json +107 -13
- package/node_modules/aws-sdk/apis/ce-2017-10-25.min.json +52 -0
- package/node_modules/aws-sdk/apis/ce-2017-10-25.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +1 -3
- package/node_modules/aws-sdk/apis/elasticache-2015-02-02.min.json +6 -4
- package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +60 -27
- package/node_modules/aws-sdk/clients/batch.d.ts +19 -0
- package/node_modules/aws-sdk/clients/bedrockagent.d.ts +1423 -278
- package/node_modules/aws-sdk/clients/bedrockagentruntime.d.ts +102 -9
- package/node_modules/aws-sdk/clients/costexplorer.d.ts +72 -0
- package/node_modules/aws-sdk/clients/ec2.d.ts +7 -1
- package/node_modules/aws-sdk/clients/ecs.d.ts +10 -10
- package/node_modules/aws-sdk/clients/elasticache.d.ts +10 -2
- package/node_modules/aws-sdk/clients/finspace.d.ts +31 -2
- package/node_modules/aws-sdk/clients/secretsmanager.d.ts +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +11 -11
- package/node_modules/aws-sdk/dist/aws-sdk.js +71 -10
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +102 -102
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/lib/region_config_data.json +4 -0
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +3 -3
@@ -29,11 +29,11 @@ declare class BedrockAgentRuntime extends Service {
|
|
29
29
|
*/
|
30
30
|
retrieve(callback?: (err: AWSError, data: BedrockAgentRuntime.Types.RetrieveResponse) => void): Request<BedrockAgentRuntime.Types.RetrieveResponse, AWSError>;
|
31
31
|
/**
|
32
|
-
* Queries a knowledge base and generates responses based on the retrieved results. The response cites
|
32
|
+
* Queries a knowledge base and generates responses based on the retrieved results. The response only cites sources that are relevant to the query.
|
33
33
|
*/
|
34
34
|
retrieveAndGenerate(params: BedrockAgentRuntime.Types.RetrieveAndGenerateRequest, callback?: (err: AWSError, data: BedrockAgentRuntime.Types.RetrieveAndGenerateResponse) => void): Request<BedrockAgentRuntime.Types.RetrieveAndGenerateResponse, AWSError>;
|
35
35
|
/**
|
36
|
-
* Queries a knowledge base and generates responses based on the retrieved results. The response cites
|
36
|
+
* Queries a knowledge base and generates responses based on the retrieved results. The response only cites sources that are relevant to the query.
|
37
37
|
*/
|
38
38
|
retrieveAndGenerate(callback?: (err: AWSError, data: BedrockAgentRuntime.Types.RetrieveAndGenerateResponse) => void): Request<BedrockAgentRuntime.Types.RetrieveAndGenerateResponse, AWSError>;
|
39
39
|
}
|
@@ -124,6 +124,19 @@ declare namespace BedrockAgentRuntime {
|
|
124
124
|
*/
|
125
125
|
traceId?: TraceId;
|
126
126
|
}
|
127
|
+
export interface FilterAttribute {
|
128
|
+
/**
|
129
|
+
* The name that the metadata attribute must match.
|
130
|
+
*/
|
131
|
+
key: FilterKey;
|
132
|
+
/**
|
133
|
+
* The value to which to compare the value of the metadata attribute.
|
134
|
+
*/
|
135
|
+
value: FilterValue;
|
136
|
+
}
|
137
|
+
export type FilterKey = string;
|
138
|
+
export interface FilterValue {
|
139
|
+
}
|
127
140
|
export interface FinalResponse {
|
128
141
|
/**
|
129
142
|
* The text in the response to the user.
|
@@ -137,6 +150,12 @@ declare namespace BedrockAgentRuntime {
|
|
137
150
|
*/
|
138
151
|
textResponsePart?: TextResponsePart;
|
139
152
|
}
|
153
|
+
export interface GenerationConfiguration {
|
154
|
+
/**
|
155
|
+
* Contains the template for the prompt that's sent to the model for response generation.
|
156
|
+
*/
|
157
|
+
promptTemplate?: PromptTemplate;
|
158
|
+
}
|
140
159
|
export interface InferenceConfiguration {
|
141
160
|
/**
|
142
161
|
* The maximum number of tokens allowed in the generated response.
|
@@ -208,7 +227,7 @@ declare namespace BedrockAgentRuntime {
|
|
208
227
|
*/
|
209
228
|
sessionId: SessionId;
|
210
229
|
/**
|
211
|
-
* Contains parameters that specify various attributes of the session.
|
230
|
+
* Contains parameters that specify various attributes of the session. For more information, see Control session context.
|
212
231
|
*/
|
213
232
|
sessionState?: SessionState;
|
214
233
|
}
|
@@ -254,7 +273,7 @@ declare namespace BedrockAgentRuntime {
|
|
254
273
|
export type KnowledgeBaseQueryTextString = string;
|
255
274
|
export interface KnowledgeBaseRetrievalConfiguration {
|
256
275
|
/**
|
257
|
-
* Contains details about how the results from the vector search should be returned.
|
276
|
+
* Contains details about how the results from the vector search should be returned. For more information, see Query configurations.
|
258
277
|
*/
|
259
278
|
vectorSearchConfiguration: KnowledgeBaseVectorSearchConfiguration;
|
260
279
|
}
|
@@ -267,6 +286,10 @@ declare namespace BedrockAgentRuntime {
|
|
267
286
|
* Contains information about the location of the data source.
|
268
287
|
*/
|
269
288
|
location?: RetrievalResultLocation;
|
289
|
+
/**
|
290
|
+
* Contains metadata attributes and their values for the file in the data source. For more information, see Metadata and filtering.
|
291
|
+
*/
|
292
|
+
metadata?: RetrievalResultMetadata;
|
270
293
|
/**
|
271
294
|
* The level of relevance of the result to the query.
|
272
295
|
*/
|
@@ -274,6 +297,10 @@ declare namespace BedrockAgentRuntime {
|
|
274
297
|
}
|
275
298
|
export type KnowledgeBaseRetrievalResults = KnowledgeBaseRetrievalResult[];
|
276
299
|
export interface KnowledgeBaseRetrieveAndGenerateConfiguration {
|
300
|
+
/**
|
301
|
+
* Contains configurations for response generation based on the knowledge base query results.
|
302
|
+
*/
|
303
|
+
generationConfiguration?: GenerationConfiguration;
|
277
304
|
/**
|
278
305
|
* The unique identifier of the knowledge base that is queried and the foundation model used for generation.
|
279
306
|
*/
|
@@ -289,7 +316,11 @@ declare namespace BedrockAgentRuntime {
|
|
289
316
|
}
|
290
317
|
export interface KnowledgeBaseVectorSearchConfiguration {
|
291
318
|
/**
|
292
|
-
*
|
319
|
+
* Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.
|
320
|
+
*/
|
321
|
+
filter?: RetrievalFilter;
|
322
|
+
/**
|
323
|
+
* The number of source chunks to retrieve.
|
293
324
|
*/
|
294
325
|
numberOfResults?: KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger;
|
295
326
|
/**
|
@@ -461,6 +492,12 @@ declare namespace BedrockAgentRuntime {
|
|
461
492
|
modelInvocationOutput?: PreProcessingModelInvocationOutput;
|
462
493
|
}
|
463
494
|
export type PromptSessionAttributesMap = {[key: string]: String};
|
495
|
+
export interface PromptTemplate {
|
496
|
+
/**
|
497
|
+
* The template for the prompt that's sent to the model for response generation. You can include prompt placeholders, which become replaced before the prompt is sent to the model to provide instructions and context to the model. In addition, you can include XML tags to delineate meaningful sections of the prompt template. For more information, see the following resources: Knowledge base prompt templates Use XML tags with Anthropic Claude models
|
498
|
+
*/
|
499
|
+
textPromptTemplate?: TextPromptTemplate;
|
500
|
+
}
|
464
501
|
export type PromptText = string;
|
465
502
|
export type PromptType = "PRE_PROCESSING"|"ORCHESTRATION"|"KNOWLEDGE_BASE_RESPONSE_GENERATION"|"POST_PROCESSING"|string;
|
466
503
|
export interface Rationale {
|
@@ -494,6 +531,53 @@ declare namespace BedrockAgentRuntime {
|
|
494
531
|
message?: NonBlankString;
|
495
532
|
}
|
496
533
|
export type ResponseStream = EventStream<{accessDeniedException?:AccessDeniedException,badGatewayException?:BadGatewayException,chunk?:PayloadPart,conflictException?:ConflictException,dependencyFailedException?:DependencyFailedException,internalServerException?:InternalServerException,resourceNotFoundException?:ResourceNotFoundException,serviceQuotaExceededException?:ServiceQuotaExceededException,throttlingException?:ThrottlingException,trace?:TracePart,validationException?:ValidationException}>;
|
534
|
+
export interface RetrievalFilter {
|
535
|
+
/**
|
536
|
+
* Knowledge base data sources whose metadata attributes fulfill all the filter conditions inside this list are returned.
|
537
|
+
*/
|
538
|
+
andAll?: RetrievalFilterList;
|
539
|
+
/**
|
540
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value matches the value in this object are returned.
|
541
|
+
*/
|
542
|
+
equals?: FilterAttribute;
|
543
|
+
/**
|
544
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than the value in this object are returned.
|
545
|
+
*/
|
546
|
+
greaterThan?: FilterAttribute;
|
547
|
+
/**
|
548
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object are returned.
|
549
|
+
*/
|
550
|
+
greaterThanOrEquals?: FilterAttribute;
|
551
|
+
/**
|
552
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object are returned.
|
553
|
+
*/
|
554
|
+
in?: FilterAttribute;
|
555
|
+
/**
|
556
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than the value in this object are returned.
|
557
|
+
*/
|
558
|
+
lessThan?: FilterAttribute;
|
559
|
+
/**
|
560
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object are returned.
|
561
|
+
*/
|
562
|
+
lessThanOrEquals?: FilterAttribute;
|
563
|
+
/**
|
564
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned.
|
565
|
+
*/
|
566
|
+
notEquals?: FilterAttribute;
|
567
|
+
/**
|
568
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object are returned.
|
569
|
+
*/
|
570
|
+
notIn?: FilterAttribute;
|
571
|
+
/**
|
572
|
+
* Knowledge base data sources whose metadata attributes fulfill at least one of the filter conditions inside this list are returned.
|
573
|
+
*/
|
574
|
+
orAll?: RetrievalFilterList;
|
575
|
+
/**
|
576
|
+
* Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value starts with the value in this object are returned. This filter is currently only supported for Amazon OpenSearch Serverless vector stores.
|
577
|
+
*/
|
578
|
+
startsWith?: FilterAttribute;
|
579
|
+
}
|
580
|
+
export type RetrievalFilterList = RetrievalFilter[];
|
497
581
|
export interface RetrievalResultContent {
|
498
582
|
/**
|
499
583
|
* The cited text from the data source.
|
@@ -511,6 +595,10 @@ declare namespace BedrockAgentRuntime {
|
|
511
595
|
type: RetrievalResultLocationType;
|
512
596
|
}
|
513
597
|
export type RetrievalResultLocationType = "S3"|string;
|
598
|
+
export type RetrievalResultMetadata = {[key: string]: RetrievalResultMetadataValue};
|
599
|
+
export type RetrievalResultMetadataKey = string;
|
600
|
+
export interface RetrievalResultMetadataValue {
|
601
|
+
}
|
514
602
|
export interface RetrievalResultS3Location {
|
515
603
|
/**
|
516
604
|
* The S3 URI of the data source.
|
@@ -542,11 +630,11 @@ declare namespace BedrockAgentRuntime {
|
|
542
630
|
}
|
543
631
|
export interface RetrieveAndGenerateRequest {
|
544
632
|
/**
|
545
|
-
* Contains the query made to the knowledge base.
|
633
|
+
* Contains the query to be made to the knowledge base.
|
546
634
|
*/
|
547
635
|
input: RetrieveAndGenerateInput;
|
548
636
|
/**
|
549
|
-
* Contains
|
637
|
+
* Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
|
550
638
|
*/
|
551
639
|
retrieveAndGenerateConfiguration?: RetrieveAndGenerateConfiguration;
|
552
640
|
/**
|
@@ -589,11 +677,11 @@ declare namespace BedrockAgentRuntime {
|
|
589
677
|
*/
|
590
678
|
nextToken?: NextToken;
|
591
679
|
/**
|
592
|
-
* Contains
|
680
|
+
* Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
|
593
681
|
*/
|
594
682
|
retrievalConfiguration?: KnowledgeBaseRetrievalConfiguration;
|
595
683
|
/**
|
596
|
-
*
|
684
|
+
* Contains the query to send the knowledge base.
|
597
685
|
*/
|
598
686
|
retrievalQuery: KnowledgeBaseQuery;
|
599
687
|
}
|
@@ -616,6 +704,10 @@ declare namespace BedrockAgentRuntime {
|
|
616
704
|
* Contains information about the location of the data source.
|
617
705
|
*/
|
618
706
|
location?: RetrievalResultLocation;
|
707
|
+
/**
|
708
|
+
* Contains metadata attributes and their values for the file in the data source. For more information, see Metadata and filtering.
|
709
|
+
*/
|
710
|
+
metadata?: RetrievalResultMetadata;
|
619
711
|
}
|
620
712
|
export type RetrievedReferences = RetrievedReference[];
|
621
713
|
export type SearchType = "HYBRID"|"SEMANTIC"|string;
|
@@ -650,6 +742,7 @@ declare namespace BedrockAgentRuntime {
|
|
650
742
|
export type StopSequences = String[];
|
651
743
|
export type String = string;
|
652
744
|
export type Temperature = number;
|
745
|
+
export type TextPromptTemplate = string;
|
653
746
|
export interface TextResponsePart {
|
654
747
|
/**
|
655
748
|
* Contains information about where the text with a citation begins and ends in the generated output.
|
@@ -227,6 +227,14 @@ declare class CostExplorer extends Service {
|
|
227
227
|
* Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage.
|
228
228
|
*/
|
229
229
|
getUsageForecast(callback?: (err: AWSError, data: CostExplorer.Types.GetUsageForecastResponse) => void): Request<CostExplorer.Types.GetUsageForecastResponse, AWSError>;
|
230
|
+
/**
|
231
|
+
* Retrieves a list of your historical cost allocation tag backfill requests.
|
232
|
+
*/
|
233
|
+
listCostAllocationTagBackfillHistory(params: CostExplorer.Types.ListCostAllocationTagBackfillHistoryRequest, callback?: (err: AWSError, data: CostExplorer.Types.ListCostAllocationTagBackfillHistoryResponse) => void): Request<CostExplorer.Types.ListCostAllocationTagBackfillHistoryResponse, AWSError>;
|
234
|
+
/**
|
235
|
+
* Retrieves a list of your historical cost allocation tag backfill requests.
|
236
|
+
*/
|
237
|
+
listCostAllocationTagBackfillHistory(callback?: (err: AWSError, data: CostExplorer.Types.ListCostAllocationTagBackfillHistoryResponse) => void): Request<CostExplorer.Types.ListCostAllocationTagBackfillHistoryResponse, AWSError>;
|
230
238
|
/**
|
231
239
|
* Get a list of cost allocation tags. All inputs in the API are optional and serve as filters. By default, all cost allocation tags are returned.
|
232
240
|
*/
|
@@ -267,6 +275,14 @@ declare class CostExplorer extends Service {
|
|
267
275
|
* Modifies the feedback property of a given cost anomaly.
|
268
276
|
*/
|
269
277
|
provideAnomalyFeedback(callback?: (err: AWSError, data: CostExplorer.Types.ProvideAnomalyFeedbackResponse) => void): Request<CostExplorer.Types.ProvideAnomalyFeedbackResponse, AWSError>;
|
278
|
+
/**
|
279
|
+
* Request a cost allocation tag backfill. This will backfill the activation status (either active or inactive) for all tag keys from the BackfillFrom date up to when this request is made. You can request a backfill once every 24 hours.
|
280
|
+
*/
|
281
|
+
startCostAllocationTagBackfill(params: CostExplorer.Types.StartCostAllocationTagBackfillRequest, callback?: (err: AWSError, data: CostExplorer.Types.StartCostAllocationTagBackfillResponse) => void): Request<CostExplorer.Types.StartCostAllocationTagBackfillResponse, AWSError>;
|
282
|
+
/**
|
283
|
+
* Request a cost allocation tag backfill. This will backfill the activation status (either active or inactive) for all tag keys from the BackfillFrom date up to when this request is made. You can request a backfill once every 24 hours.
|
284
|
+
*/
|
285
|
+
startCostAllocationTagBackfill(callback?: (err: AWSError, data: CostExplorer.Types.StartCostAllocationTagBackfillResponse) => void): Request<CostExplorer.Types.StartCostAllocationTagBackfillResponse, AWSError>;
|
270
286
|
/**
|
271
287
|
* Requests a Savings Plans recommendation generation. This enables you to calculate a fresh set of Savings Plans recommendations that takes your latest usage data and current Savings Plans inventory into account. You can refresh Savings Plans recommendations up to three times daily for a consolidated billing family. StartSavingsPlansPurchaseRecommendationGeneration has no request syntax because no input parameters are needed to support this operation.
|
272
288
|
*/
|
@@ -489,6 +505,30 @@ declare namespace CostExplorer {
|
|
489
505
|
*/
|
490
506
|
LastUsedDate?: ZonedDateTime;
|
491
507
|
}
|
508
|
+
export interface CostAllocationTagBackfillRequest {
|
509
|
+
/**
|
510
|
+
* The date the backfill starts from.
|
511
|
+
*/
|
512
|
+
BackfillFrom?: ZonedDateTime;
|
513
|
+
/**
|
514
|
+
* The time when the backfill was requested.
|
515
|
+
*/
|
516
|
+
RequestedAt?: ZonedDateTime;
|
517
|
+
/**
|
518
|
+
* The backfill completion time.
|
519
|
+
*/
|
520
|
+
CompletedAt?: ZonedDateTime;
|
521
|
+
/**
|
522
|
+
* The status of the cost allocation tag backfill request.
|
523
|
+
*/
|
524
|
+
BackfillStatus?: CostAllocationTagBackfillStatus;
|
525
|
+
/**
|
526
|
+
* The time when the backfill status was last updated.
|
527
|
+
*/
|
528
|
+
LastUpdatedAt?: ZonedDateTime;
|
529
|
+
}
|
530
|
+
export type CostAllocationTagBackfillRequestList = CostAllocationTagBackfillRequest[];
|
531
|
+
export type CostAllocationTagBackfillStatus = "SUCCEEDED"|"PROCESSING"|"FAILED"|string;
|
492
532
|
export type CostAllocationTagKeyList = TagKey[];
|
493
533
|
export type CostAllocationTagList = CostAllocationTag[];
|
494
534
|
export type CostAllocationTagStatus = "Active"|"Inactive"|string;
|
@@ -2027,6 +2067,26 @@ declare namespace CostExplorer {
|
|
2027
2067
|
}
|
2028
2068
|
export type Key = string;
|
2029
2069
|
export type Keys = Key[];
|
2070
|
+
export interface ListCostAllocationTagBackfillHistoryRequest {
|
2071
|
+
/**
|
2072
|
+
* The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
|
2073
|
+
*/
|
2074
|
+
NextToken?: NextPageToken;
|
2075
|
+
/**
|
2076
|
+
* The maximum number of objects that are returned for this request.
|
2077
|
+
*/
|
2078
|
+
MaxResults?: CostAllocationTagsMaxResults;
|
2079
|
+
}
|
2080
|
+
export interface ListCostAllocationTagBackfillHistoryResponse {
|
2081
|
+
/**
|
2082
|
+
* The list of historical cost allocation tag backfill requests.
|
2083
|
+
*/
|
2084
|
+
BackfillRequests?: CostAllocationTagBackfillRequestList;
|
2085
|
+
/**
|
2086
|
+
* The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
|
2087
|
+
*/
|
2088
|
+
NextToken?: NextPageToken;
|
2089
|
+
}
|
2030
2090
|
export interface ListCostAllocationTagsRequest {
|
2031
2091
|
/**
|
2032
2092
|
* The status of cost allocation tag keys that are returned for this request.
|
@@ -3127,6 +3187,18 @@ declare namespace CostExplorer {
|
|
3127
3187
|
export type SortDefinitionKey = string;
|
3128
3188
|
export type SortDefinitions = SortDefinition[];
|
3129
3189
|
export type SortOrder = "ASCENDING"|"DESCENDING"|string;
|
3190
|
+
export interface StartCostAllocationTagBackfillRequest {
|
3191
|
+
/**
|
3192
|
+
* The date you want the backfill to start from. The date can only be a first day of the month (a billing start date). Dates can't precede the previous twelve months or be in the future.
|
3193
|
+
*/
|
3194
|
+
BackfillFrom: ZonedDateTime;
|
3195
|
+
}
|
3196
|
+
export interface StartCostAllocationTagBackfillResponse {
|
3197
|
+
/**
|
3198
|
+
* An object containing detailed metadata of your new backfill request.
|
3199
|
+
*/
|
3200
|
+
BackfillRequest?: CostAllocationTagBackfillRequest;
|
3201
|
+
}
|
3130
3202
|
export interface StartSavingsPlansPurchaseRecommendationGenerationRequest {
|
3131
3203
|
}
|
3132
3204
|
export interface StartSavingsPlansPurchaseRecommendationGenerationResponse {
|
@@ -31149,7 +31149,13 @@ declare namespace EC2 {
|
|
31149
31149
|
TotalNeuronDeviceMemoryInMiB?: TotalNeuronMemory;
|
31150
31150
|
}
|
31151
31151
|
export interface NewDhcpConfiguration {
|
31152
|
+
/**
|
31153
|
+
* The name of a DHCP option.
|
31154
|
+
*/
|
31152
31155
|
Key?: String;
|
31156
|
+
/**
|
31157
|
+
* The values for the DHCP option.
|
31158
|
+
*/
|
31153
31159
|
Values?: ValueStringList;
|
31154
31160
|
}
|
31155
31161
|
export type NewDhcpConfigurationList = NewDhcpConfiguration[];
|
@@ -35104,7 +35110,7 @@ declare namespace EC2 {
|
|
35104
35110
|
*/
|
35105
35111
|
Filters: FilterList;
|
35106
35112
|
/**
|
35107
|
-
* The maximum number of routes to return.
|
35113
|
+
* The maximum number of routes to return.
|
35108
35114
|
*/
|
35109
35115
|
MaxResults?: TransitGatewayMaxResults;
|
35110
35116
|
/**
|
@@ -29,19 +29,19 @@ declare class ECS extends Service {
|
|
29
29
|
*/
|
30
30
|
createCluster(callback?: (err: AWSError, data: ECS.Types.CreateClusterResponse) => void): Request<ECS.Types.CreateClusterResponse, AWSError>;
|
31
31
|
/**
|
32
|
-
* Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
|
32
|
+
* Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. 
DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . 
If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. 
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
|
33
33
|
*/
|
34
34
|
createService(params: ECS.Types.CreateServiceRequest, callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
|
35
35
|
/**
|
36
|
-
* Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
|
36
|
+
* Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. 
DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . 
If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. 
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
|
37
37
|
*/
|
38
38
|
createService(callback?: (err: AWSError, data: ECS.Types.CreateServiceResponse) => void): Request<ECS.Types.CreateServiceResponse, AWSError>;
|
39
39
|
/**
|
40
|
-
* Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
|
40
|
+
* Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
|
41
41
|
*/
|
42
42
|
createTaskSet(params: ECS.Types.CreateTaskSetRequest, callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
|
43
43
|
/**
|
44
|
-
* Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
|
44
|
+
* Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
|
45
45
|
*/
|
46
46
|
createTaskSet(callback?: (err: AWSError, data: ECS.Types.CreateTaskSetResponse) => void): Request<ECS.Types.CreateTaskSetResponse, AWSError>;
|
47
47
|
/**
|
@@ -325,19 +325,19 @@ declare class ECS extends Service {
|
|
325
325
|
*/
|
326
326
|
registerTaskDefinition(callback?: (err: AWSError, data: ECS.Types.RegisterTaskDefinitionResponse) => void): Request<ECS.Types.RegisterTaskDefinitionResponse, AWSError>;
|
327
327
|
/**
|
328
|
-
* Starts a new task using the specified task definition.
|
328
|
+
* Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. 
Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time. Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
|
329
329
|
*/
|
330
330
|
runTask(params: ECS.Types.RunTaskRequest, callback?: (err: AWSError, data: ECS.Types.RunTaskResponse) => void): Request<ECS.Types.RunTaskResponse, AWSError>;
|
331
331
|
/**
|
332
|
-
* Starts a new task using the specified task definition.
|
332
|
+
* Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. 
Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time. Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
|
333
333
|
*/
|
334
334
|
runTask(callback?: (err: AWSError, data: ECS.Types.RunTaskResponse) => void): Request<ECS.Types.RunTaskResponse, AWSError>;
|
335
335
|
/**
|
336
|
-
* Starts a new task from the specified task definition on the specified container instance or instances.
|
336
|
+
* Starts a new task from the specified task definition on the specified container instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
|
337
337
|
*/
|
338
338
|
startTask(params: ECS.Types.StartTaskRequest, callback?: (err: AWSError, data: ECS.Types.StartTaskResponse) => void): Request<ECS.Types.StartTaskResponse, AWSError>;
|
339
339
|
/**
|
340
|
-
* Starts a new task from the specified task definition on the specified container instance or instances.
|
340
|
+
* Starts a new task from the specified task definition on the specified container instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
|
341
341
|
*/
|
342
342
|
startTask(callback?: (err: AWSError, data: ECS.Types.StartTaskResponse) => void): Request<ECS.Types.StartTaskResponse, AWSError>;
|
343
343
|
/**
|
@@ -429,11 +429,11 @@ declare class ECS extends Service {
|
|
429
429
|
*/
|
430
430
|
updateContainerInstancesState(callback?: (err: AWSError, data: ECS.Types.UpdateContainerInstancesStateResponse) => void): Request<ECS.Types.UpdateContainerInstancesStateResponse, AWSError>;
|
431
431
|
/**
|
432
|
-
* Modifies the parameters of a service.
|
432
|
+
* Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. 
If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. 
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
|
433
433
|
*/
|
434
434
|
updateService(params: ECS.Types.UpdateServiceRequest, callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
|
435
435
|
/**
|
436
|
-
* Modifies the parameters of a service.
|
436
|
+
* Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. 
If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic. Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service. 
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service. You must have a service-linked role when you update any of the following service properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role .
|
437
437
|
*/
|
438
438
|
updateService(callback?: (err: AWSError, data: ECS.Types.UpdateServiceResponse) => void): Request<ECS.Types.UpdateServiceResponse, AWSError>;
|
439
439
|
/**
|
@@ -1827,7 +1827,11 @@ declare namespace ElastiCache {
|
|
1827
1827
|
/**
|
1828
1828
|
* The upper limit for data storage the cache is set to use.
|
1829
1829
|
*/
|
1830
|
-
Maximum
|
1830
|
+
Maximum?: IntegerOptional;
|
1831
|
+
/**
|
1832
|
+
* The lower limit for data storage the cache is set to use.
|
1833
|
+
*/
|
1834
|
+
Minimum?: IntegerOptional;
|
1831
1835
|
/**
|
1832
1836
|
* The unit that the storage is measured in, in GB.
|
1833
1837
|
*/
|
@@ -2516,7 +2520,11 @@ declare namespace ElastiCache {
|
|
2516
2520
|
/**
|
2517
2521
|
* The configuration for the maximum number of ECPUs the cache can consume per second.
|
2518
2522
|
*/
|
2519
|
-
Maximum
|
2523
|
+
Maximum?: IntegerOptional;
|
2524
|
+
/**
|
2525
|
+
* The configuration for the minimum number of ECPUs the cache should be able consume per second.
|
2526
|
+
*/
|
2527
|
+
Minimum?: IntegerOptional;
|
2520
2528
|
}
|
2521
2529
|
export interface Endpoint {
|
2522
2530
|
/**
|