cdk-comprehend-s3olap 2.0.142 → 2.0.143

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,6 +11,14 @@ declare class Appflow extends Service {
11
11
  */
12
12
  constructor(options?: Appflow.Types.ClientConfiguration)
13
13
  config: Config & Appflow.Types.ClientConfiguration;
14
+ /**
15
+ * Cancels active runs for a flow. You can cancel all of the active runs for a flow, or you can cancel specific runs by providing their IDs. You can cancel a flow run only when the run is in progress. You can't cancel a run that has already completed or failed. You also can't cancel a run that's scheduled to occur but hasn't started yet. To prevent a scheduled run, you can deactivate the flow with the StopFlow action. You cannot resume a run after you cancel it. When you send your request, the status for each run becomes CancelStarted. When the cancellation completes, the status becomes Canceled. When you cancel a run, you still incur charges for any data that the run already processed before the cancellation. If the run had already written some data to the flow destination, then that data remains in the destination. If you configured the flow to use a batch API (such as the Salesforce Bulk API 2.0), then the run will finish reading or writing its entire batch of data after the cancellation. For these operations, the data processing charges for Amazon AppFlow apply. For the pricing information, see Amazon AppFlow pricing.
16
+ */
17
+ cancelFlowExecutions(params: Appflow.Types.CancelFlowExecutionsRequest, callback?: (err: AWSError, data: Appflow.Types.CancelFlowExecutionsResponse) => void): Request<Appflow.Types.CancelFlowExecutionsResponse, AWSError>;
18
+ /**
19
+ * Cancels active runs for a flow. You can cancel all of the active runs for a flow, or you can cancel specific runs by providing their IDs. You can cancel a flow run only when the run is in progress. You can't cancel a run that has already completed or failed. You also can't cancel a run that's scheduled to occur but hasn't started yet. To prevent a scheduled run, you can deactivate the flow with the StopFlow action. You cannot resume a run after you cancel it. When you send your request, the status for each run becomes CancelStarted. When the cancellation completes, the status becomes Canceled. When you cancel a run, you still incur charges for any data that the run already processed before the cancellation. If the run had already written some data to the flow destination, then that data remains in the destination. If you configured the flow to use a batch API (such as the Salesforce Bulk API 2.0), then the run will finish reading or writing its entire batch of data after the cancellation. For these operations, the data processing charges for Amazon AppFlow apply. For the pricing information, see Amazon AppFlow pricing.
20
+ */
21
+ cancelFlowExecutions(callback?: (err: AWSError, data: Appflow.Types.CancelFlowExecutionsResponse) => void): Request<Appflow.Types.CancelFlowExecutionsResponse, AWSError>;
14
22
  /**
15
23
  * Creates a new connector profile associated with your Amazon Web Services account. There is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support channel. In each connector profile that you create, you can provide the credentials and properties for only one connector.
16
24
  */
@@ -322,6 +330,22 @@ declare namespace Appflow {
322
330
  export type BucketName = string;
323
331
  export type BucketPrefix = string;
324
332
  export type BusinessUnitId = string;
333
+ export interface CancelFlowExecutionsRequest {
334
+ /**
335
+ * The name of a flow with active runs that you want to cancel.
336
+ */
337
+ flowName: FlowName;
338
+ /**
339
+ * The ID of each active run to cancel. These runs must belong to the flow you specify in your request. If you omit this parameter, your request ends all active runs that belong to the flow.
340
+ */
341
+ executionIds?: ExecutionIds;
342
+ }
343
+ export interface CancelFlowExecutionsResponse {
344
+ /**
345
+ * The IDs of runs that Amazon AppFlow couldn't cancel. These runs might be ineligible for canceling because they haven't started yet or have already completed.
346
+ */
347
+ invalidExecutions?: ExecutionIds;
348
+ }
325
349
  export type CatalogType = "GLUE"|string;
326
350
  export type ClientCredentialsArn = string;
327
351
  export type ClientId = string;
@@ -1597,6 +1621,7 @@ declare namespace Appflow {
1597
1621
  mostRecentExecutionStatus?: ExecutionStatus;
1598
1622
  }
1599
1623
  export type ExecutionId = string;
1624
+ export type ExecutionIds = ExecutionId[];
1600
1625
  export type ExecutionMessage = string;
1601
1626
  export interface ExecutionRecord {
1602
1627
  /**
@@ -1650,7 +1675,7 @@ declare namespace Appflow {
1650
1675
  */
1651
1676
  recordsProcessed?: Long;
1652
1677
  }
1653
- export type ExecutionStatus = "InProgress"|"Successful"|"Error"|string;
1678
+ export type ExecutionStatus = "InProgress"|"Successful"|"Error"|"CancelStarted"|"Canceled"|string;
1654
1679
  export type FieldType = string;
1655
1680
  export interface FieldTypeDetails {
1656
1681
  /**
@@ -4222,7 +4222,7 @@ declare namespace Connect {
4222
4222
  Name: EventBridgeActionName;
4223
4223
  }
4224
4224
  export type EventBridgeActionName = string;
4225
- export type EventSourceName = "OnPostCallAnalysisAvailable"|"OnRealTimeCallAnalysisAvailable"|"OnPostChatAnalysisAvailable"|"OnZendeskTicketCreate"|"OnZendeskTicketStatusUpdate"|"OnSalesforceCaseCreate"|string;
4225
+ export type EventSourceName = "OnPostCallAnalysisAvailable"|"OnRealTimeCallAnalysisAvailable"|"OnPostChatAnalysisAvailable"|"OnZendeskTicketCreate"|"OnZendeskTicketStatusUpdate"|"OnSalesforceCaseCreate"|"OnContactEvaluationSubmit"|string;
4226
4226
  export interface FilterV2 {
4227
4227
  /**
4228
4228
  * The key to use for filtering data. For example, QUEUE, ROUTING_PROFILE, AGENT, CHANNEL, AGENT_HIERARCHY_LEVEL_ONE, AGENT_HIERARCHY_LEVEL_TWO, AGENT_HIERARCHY_LEVEL_THREE, AGENT_HIERARCHY_LEVEL_FOUR, AGENT_HIERARCHY_LEVEL_FIVE. There must be at least 1 key and a maximum 5 keys.
@@ -2782,7 +2782,7 @@ declare namespace ECS {
2782
2782
  */
2783
2783
  name: SettingName;
2784
2784
  /**
2785
- * The account setting value for the specified principal ARN. Accepted values are enabled and disabled.
2785
+ * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.
2786
2786
  */
2787
2787
  value: String;
2788
2788
  }
@@ -2798,7 +2798,7 @@ declare namespace ECS {
2798
2798
  */
2799
2799
  name: SettingName;
2800
2800
  /**
2801
- * The account setting value for the specified principal ARN. Accepted values are enabled and disabled.
2801
+ * The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.
2802
2802
  */
2803
2803
  value: String;
2804
2804
  /**
@@ -516,11 +516,11 @@ declare class Kendra extends Service {
516
516
  */
517
517
  updateQuerySuggestionsBlockList(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
518
518
  /**
519
- * Updates the settings of query suggestions for an index. Amazon Kendra supports partial updates, so you only need to provide the fields you want to update. If an update is currently processing (i.e. 'happening'), you need to wait for the update to finish before making another update. Updates to query suggestions settings might not take effect right away. The time for your updated settings to take effect depends on the updates made and the number of search queries in your index. You can still enable/disable query suggestions at any time. UpdateQuerySuggestionsConfig is currently not supported in the Amazon Web Services GovCloud (US-West) region.
519
+ * Updates the settings of query suggestions for an index. Amazon Kendra supports partial updates, so you only need to provide the fields you want to update. If an update is currently processing, you need to wait for the update to finish before making another update. Updates to query suggestions settings might not take effect right away. The time for your updated settings to take effect depends on the updates made and the number of search queries in your index. You can still enable/disable query suggestions at any time. UpdateQuerySuggestionsConfig is currently not supported in the Amazon Web Services GovCloud (US-West) region.
520
520
  */
521
521
  updateQuerySuggestionsConfig(params: Kendra.Types.UpdateQuerySuggestionsConfigRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
522
522
  /**
523
- * Updates the settings of query suggestions for an index. Amazon Kendra supports partial updates, so you only need to provide the fields you want to update. If an update is currently processing (i.e. 'happening'), you need to wait for the update to finish before making another update. Updates to query suggestions settings might not take effect right away. The time for your updated settings to take effect depends on the updates made and the number of search queries in your index. You can still enable/disable query suggestions at any time. UpdateQuerySuggestionsConfig is currently not supported in the Amazon Web Services GovCloud (US-West) region.
523
+ * Updates the settings of query suggestions for an index. Amazon Kendra supports partial updates, so you only need to provide the fields you want to update. If an update is currently processing, you need to wait for the update to finish before making another update. Updates to query suggestions settings might not take effect right away. The time for your updated settings to take effect depends on the updates made and the number of search queries in your index. You can still enable/disable query suggestions at any time. UpdateQuerySuggestionsConfig is currently not supported in the Amazon Web Services GovCloud (US-West) region.
524
524
  */
525
525
  updateQuerySuggestionsConfig(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
526
526
  /**
@@ -717,6 +717,45 @@ declare namespace Kendra {
717
717
  LessThanOrEquals?: DocumentAttribute;
718
718
  }
719
719
  export type AttributeFilterList = AttributeFilter[];
720
+ export interface AttributeSuggestionsDescribeConfig {
721
+ /**
722
+ * The list of fields/attributes that you want to set as suggestible for query suggestions.
723
+ */
724
+ SuggestableConfigList?: SuggestableConfigList;
725
+ /**
726
+ * The mode is set to either ACTIVE or INACTIVE. If the Mode for query history is set to ENABLED when calling UpdateQuerySuggestionsConfig and AttributeSuggestionsMode to use fields/attributes is set to ACTIVE, and you haven't set your SuggestionTypes preference to DOCUMENT_ATTRIBUTES, then Amazon Kendra uses the query history.
727
+ */
728
+ AttributeSuggestionsMode?: AttributeSuggestionsMode;
729
+ }
730
+ export interface AttributeSuggestionsGetConfig {
731
+ /**
732
+ * The list of document field/attribute keys or field names to use for query suggestions. If the content within any of the fields match what your user starts typing as their query, then the field content is returned as a query suggestion.
733
+ */
734
+ SuggestionAttributes?: DocumentAttributeKeyList;
735
+ /**
736
+ * The list of additional document field/attribute keys or field names to include in the response. You can use additional fields to provide extra information in the response. Additional fields are not used to base suggestions on.
737
+ */
738
+ AdditionalResponseAttributes?: DocumentAttributeKeyList;
739
+ /**
740
+ * Filters the search results based on document fields/attributes.
741
+ */
742
+ AttributeFilter?: AttributeFilter;
743
+ /**
744
+ * Applies user context filtering so that only users who are given access to certain documents see these documents in their search results.
745
+ */
746
+ UserContext?: UserContext;
747
+ }
748
+ export type AttributeSuggestionsMode = "ACTIVE"|"INACTIVE"|string;
749
+ export interface AttributeSuggestionsUpdateConfig {
750
+ /**
751
+ * The list of fields/attributes that you want to set as suggestible for query suggestions.
752
+ */
753
+ SuggestableConfigList?: SuggestableConfigList;
754
+ /**
755
+ * You can set the mode to ACTIVE or INACTIVE. You must also set SuggestionTypes as either QUERY or DOCUMENT_ATTRIBUTES and then call GetQuerySuggestions. If Mode to use query history is set to ENABLED when calling UpdateQuerySuggestionsConfig and AttributeSuggestionsMode to use fields/attributes is set to ACTIVE, and you haven't set your SuggestionTypes preference to DOCUMENT_ATTRIBUTES, then Amazon Kendra uses the query history.
756
+ */
757
+ AttributeSuggestionsMode?: AttributeSuggestionsMode;
758
+ }
720
759
  export interface AuthenticationConfiguration {
721
760
  /**
722
761
  * The list of configuration information that's required to connect to and crawl a website host using basic authentication credentials. The list includes the name and port number of the website host.
@@ -2338,7 +2377,7 @@ declare namespace Kendra {
2338
2377
  */
2339
2378
  MinimumQueryCount?: MinimumQueryCount;
2340
2379
  /**
2341
- * The Unix timestamp when query suggestions for an index was last updated.
2380
+ * The Unix timestamp when query suggestions for an index was last updated. Amazon Kendra automatically updates suggestions every 24 hours, after you change a setting or after you apply a block list.
2342
2381
  */
2343
2382
  LastSuggestionsBuildTime?: Timestamp;
2344
2383
  /**
@@ -2346,9 +2385,13 @@ declare namespace Kendra {
2346
2385
  */
2347
2386
  LastClearTime?: Timestamp;
2348
2387
  /**
2349
- * The current total count of query suggestions for an index. This count can change when you update your query suggestions settings, if you filter out certain queries from suggestions using a block list, and as the query log accumulates more queries for Amazon Kendra to learn from.
2388
+ * The current total count of query suggestions for an index. This count can change when you update your query suggestions settings, if you filter out certain queries from suggestions using a block list, and as the query log accumulates more queries for Amazon Kendra to learn from. If the count is much lower than you expected, it could be because Amazon Kendra needs more queries in the query history to learn from or your current query suggestions settings are too strict.
2350
2389
  */
2351
2390
  TotalSuggestionsCount?: Integer;
2391
+ /**
2392
+ * Configuration information for the document fields/attributes that you want to base query suggestions on.
2393
+ */
2394
+ AttributeSuggestionsConfig?: AttributeSuggestionsDescribeConfig;
2352
2395
  }
2353
2396
  export interface DescribeThesaurusRequest {
2354
2397
  /**
@@ -2994,6 +3037,14 @@ declare namespace Kendra {
2994
3037
  * The maximum number of query suggestions you want to show to your users.
2995
3038
  */
2996
3039
  MaxSuggestionsCount?: Integer;
3040
+ /**
3041
+ * The suggestions type to base query suggestions on. The suggestion types are query history or document fields/attributes. You can set one type or the other. If you set query history as your suggestions type, Amazon Kendra suggests queries relevant to your users based on popular queries in the query history. If you set document fields/attributes as your suggestions type, Amazon Kendra suggests queries relevant to your users based on the contents of document fields.
3042
+ */
3043
+ SuggestionTypes?: SuggestionTypes;
3044
+ /**
3045
+ * Configuration information for the document fields/attributes that you want to base query suggestions on.
3046
+ */
3047
+ AttributeSuggestionsConfig?: AttributeSuggestionsGetConfig;
2997
3048
  }
2998
3049
  export interface GetQuerySuggestionsResponse {
2999
3050
  /**
@@ -4698,6 +4749,21 @@ declare namespace Kendra {
4698
4749
  */
4699
4750
  SortOrder: SortOrder;
4700
4751
  }
4752
+ export interface SourceDocument {
4753
+ /**
4754
+ * The identifier of the document used for a query suggestion.
4755
+ */
4756
+ DocumentId?: String;
4757
+ /**
4758
+ * The document fields/attributes used for a query suggestion.
4759
+ */
4760
+ SuggestionAttributes?: DocumentAttributeKeyList;
4761
+ /**
4762
+ * The additional fields/attributes to include in the response. You can use additional fields to provide extra information in the response. Additional fields are not used to base suggestions on.
4763
+ */
4764
+ AdditionalAttributes?: DocumentAttributeList;
4765
+ }
4766
+ export type SourceDocuments = SourceDocument[];
4701
4767
  export interface SpellCorrectedQuery {
4702
4768
  /**
4703
4769
  * The query with the suggested spell corrections.
@@ -4788,6 +4854,17 @@ declare namespace Kendra {
4788
4854
  }
4789
4855
  export type SubnetId = string;
4790
4856
  export type SubnetIdList = SubnetId[];
4857
+ export interface SuggestableConfig {
4858
+ /**
4859
+ * The name of the document field/attribute.
4860
+ */
4861
+ AttributeName?: DocumentAttributeKey;
4862
+ /**
4863
+ * TRUE means the document field/attribute is suggestible, so the contents within the field can be used for query suggestions.
4864
+ */
4865
+ Suggestable?: ObjectBoolean;
4866
+ }
4867
+ export type SuggestableConfigList = SuggestableConfig[];
4791
4868
  export type SuggestedQueryText = string;
4792
4869
  export interface Suggestion {
4793
4870
  /**
@@ -4798,6 +4875,10 @@ declare namespace Kendra {
4798
4875
  * The value for the UUID (universally unique identifier) of a single query suggestion. The value is the text string of a suggestion.
4799
4876
  */
4800
4877
  Value?: SuggestionValue;
4878
+ /**
4879
+ * The list of document IDs and their fields/attributes that are used for a single query suggestion, if document fields are set to use for query suggestions.
4880
+ */
4881
+ SourceDocuments?: SourceDocuments;
4801
4882
  }
4802
4883
  export interface SuggestionHighlight {
4803
4884
  /**
@@ -4822,6 +4903,8 @@ declare namespace Kendra {
4822
4903
  */
4823
4904
  Highlights?: SuggestionHighlightList;
4824
4905
  }
4906
+ export type SuggestionType = "QUERY"|"DOCUMENT_ATTRIBUTES"|string;
4907
+ export type SuggestionTypes = SuggestionType[];
4825
4908
  export interface SuggestionValue {
4826
4909
  /**
4827
4910
  * The SuggestionTextWithHighlights structure that contains the query suggestion text and highlights.
@@ -5076,7 +5159,7 @@ declare namespace Kendra {
5076
5159
  */
5077
5160
  IndexId: IndexId;
5078
5161
  /**
5079
- * The identifier of the index used for featuring results.
5162
+ * The identifier of the set of featured results that you want to update.
5080
5163
  */
5081
5164
  FeaturedResultsSetId: FeaturedResultsSetId;
5082
5165
  /**
@@ -5195,6 +5278,10 @@ declare namespace Kendra {
5195
5278
  * The minimum number of times a query must be searched in order to be eligible to suggest to your users. Decreasing this number increases the number of suggestions. However, this affects the quality of suggestions as it sets a low bar for a query to be considered popular to suggest to users. How you tune this setting depends on your specific needs.
5196
5279
  */
5197
5280
  MinimumQueryCount?: MinimumQueryCount;
5281
+ /**
5282
+ * Configuration information for the document fields/attributes that you want to base query suggestions on.
5283
+ */
5284
+ AttributeSuggestionsConfig?: AttributeSuggestionsUpdateConfig;
5198
5285
  }
5199
5286
  export interface UpdateThesaurusRequest {
5200
5287
  /**
@@ -20,11 +20,11 @@ declare class Resiliencehub extends Service {
20
20
  */
21
21
  addDraftAppVersionResourceMappings(callback?: (err: AWSError, data: Resiliencehub.Types.AddDraftAppVersionResourceMappingsResponse) => void): Request<Resiliencehub.Types.AddDraftAppVersionResourceMappingsResponse, AWSError>;
22
22
  /**
23
- * Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an application name, resources from one or more–up to 20–CloudFormation stacks, and an appropriate resiliency policy. After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
23
+ * Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe an Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. For more information about the number of resources supported per application, see Service Quotas. After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
24
24
  */
25
25
  createApp(params: Resiliencehub.Types.CreateAppRequest, callback?: (err: AWSError, data: Resiliencehub.Types.CreateAppResponse) => void): Request<Resiliencehub.Types.CreateAppResponse, AWSError>;
26
26
  /**
27
- * Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an application name, resources from one or more–up to 20–CloudFormation stacks, and an appropriate resiliency policy. After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
27
+ * Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe an Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. For more information about the number of resources supported per application, see Service Quotas. After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
28
28
  */
29
29
  createApp(callback?: (err: AWSError, data: Resiliencehub.Types.CreateAppResponse) => void): Request<Resiliencehub.Types.CreateAppResponse, AWSError>;
30
30
  /**
@@ -971,7 +971,7 @@ declare namespace Resiliencehub {
971
971
  /**
972
972
  * The name of the resource.
973
973
  */
974
- resourceName: EntityName;
974
+ resourceName?: EntityName;
975
975
  /**
976
976
  * The type of resource.
977
977
  */
@@ -2125,6 +2125,10 @@ declare namespace Resiliencehub {
2125
2125
  * The logical identifier of the resource.
2126
2126
  */
2127
2127
  logicalResourceId: LogicalResourceId;
2128
+ /**
2129
+ * The name of the parent resource.
2130
+ */
2131
+ parentResourceName?: EntityName;
2128
2132
  /**
2129
2133
  * The physical identifier of the resource.
2130
2134
  */
@@ -2137,6 +2141,10 @@ declare namespace Resiliencehub {
2137
2141
  * The type of resource.
2138
2142
  */
2139
2143
  resourceType: String255;
2144
+ /**
2145
+ * The type of input source.
2146
+ */
2147
+ sourceType?: ResourceSourceType;
2140
2148
  }
2141
2149
  export interface PhysicalResourceId {
2142
2150
  /**
@@ -2482,6 +2490,7 @@ declare namespace Resiliencehub {
2482
2490
  export type ResourceMappingList = ResourceMapping[];
2483
2491
  export type ResourceMappingType = "CfnStack"|"Resource"|"AppRegistryApp"|"ResourceGroup"|"Terraform"|"EKS"|string;
2484
2492
  export type ResourceResolutionStatusType = "Pending"|"InProgress"|"Failed"|"Success"|string;
2493
+ export type ResourceSourceType = "AppTemplate"|"Discovered"|string;
2485
2494
  export interface S3Location {
2486
2495
  /**
2487
2496
  * The name of the Amazon S3 bucket.
@@ -3158,6 +3158,10 @@ declare namespace SageMaker {
3158
3158
  * The channel type (optional) is an enum string. The default value is training. Channels for training and validation must share the same ContentType and TargetAttributeName. For information on specifying training and validation channel types, see How to specify training and validation datasets.
3159
3159
  */
3160
3160
  ChannelType?: AutoMLChannelType;
3161
+ /**
3162
+ * If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation. Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded. Support for sample weights is available in Ensembling mode only.
3163
+ */
3164
+ SampleWeightAttributeName?: SampleWeightAttributeName;
3161
3165
  }
3162
3166
  export type AutoMLChannelType = "training"|"validation"|string;
3163
3167
  export interface AutoMLContainerDefinition {
@@ -3259,7 +3263,7 @@ declare namespace SageMaker {
3259
3263
  export type AutoMLJobName = string;
3260
3264
  export interface AutoMLJobObjective {
3261
3265
  /**
3262
- * The name of the objective metric used to measure the predictive quality of a machine learning system. This metric is optimized during training to provide the best estimate for model parameter values from data. Here are the options: Accuracy The ratio of the number of correctly classified items to the total number of (correctly and incorrectly) classified items. It is used for both binary and multiclass classification. Accuracy measures how close the predicted class values are to the actual values. Values for accuracy metrics vary between zero (0) and one (1). A value of 1 indicates perfect accuracy, and 0 indicates perfect inaccuracy. AUC The area under the curve (AUC) metric is used to compare and evaluate binary classification by algorithms that return probabilities, such as logistic regression. To map the probabilities into classifications, these are compared against a threshold value. The relevant curve is the receiver operating characteristic curve (ROC curve). The ROC curve plots the true positive rate (TPR) of predictions (or recall) against the false positive rate (FPR) as a function of the threshold value, above which a prediction is considered positive. Increasing the threshold results in fewer false positives, but more false negatives. AUC is the area under this ROC curve. Therefore, AUC provides an aggregated measure of the model performance across all possible classification thresholds. AUC scores vary between 0 and 1. A score of 1 indicates perfect accuracy, and a score of one half (0.5) indicates that the prediction is not better than a random classifier. BalancedAccuracy BalancedAccuracy is a metric that measures the ratio of accurate predictions to all predictions. This ratio is calculated after normalizing true positives (TP) and true negatives (TN) by the total number of positive (P) and negative (N) values. It is used in both binary and multiclass classification and is defined as follows: 0.5*((TP/P)+(TN/N)), with values ranging from 0 to 1. 
BalancedAccuracy gives a better measure of accuracy when the number of positives or negatives differ greatly from each other in an imbalanced dataset. For example, when only 1% of email is spam. F1 The F1 score is the harmonic mean of the precision and recall, defined as follows: F1 = 2 * (precision * recall) / (precision + recall). It is used for binary classification into classes traditionally referred to as positive and negative. Predictions are said to be true when they match their actual (correct) class, and false when they do not. Precision is the ratio of the true positive predictions to all positive predictions, and it includes the false positives in a dataset. Precision measures the quality of the prediction when it predicts the positive class. Recall (or sensitivity) is the ratio of the true positive predictions to all actual positive instances. Recall measures how completely a model predicts the actual class members in a dataset. F1 scores vary between 0 and 1. A score of 1 indicates the best possible performance, and 0 indicates the worst. F1macro The F1macro score applies F1 scoring to multiclass classification problems. It does this by calculating the precision and recall, and then taking their harmonic mean to calculate the F1 score for each class. Lastly, the F1macro averages the individual scores to obtain the F1macro score. F1macro scores vary between 0 and 1. A score of 1 indicates the best possible performance, and 0 indicates the worst. MAE The mean absolute error (MAE) is a measure of how different the predicted and actual values are, when they're averaged over all values. MAE is commonly used in regression analysis to understand model prediction error. If there is linear regression, MAE represents the average distance from a predicted line to the actual value. MAE is defined as the sum of absolute errors divided by the number of observations. Values range from 0 to infinity, with smaller numbers indicating a better model fit to the data. 
MSE The mean squared error (MSE) is the average of the squared differences between the predicted and actual values. It is used for regression. MSE values are always positive. The better a model is at predicting the actual values, the smaller the MSE value is Precision Precision measures how well an algorithm predicts the true positives (TP) out of all of the positives that it identifies. It is defined as follows: Precision = TP/(TP+FP), with values ranging from zero (0) to one (1), and is used in binary classification. Precision is an important metric when the cost of a false positive is high. For example, the cost of a false positive is very high if an airplane safety system is falsely deemed safe to fly. A false positive (FP) reflects a positive prediction that is actually negative in the data. PrecisionMacro The precision macro computes precision for multiclass classification problems. It does this by calculating precision for each class and averaging scores to obtain precision for several classes. PrecisionMacro scores range from zero (0) to one (1). Higher scores reflect the model's ability to predict true positives (TP) out of all of the positives that it identifies, averaged across multiple classes. R2 R2, also known as the coefficient of determination, is used in regression to quantify how much a model can explain the variance of a dependent variable. Values range from one (1) to negative one (-1). Higher numbers indicate a higher fraction of explained variability. R2 values close to zero (0) indicate that very little of the dependent variable can be explained by the model. Negative values indicate a poor fit and that the model is outperformed by a constant function. For linear regression, this is a horizontal line. Recall Recall measures how well an algorithm correctly predicts all of the true positives (TP) in a dataset. A true positive is a positive prediction that is also an actual positive value in the data. 
Recall is defined as follows: Recall = TP/(TP+FN), with values ranging from 0 to 1. Higher scores reflect a better ability of the model to predict true positives (TP) in the data, and is used in binary classification. Recall is important when testing for cancer because it's used to find all of the true positives. A false positive (FP) reflects a positive prediction that is actually negative in the data. It is often insufficient to measure only recall, because predicting every output as a true positive yield a perfect recall score. RecallMacro The RecallMacro computes recall for multiclass classification problems by calculating recall for each class and averaging scores to obtain recall for several classes. RecallMacro scores range from 0 to 1. Higher scores reflect the model's ability to predict true positives (TP) in a dataset. Whereas, a true positive reflects a positive prediction that is also an actual positive value in the data. It is often insufficient to measure only recall, because predicting every output as a true positive yields a perfect recall score. RMSE Root mean squared error (RMSE) measures the square root of the squared difference between predicted and actual values, and it's averaged over all values. It is used in regression analysis to understand model prediction error. It's an important metric to indicate the presence of large model errors and outliers. Values range from zero (0) to infinity, with smaller numbers indicating a better model fit to the data. RMSE is dependent on scale, and should not be used to compare datasets of different sizes. If you do not specify a metric explicitly, the default behavior is to automatically use: MSE: for regression. F1: for binary classification Accuracy: for multiclass classification.
3266
+ * The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset. For the list of all available metrics supported by Autopilot, see Autopilot metrics. If you do not specify a metric explicitly, the default behavior is to automatically use: MSE: for regression. F1: for binary classification Accuracy: for multiclass classification.
3263
3267
  */
3264
3268
  MetricName: AutoMLMetricEnum;
3265
3269
  }
@@ -19438,6 +19442,7 @@ declare namespace SageMaker {
19438
19442
  export type SageMakerImageVersionAlias = string;
19439
19443
  export type SageMakerImageVersionAliases = SageMakerImageVersionAlias[];
19440
19444
  export type SagemakerServicecatalogStatus = "Enabled"|"Disabled"|string;
19445
+ export type SampleWeightAttributeName = string;
19441
19446
  export type SamplingPercentage = number;
19442
19447
  export interface ScheduleConfig {
19443
19448
  /**
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
83
83
  /**
84
84
  * @constant
85
85
  */
86
- VERSION: '2.1369.0',
86
+ VERSION: '2.1370.0',
87
87
 
88
88
  /**
89
89
  * @api private