@cdklabs/cdk-ecs-codedeploy 0.0.12 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/.jsii +3 -3
  2. package/lib/ecs-appspec/index.js +1 -1
  3. package/lib/ecs-deployment/index.js +1 -1
  4. package/node_modules/aws-sdk/CHANGELOG.md +12 -1
  5. package/node_modules/aws-sdk/README.md +1 -1
  6. package/node_modules/aws-sdk/apis/batch-2016-08-10.min.json +6 -0
  7. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +90 -83
  8. package/node_modules/aws-sdk/apis/ec2-2016-11-15.paginators.json +3 -0
  9. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +2 -1
  10. package/node_modules/aws-sdk/apis/medialive-2017-10-14.min.json +247 -201
  11. package/node_modules/aws-sdk/apis/nimble-2020-08-01.min.json +283 -140
  12. package/node_modules/aws-sdk/apis/nimble-2020-08-01.paginators.json +5 -0
  13. package/node_modules/aws-sdk/apis/route53domains-2014-05-15.min.json +159 -91
  14. package/node_modules/aws-sdk/clients/batch.d.ts +13 -5
  15. package/node_modules/aws-sdk/clients/ec2.d.ts +52 -38
  16. package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +4 -0
  17. package/node_modules/aws-sdk/clients/medialive.d.ts +47 -4
  18. package/node_modules/aws-sdk/clients/nimble.d.ts +319 -123
  19. package/node_modules/aws-sdk/clients/resourceexplorer2.d.ts +6 -6
  20. package/node_modules/aws-sdk/clients/route53domains.d.ts +227 -37
  21. package/node_modules/aws-sdk/clients/sagemaker.d.ts +9 -9
  22. package/node_modules/aws-sdk/clients/transcribeservice.d.ts +1 -1
  23. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  24. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +9 -9
  25. package/node_modules/aws-sdk/dist/aws-sdk.js +255 -177
  26. package/node_modules/aws-sdk/dist/aws-sdk.min.js +69 -69
  27. package/node_modules/aws-sdk/lib/core.js +1 -1
  28. package/node_modules/aws-sdk/package.json +1 -1
  29. package/package.json +4 -4
@@ -181,11 +181,11 @@ declare class SageMaker extends Service {
  */
  createEndpointConfig(callback?: (err: AWSError, data: SageMaker.Types.CreateEndpointConfigOutput) => void): Request<SageMaker.Types.CreateEndpointConfigOutput, AWSError>;
  /**
- * Creates an SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model. The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant. When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK. You can add tags to experiments, trials, trial components and then use the Search API to search for the tags. To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API. To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.
+ * Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model. In the Studio UI, trials are referred to as run groups and trial components are referred to as runs. The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant. When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK. You can add tags to experiments, trials, trial components and then use the Search API to search for the tags. To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API. To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.
  */
  createExperiment(params: SageMaker.Types.CreateExperimentRequest, callback?: (err: AWSError, data: SageMaker.Types.CreateExperimentResponse) => void): Request<SageMaker.Types.CreateExperimentResponse, AWSError>;
  /**
- * Creates an SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model. The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant. When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK. You can add tags to experiments, trials, trial components and then use the Search API to search for the tags. To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API. To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.
+ * Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model. In the Studio UI, trials are referred to as run groups and trial components are referred to as runs. The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant. When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK. You can add tags to experiments, trials, trial components and then use the Search API to search for the tags. To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API. To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.
  */
  createExperiment(callback?: (err: AWSError, data: SageMaker.Types.CreateExperimentResponse) => void): Request<SageMaker.Types.CreateExperimentResponse, AWSError>;
  /**
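
For orientation, a minimal TypeScript sketch (not part of the package) of the CreateExperiment call documented above, using the aws-sdk v2 SageMaker client this diff updates; the experiment name and description are hypothetical placeholders.

import * as AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

async function createChurnExperiment(): Promise<void> {
  // ExperimentName is required; Description is the optional parameter mentioned in the doc text
  const response = await sagemaker
    .createExperiment({
      ExperimentName: 'churn-prediction',            // hypothetical name
      Description: 'Compare feature sets for churn', // can be changed later via UpdateExperiment
    })
    .promise();
  console.log('Experiment ARN:', response.ExperimentArn);
}

createChurnExperiment().catch(console.error);
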
@@ -3192,7 +3192,7 @@ declare namespace SageMaker {
  export type AutoMLJobName = string;
  export interface AutoMLJobObjective {
  /**
- * The name of the objective metric used to measure the predictive quality of a machine learning system. This metric is optimized during training to provide the best estimate for model parameter values from data. Here are the options: MSE: The mean squared error (MSE) is the average of the squared differences between the predicted and actual values. It is used for regression. MSE values are always positive: the better a model is at predicting the actual values, the smaller the MSE value is. When the data contains outliers, they tend to dominate the MSE, which might cause subpar prediction performance. Accuracy: The ratio of the number of correctly classified items to the total number of (correctly and incorrectly) classified items. It is used for binary and multiclass classification. It measures how close the predicted class values are to the actual values. Accuracy values vary between zero and one: one indicates perfect accuracy and zero indicates perfect inaccuracy. F1: The F1 score is the harmonic mean of the precision and recall. It is used for binary classification into classes traditionally referred to as positive and negative. Predictions are said to be true when they match their actual (correct) class and false when they do not. Precision is the ratio of the true positive predictions to all positive predictions (including the false positives) in a data set and measures the quality of the prediction when it predicts the positive class. Recall (or sensitivity) is the ratio of the true positive predictions to all actual positive instances and measures how completely a model predicts the actual class members in a data set. The standard F1 score weighs precision and recall equally. But which metric is paramount typically depends on specific aspects of a problem. F1 scores vary between zero and one: one indicates the best possible performance and zero the worst. AUC: The area under the curve (AUC) metric is used to compare and evaluate binary classification by algorithms such as logistic regression that return probabilities. A threshold is needed to map the probabilities into classifications. The relevant curve is the receiver operating characteristic curve that plots the true positive rate (TPR) of predictions (or recall) against the false positive rate (FPR) as a function of the threshold value, above which a prediction is considered positive. Increasing the threshold results in fewer false positives but more false negatives. AUC is the area under this receiver operating characteristic curve and so provides an aggregated measure of the model performance across all possible classification thresholds. The AUC score can also be interpreted as the probability that a randomly selected positive data point is more likely to be predicted positive than a randomly selected negative example. AUC scores vary between zero and one: a score of one indicates perfect accuracy and a score of one half indicates that the prediction is not better than a random classifier. Values under one half predict less accurately than a random predictor. But such consistently bad predictors can simply be inverted to obtain better than random predictors. F1macro: The F1macro score applies F1 scoring to multiclass classification. In this context, you have multiple classes to predict. You just calculate the precision and recall for each class as you did for the positive class in binary classification. Then, use these values to calculate the F1 score for each class and average them to obtain the F1macro score. 
F1macro scores vary between zero and one: one indicates the best possible performance and zero the worst. If you do not specify a metric explicitly, the default behavior is to automatically use: MSE: for regression. F1: for binary classification Accuracy: for multiclass classification.
+ * The name of the objective metric used to measure the predictive quality of a machine learning system. This metric is optimized during training to provide the best estimate for model parameter values from data. Here are the options: Accuracy The ratio of the number of correctly classified items to the total number of (correctly and incorrectly) classified items. It is used for both binary and multiclass classification. Accuracy measures how close the predicted class values are to the actual values. Values for accuracy metrics vary between zero (0) and one (1). A value of 1 indicates perfect accuracy, and 0 indicates perfect inaccuracy. AUC The area under the curve (AUC) metric is used to compare and evaluate binary classification by algorithms that return probabilities, such as logistic regression. To map the probabilities into classifications, these are compared against a threshold value. The relevant curve is the receiver operating characteristic curve (ROC curve). The ROC curve plots the true positive rate (TPR) of predictions (or recall) against the false positive rate (FPR) as a function of the threshold value, above which a prediction is considered positive. Increasing the threshold results in fewer false positives, but more false negatives. AUC is the area under this ROC curve. Therefore, AUC provides an aggregated measure of the model performance across all possible classification thresholds. AUC scores vary between 0 and 1. A score of 1 indicates perfect accuracy, and a score of one half (0.5) indicates that the prediction is not better than a random classifier. BalancedAccuracy BalancedAccuracy is a metric that measures the ratio of accurate predictions to all predictions. This ratio is calculated after normalizing true positives (TP) and true negatives (TN) by the total number of positive (P) and negative (N) values. It is used in both binary and multiclass classification and is defined as follows: 0.5*((TP/P)+(TN/N)), with values ranging from 0 to 1. BalancedAccuracy gives a better measure of accuracy when the number of positives or negatives differ greatly from each other in an imbalanced dataset. For example, when only 1% of email is spam. F1 The F1 score is the harmonic mean of the precision and recall, defined as follows: F1 = 2 * (precision * recall) / (precision + recall). It is used for binary classification into classes traditionally referred to as positive and negative. Predictions are said to be true when they match their actual (correct) class, and false when they do not. Precision is the ratio of the true positive predictions to all positive predictions, and it includes the false positives in a dataset. Precision measures the quality of the prediction when it predicts the positive class. Recall (or sensitivity) is the ratio of the true positive predictions to all actual positive instances. Recall measures how completely a model predicts the actual class members in a dataset. F1 scores vary between 0 and 1. A score of 1 indicates the best possible performance, and 0 indicates the worst. F1macro The F1macro score applies F1 scoring to multiclass classification problems. It does this by calculating the precision and recall, and then taking their harmonic mean to calculate the F1 score for each class. Lastly, the F1macro averages the individual scores to obtain the F1macro score. F1macro scores vary between 0 and 1. A score of 1 indicates the best possible performance, and 0 indicates the worst. 
MAE The mean absolute error (MAE) is a measure of how different the predicted and actual values are, when they're averaged over all values. MAE is commonly used in regression analysis to understand model prediction error. If there is linear regression, MAE represents the average distance from a predicted line to the actual value. MAE is defined as the sum of absolute errors divided by the number of observations. Values range from 0 to infinity, with smaller numbers indicating a better model fit to the data. MSE The mean squared error (MSE) is the average of the squared differences between the predicted and actual values. It is used for regression. MSE values are always positive. The better a model is at predicting the actual values, the smaller the MSE value is Precision Precision measures how well an algorithm predicts the true positives (TP) out of all of the positives that it identifies. It is defined as follows: Precision = TP/(TP+FP), with values ranging from zero (0) to one (1), and is used in binary classification. Precision is an important metric when the cost of a false positive is high. For example, the cost of a false positive is very high if an airplane safety system is falsely deemed safe to fly. A false positive (FP) reflects a positive prediction that is actually negative in the data. PrecisionMacro The precision macro computes precision for multiclass classification problems. It does this by calculating precision for each class and averaging scores to obtain precision for several classes. PrecisionMacro scores range from zero (0) to one (1). Higher scores reflect the model's ability to predict true positives (TP) out of all of the positives that it identifies, averaged across multiple classes. R2 R2, also known as the coefficient of determination, is used in regression to quantify how much a model can explain the variance of a dependent variable. Values range from one (1) to negative one (-1). Higher numbers indicate a higher fraction of explained variability. R2 values close to zero (0) indicate that very little of the dependent variable can be explained by the model. Negative values indicate a poor fit and that the model is outperformed by a constant function. For linear regression, this is a horizontal line. Recall Recall measures how well an algorithm correctly predicts all of the true positives (TP) in a dataset. A true positive is a positive prediction that is also an actual positive value in the data. Recall is defined as follows: Recall = TP/(TP+FN), with values ranging from 0 to 1. Higher scores reflect a better ability of the model to predict true positives (TP) in the data, and is used in binary classification. Recall is important when testing for cancer because it's used to find all of the true positives. A false positive (FP) reflects a positive prediction that is actually negative in the data. It is often insufficient to measure only recall, because predicting every output as a true positive will yield a perfect recall score. RecallMacro The RecallMacro computes recall for multiclass classification problems by calculating recall for each class and averaging scores to obtain recall for several classes. RecallMacro scores range from 0 to 1. Higher scores reflect the model's ability to predict true positives (TP) in a dataset. Whereas, a true positive reflects a positive prediction that is also an actual positive value in the data. 
It is often insufficient to measure only recall, because predicting every output as a true positive will yield a perfect recall score. RMSE Root mean squared error (RMSE) measures the square root of the squared difference between predicted and actual values, and it's averaged over all values. It is used in regression analysis to understand model prediction error. It's an important metric to indicate the presence of large model errors and outliers. Values range from zero (0) to infinity, with smaller numbers indicating a better model fit to the data. RMSE is dependent on scale, and should not be used to compare datasets of different sizes. If you do not specify a metric explicitly, the default behavior is to automatically use: MSE: for regression. F1: for binary classification Accuracy: for multiclass classification.
  */
  MetricName: AutoMLMetricEnum;
  }
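
As a quick cross-check of the metric definitions in the doc comment above, a small self-contained TypeScript sketch computing F1 and BalancedAccuracy from a binary confusion matrix exactly as the text defines them; the counts are made up for illustration.

interface ConfusionMatrix {
  tp: number; // true positives
  fp: number; // false positives
  tn: number; // true negatives
  fn: number; // false negatives
}

function f1Score({ tp, fp, fn }: ConfusionMatrix): number {
  const precision = tp / (tp + fp); // true positives over all positive predictions
  const recall = tp / (tp + fn);    // true positives over all actual positives
  return (2 * precision * recall) / (precision + recall); // F1 = 2 * (precision * recall) / (precision + recall)
}

function balancedAccuracy({ tp, fp, tn, fn }: ConfusionMatrix): number {
  const positives = tp + fn;
  const negatives = tn + fp;
  return 0.5 * (tp / positives + tn / negatives); // 0.5 * ((TP/P) + (TN/N))
}

// Hypothetical counts: 90 TP, 10 FN, 950 TN, 50 FP
const cm: ConfusionMatrix = { tp: 90, fp: 50, tn: 950, fn: 10 };
console.log(f1Score(cm).toFixed(3));          // 0.750
console.log(balancedAccuracy(cm).toFixed(3)); // 0.925
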
@@ -3245,7 +3245,7 @@ declare namespace SageMaker {
  PartialFailureReasons?: AutoMLPartialFailureReasons;
  }
  export type AutoMLMaxResults = number;
- export type AutoMLMetricEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|string;
+ export type AutoMLMetricEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|string;
  export type AutoMLMetricExtendedEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"LogLoss"|"InferenceLatency"|string;
  export type AutoMLMode = "AUTO"|"ENSEMBLING"|"HYPERPARAMETER_TUNING"|string;
  export type AutoMLNameContains = string;
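
The widened AutoMLMetricEnum means the newly listed metric names can be passed as the Autopilot objective. A minimal sketch, assuming a hypothetical job name, role ARN, S3 locations and target column:

import * as AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

const params: AWS.SageMaker.Types.CreateAutoMLJobRequest = {
  AutoMLJobName: 'churn-automl',                                // hypothetical
  RoleArn: 'arn:aws:iam::123456789012:role/SageMakerExecution', // hypothetical
  InputDataConfig: [
    {
      TargetAttributeName: 'churned',                           // hypothetical label column
      DataSource: {
        S3DataSource: { S3DataType: 'S3Prefix', S3Uri: 's3://my-bucket/train/' },
      },
    },
  ],
  OutputDataConfig: { S3OutputPath: 's3://my-bucket/output/' },
  ProblemType: 'BinaryClassification',
  AutoMLJobObjective: { MetricName: 'BalancedAccuracy' },       // one of the values added to AutoMLMetricEnum
};

sagemaker.createAutoMLJob(params, (err, data) => {
  if (err) console.error(err);
  else console.log('AutoML job ARN:', data.AutoMLJobArn);
});
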
@@ -7007,7 +7007,7 @@ declare namespace SageMaker {
  */
  PartialFailureReasons?: AutoMLPartialFailureReasons;
  /**
- * Returns the job's best AutoMLCandidate.
+ * The best model candidate selected by SageMaker Autopilot using both the best objective metric and lowest InferenceLatency for an experiment.
  */
  BestCandidate?: AutoMLCandidate;
  /**
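
The reworded BestCandidate field above is what DescribeAutoMLJob returns once such a job finishes; a small sketch, continuing the hypothetical job name from the example above:

import * as AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

async function printBestCandidate(): Promise<void> {
  const job = await sagemaker.describeAutoMLJob({ AutoMLJobName: 'churn-automl' }).promise();
  const best = job.BestCandidate; // selected on the objective metric and lowest InferenceLatency, per the doc text
  console.log('Best candidate:', best?.CandidateName);
  console.log('Objective value:', best?.FinalAutoMLJobObjectiveMetric?.Value);
}

printBestCandidate().catch(console.error);
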
@@ -9707,7 +9707,7 @@ declare namespace SageMaker {
  */
  LineageGroupArn?: LineageGroupArn;
  /**
- * A list of the Amazon Resource Name (ARN) and, if applicable, job type for multiple sources of an experiment run.
+ * A list of ARNs and, if applicable, job types for multiple sources of an experiment run.
  */
  Sources?: TrialComponentSources;
  }
@@ -10691,7 +10691,7 @@ declare namespace SageMaker {
  export type ExperimentArn = string;
  export interface ExperimentConfig {
  /**
- * The name of an existing experiment to associate the trial component with.
+ * The name of an existing experiment to associate with the trial component.
  */
  ExperimentName?: ExperimentEntityName;
  /**
@@ -10703,7 +10703,7 @@ declare namespace SageMaker {
  */
  TrialComponentDisplayName?: ExperimentEntityName;
  /**
- * The name of the experiment run to associate the trial component with.
+ * The name of the experiment run to associate with the trial component.
  */
  RunName?: ExperimentEntityName;
  }
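
For reference, the ExperimentConfig shape touched by this hunk is what job-creation calls such as CreateTrainingJob or CreateProcessingJob accept to file a job under an experiment run; a minimal sketch with hypothetical names:

import * as AWS from 'aws-sdk';

const experimentConfig: AWS.SageMaker.Types.ExperimentConfig = {
  ExperimentName: 'churn-prediction',         // must name an existing experiment
  TrialComponentDisplayName: 'training-step', // how the trial component is shown in Studio
  RunName: 'run-2022-12-22',                  // the experiment run to associate with the trial component
};

console.log(JSON.stringify(experimentConfig, null, 2));
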
@@ -11610,7 +11610,7 @@ declare namespace SageMaker {
  */
  ResourceLimits: ResourceLimits;
  /**
- * The ParameterRanges object that specifies the ranges of hyperparameters that this tuning job searches over to find the optimal configuration for the highest model performance against .your chosen objective metric.
+ * The ParameterRanges object that specifies the ranges of hyperparameters that this tuning job searches over to find the optimal configuration for the highest model performance against your chosen objective metric.
  */
  ParameterRanges?: ParameterRanges;
  /**
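
The ParameterRanges object named in the corrected doc string above holds the integer, continuous and categorical hyperparameter ranges for the tuning job. A minimal sketch of that shape, with hypothetical hyperparameter names and bounds (the API expresses numeric bounds as strings):

import * as AWS from 'aws-sdk';

const parameterRanges: AWS.SageMaker.Types.ParameterRanges = {
  ContinuousParameterRanges: [
    { Name: 'learning_rate', MinValue: '0.001', MaxValue: '0.1', ScalingType: 'Logarithmic' },
  ],
  IntegerParameterRanges: [
    { Name: 'num_round', MinValue: '50', MaxValue: '500' },
  ],
  CategoricalParameterRanges: [
    { Name: 'booster', Values: ['gbtree', 'dart'] },
  ],
};

console.log(JSON.stringify(parameterRanges, null, 2));
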
@@ -953,7 +953,7 @@ declare namespace TranscribeService {
  }
  export type KMSEncryptionContextMap = {[key: string]: NonEmptyString};
  export type KMSKeyId = string;
- export type LanguageCode = "af-ZA"|"ar-AE"|"ar-SA"|"da-DK"|"de-CH"|"de-DE"|"en-AB"|"en-AU"|"en-GB"|"en-IE"|"en-IN"|"en-US"|"en-WL"|"es-ES"|"es-US"|"fa-IR"|"fr-CA"|"fr-FR"|"he-IL"|"hi-IN"|"id-ID"|"it-IT"|"ja-JP"|"ko-KR"|"ms-MY"|"nl-NL"|"pt-BR"|"pt-PT"|"ru-RU"|"ta-IN"|"te-IN"|"tr-TR"|"zh-CN"|"zh-TW"|"th-TH"|"en-ZA"|"en-NZ"|string;
+ export type LanguageCode = "af-ZA"|"ar-AE"|"ar-SA"|"da-DK"|"de-CH"|"de-DE"|"en-AB"|"en-AU"|"en-GB"|"en-IE"|"en-IN"|"en-US"|"en-WL"|"es-ES"|"es-US"|"fa-IR"|"fr-CA"|"fr-FR"|"he-IL"|"hi-IN"|"id-ID"|"it-IT"|"ja-JP"|"ko-KR"|"ms-MY"|"nl-NL"|"pt-BR"|"pt-PT"|"ru-RU"|"ta-IN"|"te-IN"|"tr-TR"|"zh-CN"|"zh-TW"|"th-TH"|"en-ZA"|"en-NZ"|"vi-VN"|"sv-SE"|string;
  export interface LanguageCodeItem {
  /**
  * Provides the language code for each language identified in your media.
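
This release adds sv-SE (Swedish) and vi-VN (Vietnamese) to the LanguageCode union. A minimal sketch of starting a transcription job with one of them, assuming a hypothetical job name and S3 media URI:

import * as AWS from 'aws-sdk';

const transcribe = new AWS.TranscribeService({ region: 'eu-north-1' });

transcribe.startTranscriptionJob(
  {
    TranscriptionJobName: 'podcast-episode-42',              // hypothetical
    LanguageCode: 'sv-SE',                                   // newly added language code
    Media: { MediaFileUri: 's3://my-bucket/episode42.mp3' }, // hypothetical
  },
  (err, data) => {
    if (err) console.error(err);
    else console.log(data.TranscriptionJob?.TranscriptionJobStatus);
  }
);
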
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1278.0',
+ VERSION: '2.1279.0',

  /**
  * @api private