aws-sdk-sagemaker 1.157.0 → 1.158.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: b082ca52f03d7d82732093787abd9b675047c9a234b33d06420bcdf21719061e
- data.tar.gz: c2cf439c12b984fb3a8593b8ead44ea6e8d5359f97d6c155d51a4b6716cf6bce
+ metadata.gz: afa3d9aad52d2754e4e54670bff15a1be4f111158df8df74341671fa4f4a5573
+ data.tar.gz: 609ae29dcd112366ce30892884bafc2ea3c5d7a8dea344374783928d7865582a
  SHA512:
- metadata.gz: ccc8fa1c3a21c62ad46ccbd3e9908d6955a2382fa656ea2a56a894950c457bb5a3afce895b537dc8f47cdd717211da5d1731a6d5cfd1b9a3eef86c0c9732703f
- data.tar.gz: a1a16740daeb725a07b7b695b2e59f2c0a919dca9d943bed10c373fe6864af213387af2b994188869624bc2aca0197768ac87b772261ae03d91bb9527329ddea
+ metadata.gz: 036e5c82c6a7a42f3e10141e8c00a2687011434939868f4990b847474b04ec3a4ddd35a3307eb3c44a864ef0bd140836b2bf11223078ccc43aecf02f26c81d75
+ data.tar.gz: c557948a687fe872b8c85c3ba5084c2ac42717fc3f811171d1ea3ecd220f7ae5a714807fb6f285372168f7618b9168d6e9c4a2bda7ea19f125c5668ddf62d54e
data/CHANGELOG.md CHANGED
@@ -1,6 +1,11 @@
  Unreleased Changes
  ------------------

+ 1.158.0 (2022-12-20)
+ ------------------
+
+ * Feature - Amazon SageMaker Autopilot adds support for new objective metrics in CreateAutoMLJob API.
+
  1.157.0 (2022-12-19)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.157.0
+ 1.158.0
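To pull this release into an application, a minimal Gemfile pin is enough. The gem name and version below come straight from this diff; the pessimistic constraint is just one reasonable choice.

```ruby
# Gemfile — pick up the release that adds the new AutoML objective metrics
gem 'aws-sdk-sagemaker', '~> 1.158'
```

Running `bundle update aws-sdk-sagemaker` afterwards refreshes the lockfile to this version.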
@@ -1260,7 +1260,7 @@ module Aws::SageMaker
  # },
  # problem_type: "BinaryClassification", # accepts BinaryClassification, MulticlassClassification, Regression
  # auto_ml_job_objective: {
- # metric_name: "Accuracy", # required, accepts Accuracy, MSE, F1, F1macro, AUC
+ # metric_name: "Accuracy", # required, accepts Accuracy, MSE, F1, F1macro, AUC, RMSE, MAE, R2, BalancedAccuracy, Precision, PrecisionMacro, Recall, RecallMacro
  # },
  # auto_ml_job_config: {
  # completion_criteria: {
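The request syntax above shows where the expanded enum is accepted. As a rough sketch of how one of the new metrics could be requested, the call below passes `BalancedAccuracy` as the objective. The job name, S3 locations, target column, and role ARN are placeholder values; the remaining parameters follow the shapes documented in this client.

```ruby
require 'aws-sdk-sagemaker'

sagemaker = Aws::SageMaker::Client.new(region: 'us-east-1')

# Placeholder bucket, target column, and role — substitute your own resources.
sagemaker.create_auto_ml_job(
  auto_ml_job_name: 'churn-autopilot-demo',
  input_data_config: [
    {
      data_source: {
        s3_data_source: { s3_data_type: 'S3Prefix', s3_uri: 's3://my-bucket/train/' }
      },
      target_attribute_name: 'churned'
    }
  ],
  output_data_config: { s3_output_path: 's3://my-bucket/autopilot-output/' },
  role_arn: 'arn:aws:iam::123456789012:role/SageMakerAutopilotRole',
  problem_type: 'BinaryClassification',
  # One of the metric names newly accepted in 1.158.0.
  auto_ml_job_objective: { metric_name: 'BalancedAccuracy' }
)
```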
@@ -2702,11 +2702,16 @@ module Aws::SageMaker
  req.send_request(options)
  end

- # Creates an SageMaker *experiment*. An experiment is a collection of
+ # Creates a SageMaker *experiment*. An experiment is a collection of
  # *trials* that are observed, compared and evaluated as a group. A trial
  # is a set of steps, called *trial components*, that produce a machine
  # learning model.
  #
+ # <note markdown="1"> In the Studio UI, trials are referred to as *run groups* and trial
+ # components are referred to as *runs*.
+ #
+ # </note>
+ #
  # The goal of an experiment is to determine the components that produce
  # the best model. Multiple trials are performed, each one isolating and
  # measuring the impact of a change to one or more inputs, while keeping
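For readers unfamiliar with the experiment APIs touched here, a minimal, hedged sketch of creating an experiment is shown below; the experiment name and description are placeholders.

```ruby
require 'aws-sdk-sagemaker'

sagemaker = Aws::SageMaker::Client.new(region: 'us-east-1')

# Creates an empty experiment that trials (run groups in Studio) can be added to.
sagemaker.create_experiment(
  experiment_name: 'churn-model-comparison',
  description: 'Groups the trials that compare candidate churn models'
)
```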
@@ -9545,7 +9550,7 @@ module Aws::SageMaker
  # resp.output_data_config.kms_key_id #=> String
  # resp.output_data_config.s3_output_path #=> String
  # resp.role_arn #=> String
- # resp.auto_ml_job_objective.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.auto_ml_job_objective.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.problem_type #=> String, one of "BinaryClassification", "MulticlassClassification", "Regression"
  # resp.auto_ml_job_config.completion_criteria.max_candidates #=> Integer
  # resp.auto_ml_job_config.completion_criteria.max_runtime_per_training_job_in_seconds #=> Integer
@@ -9567,7 +9572,7 @@ module Aws::SageMaker
  # resp.partial_failure_reasons[0].partial_failure_message #=> String
  # resp.best_candidate.candidate_name #=> String
  # resp.best_candidate.final_auto_ml_job_objective_metric.type #=> String, one of "Maximize", "Minimize"
- # resp.best_candidate.final_auto_ml_job_objective_metric.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.best_candidate.final_auto_ml_job_objective_metric.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.best_candidate.final_auto_ml_job_objective_metric.value #=> Float
  # resp.best_candidate.objective_status #=> String, one of "Succeeded", "Pending", "Failed"
  # resp.best_candidate.candidate_steps #=> Array
@@ -9587,7 +9592,7 @@ module Aws::SageMaker
  # resp.best_candidate.candidate_properties.candidate_artifact_locations.explainability #=> String
  # resp.best_candidate.candidate_properties.candidate_artifact_locations.model_insights #=> String
  # resp.best_candidate.candidate_properties.candidate_metrics #=> Array
- # resp.best_candidate.candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.best_candidate.candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.best_candidate.candidate_properties.candidate_metrics[0].value #=> Float
  # resp.best_candidate.candidate_properties.candidate_metrics[0].set #=> String, one of "Train", "Validation", "Test"
  # resp.best_candidate.candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss", "InferenceLatency"
@@ -9596,7 +9601,7 @@ module Aws::SageMaker
  # resp.generate_candidate_definitions_only #=> Boolean
  # resp.auto_ml_job_artifacts.candidate_definition_notebook_location #=> String
  # resp.auto_ml_job_artifacts.data_exploration_notebook_location #=> String
- # resp.resolved_attributes.auto_ml_job_objective.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.resolved_attributes.auto_ml_job_objective.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.resolved_attributes.problem_type #=> String, one of "BinaryClassification", "MulticlassClassification", "Regression"
  # resp.resolved_attributes.completion_criteria.max_candidates #=> Integer
  # resp.resolved_attributes.completion_criteria.max_runtime_per_training_job_in_seconds #=> Integer
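Reading the objective back via `describe_auto_ml_job` follows the response accessors shown above. This hedged sketch reuses the placeholder job name from the earlier example and assumes the job has progressed far enough to have a best candidate.

```ruby
resp = sagemaker.describe_auto_ml_job(auto_ml_job_name: 'churn-autopilot-demo')

# best_candidate is populated once Autopilot has produced at least one candidate.
if resp.best_candidate
  objective = resp.best_candidate.final_auto_ml_job_objective_metric
  puts "#{objective.metric_name} (#{objective.type}): #{objective.value}"
end

# With 1.158.0 this can resolve to names such as "BalancedAccuracy" or "RMSE".
puts "Resolved objective: #{resp.resolved_attributes.auto_ml_job_objective.metric_name}"
```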
@@ -14645,7 +14650,7 @@ module Aws::SageMaker
  # resp.candidates #=> Array
  # resp.candidates[0].candidate_name #=> String
  # resp.candidates[0].final_auto_ml_job_objective_metric.type #=> String, one of "Maximize", "Minimize"
- # resp.candidates[0].final_auto_ml_job_objective_metric.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.candidates[0].final_auto_ml_job_objective_metric.metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.candidates[0].final_auto_ml_job_objective_metric.value #=> Float
  # resp.candidates[0].objective_status #=> String, one of "Succeeded", "Pending", "Failed"
  # resp.candidates[0].candidate_steps #=> Array
@@ -14665,7 +14670,7 @@ module Aws::SageMaker
  # resp.candidates[0].candidate_properties.candidate_artifact_locations.explainability #=> String
  # resp.candidates[0].candidate_properties.candidate_artifact_locations.model_insights #=> String
  # resp.candidates[0].candidate_properties.candidate_metrics #=> Array
- # resp.candidates[0].candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
+ # resp.candidates[0].candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro"
  # resp.candidates[0].candidate_properties.candidate_metrics[0].value #=> Float
  # resp.candidates[0].candidate_properties.candidate_metrics[0].set #=> String, one of "Train", "Validation", "Test"
  # resp.candidates[0].candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss", "InferenceLatency"
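The same expanded enum shows up when listing candidates. A hedged sketch using the accessors from the hunk above, with the same placeholder job name:

```ruby
resp = sagemaker.list_candidates_for_auto_ml_job(auto_ml_job_name: 'churn-autopilot-demo')

resp.candidates.each do |candidate|
  final = candidate.final_auto_ml_job_objective_metric
  puts "#{candidate.candidate_name}: #{final.metric_name}=#{final.value} (#{candidate.objective_status})"

  # Per-candidate metrics may now carry the new names, e.g. "PrecisionMacro" or "MAE".
  candidate.candidate_properties.candidate_metrics.each do |metric|
    puts "  #{metric.set} #{metric.metric_name}: #{metric.value}"
  end
end
```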
@@ -22700,7 +22705,7 @@ module Aws::SageMaker
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-sagemaker'
- context[:gem_version] = '1.157.0'
+ context[:gem_version] = '1.158.0'
  Seahorse::Client::Request.new(handlers, context)
  end

@@ -2015,66 +2015,171 @@ module Aws::SageMaker
  #
  # Here are the options:
  #
- # * `MSE`\: The mean squared error (MSE) is the average of the squared
- # differences between the predicted and actual values. It is used
- # for regression. MSE values are always positive: the better a model
- # is at predicting the actual values, the smaller the MSE value is.
- # When the data contains outliers, they tend to dominate the MSE,
- # which might cause subpar prediction performance.
- #
- # * `Accuracy`\: The ratio of the number of correctly classified items
- # to the total number of (correctly and incorrectly) classified
- # items. It is used for binary and multiclass classification. It
- # measures how close the predicted class values are to the actual
- # values. Accuracy values vary between zero and one: one indicates
- # perfect accuracy and zero indicates perfect inaccuracy.
- #
- # * `F1`\: The F1 score is the harmonic mean of the precision and
- # recall. It is used for binary classification into classes
+ # Accuracy
+ #
+ # : The ratio of the number of correctly classified items to the total
+ # number of (correctly and incorrectly) classified items. It is used
+ # for both binary and multiclass classification. Accuracy measures
+ # how close the predicted class values are to the actual values.
+ # Values for accuracy metrics vary between zero (0) and one (1). A
+ # value of 1 indicates perfect accuracy, and 0 indicates perfect
+ # inaccuracy.
+ #
+ # AUC
+ #
+ # : The area under the curve (AUC) metric is used to compare and
+ # evaluate binary classification by algorithms that return
+ # probabilities, such as logistic regression. To map the
+ # probabilities into classifications, these are compared against a
+ # threshold value.
+ #
+ # The relevant curve is the receiver operating characteristic curve
+ # (ROC curve). The ROC curve plots the true positive rate (TPR) of
+ # predictions (or recall) against the false positive rate (FPR) as a
+ # function of the threshold value, above which a prediction is
+ # considered positive. Increasing the threshold results in fewer
+ # false positives, but more false negatives.
+ #
+ # AUC is the area under this ROC curve. Therefore, AUC provides an
+ # aggregated measure of the model performance across all possible
+ # classification thresholds. AUC scores vary between 0 and 1. A
+ # score of 1 indicates perfect accuracy, and a score of one half
+ # (0.5) indicates that the prediction is not better than a random
+ # classifier.
+ #
+ # BalancedAccuracy
+ #
+ # : `BalancedAccuracy` is a metric that measures the ratio of accurate
+ # predictions to all predictions. This ratio is calculated after
+ # normalizing true positives (TP) and true negatives (TN) by the
+ # total number of positive (P) and negative (N) values. It is used
+ # in both binary and multiclass classification and is defined as
+ # follows: 0.5*((TP/P)+(TN/N)), with values ranging from 0 to 1.
+ # `BalancedAccuracy` gives a better measure of accuracy when the
+ # number of positives or negatives differ greatly from each other in
+ # an imbalanced dataset. For example, when only 1% of email is spam.
+ #
+ # F1
+ #
+ # : The `F1` score is the harmonic mean of the precision and recall,
+ # defined as follows: F1 = 2 * (precision * recall) / (precision +
+ # recall). It is used for binary classification into classes
  # traditionally referred to as positive and negative. Predictions
- # are said to be true when they match their actual (correct) class
- # and false when they do not. Precision is the ratio of the true
- # positive predictions to all positive predictions (including the
- # false positives) in a data set and measures the quality of the
- # prediction when it predicts the positive class. Recall (or
- # sensitivity) is the ratio of the true positive predictions to all
- # actual positive instances and measures how completely a model
- # predicts the actual class members in a data set. The standard F1
- # score weighs precision and recall equally. But which metric is
- # paramount typically depends on specific aspects of a problem. F1
- # scores vary between zero and one: one indicates the best possible
- # performance and zero the worst.
- #
- # * `AUC`\: The area under the curve (AUC) metric is used to compare
- # and evaluate binary classification by algorithms such as logistic
- # regression that return probabilities. A threshold is needed to map
- # the probabilities into classifications. The relevant curve is the
- # receiver operating characteristic curve that plots the true
- # positive rate (TPR) of predictions (or recall) against the false
- # positive rate (FPR) as a function of the threshold value, above
- # which a prediction is considered positive. Increasing the
- # threshold results in fewer false positives but more false
- # negatives. AUC is the area under this receiver operating
- # characteristic curve and so provides an aggregated measure of the
- # model performance across all possible classification thresholds.
- # The AUC score can also be interpreted as the probability that a
- # randomly selected positive data point is more likely to be
- # predicted positive than a randomly selected negative example. AUC
- # scores vary between zero and one: a score of one indicates perfect
- # accuracy and a score of one half indicates that the prediction is
- # not better than a random classifier. Values under one half predict
- # less accurately than a random predictor. But such consistently bad
- # predictors can simply be inverted to obtain better than random
- # predictors.
- #
- # * `F1macro`\: The F1macro score applies F1 scoring to multiclass
- # classification. In this context, you have multiple classes to
- # predict. You just calculate the precision and recall for each
- # class as you did for the positive class in binary classification.
- # Then, use these values to calculate the F1 score for each class
- # and average them to obtain the F1macro score. F1macro scores vary
- # between zero and one: one indicates the best possible performance
- # and zero the worst.
+ # are said to be true when they match their actual (correct) class,
+ # and false when they do not.
+ #
+ # Precision is the ratio of the true positive predictions to all
+ # positive predictions, and it includes the false positives in a
+ # dataset. Precision measures the quality of the prediction when it
+ # predicts the positive class.
+ #
+ # Recall (or sensitivity) is the ratio of the true positive
+ # predictions to all actual positive instances. Recall measures how
+ # completely a model predicts the actual class members in a dataset.
+ #
+ # F1 scores vary between 0 and 1. A score of 1 indicates the best
+ # possible performance, and 0 indicates the worst.
+ #
+ # F1macro
+ #
+ # : The `F1macro` score applies F1 scoring to multiclass
+ # classification problems. It does this by calculating the precision
+ # and recall, and then taking their harmonic mean to calculate the
+ # F1 score for each class. Lastly, the F1macro averages the
+ # individual scores to obtain the `F1macro` score. `F1macro` scores
+ # vary between 0 and 1. A score of 1 indicates the best possible
+ # performance, and 0 indicates the worst.
+ #
+ # MAE
+ #
+ # : The mean absolute error (MAE) is a measure of how different the
+ # predicted and actual values are, when they're averaged over all
+ # values. MAE is commonly used in regression analysis to understand
+ # model prediction error. If there is linear regression, MAE
+ # represents the average distance from a predicted line to the
+ # actual value. MAE is defined as the sum of absolute errors divided
+ # by the number of observations. Values range from 0 to infinity,
+ # with smaller numbers indicating a better model fit to the data.
+ #
+ # MSE
+ #
+ # : The mean squared error (MSE) is the average of the squared
+ # differences between the predicted and actual values. It is used
+ # for regression. MSE values are always positive. The better a model
+ # is at predicting the actual values, the smaller the MSE value is
+ #
+ # Precision
+ #
+ # : Precision measures how well an algorithm predicts the true
+ # positives (TP) out of all of the positives that it identifies. It
+ # is defined as follows: Precision = TP/(TP+FP), with values ranging
+ # from zero (0) to one (1), and is used in binary classification.
+ # Precision is an important metric when the cost of a false positive
+ # is high. For example, the cost of a false positive is very high if
+ # an airplane safety system is falsely deemed safe to fly. A false
+ # positive (FP) reflects a positive prediction that is actually
+ # negative in the data.
+ #
+ # PrecisionMacro
+ #
+ # : The precision macro computes precision for multiclass
+ # classification problems. It does this by calculating precision for
+ # each class and averaging scores to obtain precision for several
+ # classes. `PrecisionMacro` scores range from zero (0) to one (1).
+ # Higher scores reflect the model's ability to predict true
+ # positives (TP) out of all of the positives that it identifies,
+ # averaged across multiple classes.
+ #
+ # R2
+ #
+ # : R2, also known as the coefficient of determination, is used in
+ # regression to quantify how much a model can explain the variance
+ # of a dependent variable. Values range from one (1) to negative one
+ # (-1). Higher numbers indicate a higher fraction of explained
+ # variability. `R2` values close to zero (0) indicate that very
+ # little of the dependent variable can be explained by the model.
+ # Negative values indicate a poor fit and that the model is
+ # outperformed by a constant function. For linear regression, this
+ # is a horizontal line.
+ #
+ # Recall
+ #
+ # : Recall measures how well an algorithm correctly predicts all of
+ # the true positives (TP) in a dataset. A true positive is a
+ # positive prediction that is also an actual positive value in the
+ # data. Recall is defined as follows: Recall = TP/(TP+FN), with
+ # values ranging from 0 to 1. Higher scores reflect a better ability
+ # of the model to predict true positives (TP) in the data, and is
+ # used in binary classification.
+ #
+ # Recall is important when testing for cancer because it's used to
+ # find all of the true positives. A false positive (FP) reflects a
+ # positive prediction that is actually negative in the data. It is
+ # often insufficient to measure only recall, because predicting
+ # every output as a true positive will yield a perfect recall score.
+ #
+ # RecallMacro
+ #
+ # : The RecallMacro computes recall for multiclass classification
+ # problems by calculating recall for each class and averaging scores
+ # to obtain recall for several classes. RecallMacro scores range
+ # from 0 to 1. Higher scores reflect the model's ability to predict
+ # true positives (TP) in a dataset. Whereas, a true positive
+ # reflects a positive prediction that is also an actual positive
+ # value in the data. It is often insufficient to measure only
+ # recall, because predicting every output as a true positive will
+ # yield a perfect recall score.
+ #
+ # RMSE
+ #
+ # : Root mean squared error (RMSE) measures the square root of the
+ # squared difference between predicted and actual values, and it's
+ # averaged over all values. It is used in regression analysis to
+ # understand model prediction error. It's an important metric to
+ # indicate the presence of large model errors and outliers. Values
+ # range from zero (0) to infinity, with smaller numbers indicating a
+ # better model fit to the data. RMSE is dependent on scale, and
+ # should not be used to compare datasets of different sizes.
  #
  # If you do not specify a metric explicitly, the default behavior is
  # to automatically use:
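To make the formulas quoted in the documentation above concrete, here is a small, self-contained Ruby sketch (not part of the SDK, with made-up counts) that evaluates a few of the classification metrics from a toy confusion matrix, using the definitions given in the doc text: Precision = TP/(TP+FP), Recall = TP/(TP+FN), F1 as their harmonic mean, and BalancedAccuracy = 0.5*((TP/P)+(TN/N)).

```ruby
# Toy confusion-matrix counts — purely illustrative values.
tp, fp, tn, fn = 80, 10, 900, 20

accuracy  = (tp + tn).to_f / (tp + tn + fp + fn)
precision = tp.to_f / (tp + fp)                                 # TP / (TP + FP)
recall    = tp.to_f / (tp + fn)                                 # TP / (TP + FN)
f1        = 2 * precision * recall / (precision + recall)       # harmonic mean
balanced  = 0.5 * (tp.to_f / (tp + fn) + tn.to_f / (tn + fp))   # 0.5 * ((TP/P) + (TN/N))

puts format('Accuracy=%.3f Precision=%.3f Recall=%.3f F1=%.3f BalancedAccuracy=%.3f',
            accuracy, precision, recall, f1, balanced)
```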
@@ -9976,7 +10081,13 @@ module Aws::SageMaker
  # @return [Array<Types::AutoMLPartialFailureReason>]
  #
  # @!attribute [rw] best_candidate
- # Returns the job's best `AutoMLCandidate`.
+ # The best model candidate selected by SageMaker Autopilot using both
+ # the best objective metric and lowest [InferenceLatency][1] for an
+ # experiment.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-metrics-validation.html
  # @return [Types::AutoMLCandidate]
  #
  # @!attribute [rw] auto_ml_job_status
@@ -14654,8 +14765,8 @@ module Aws::SageMaker
  # @return [String]
  #
  # @!attribute [rw] sources
- # A list of the Amazon Resource Name (ARN) and, if applicable, job
- # type for multiple sources of an experiment run.
+ # A list of ARNs and, if applicable, job types for multiple sources of
+ # an experiment run.
  # @return [Array<Types::TrialComponentSource>]
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeTrialComponentResponse AWS API Documentation
@@ -16336,8 +16447,8 @@ module Aws::SageMaker
  # * CreateTransformJob
  #
  # @!attribute [rw] experiment_name
- # The name of an existing experiment to associate the trial component
- # with.
+ # The name of an existing experiment to associate with the trial
+ # component.
  # @return [String]
  #
  # @!attribute [rw] trial_name
@@ -16351,8 +16462,8 @@ module Aws::SageMaker
  # @return [String]
  #
  # @!attribute [rw] run_name
- # The name of the experiment run to associate the trial component
- # with.
+ # The name of the experiment run to associate with the trial
+ # component.
  # @return [String]
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ExperimentConfig AWS API Documentation
@@ -19034,8 +19145,8 @@ module Aws::SageMaker
  # @!attribute [rw] parameter_ranges
  # The ParameterRanges object that specifies the ranges of
  # hyperparameters that this tuning job searches over to find the
- # optimal configuration for the highest model performance against
- # .your chosen objective metric.
+ # optimal configuration for the highest model performance against your
+ # chosen objective metric.
  # @return [Types::ParameterRanges]
  #
  # @!attribute [rw] training_job_early_stopping_type
@@ -53,6 +53,6 @@ require_relative 'aws-sdk-sagemaker/customizations'
  # @!group service
  module Aws::SageMaker

- GEM_VERSION = '1.157.0'
+ GEM_VERSION = '1.158.0'

  end
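After upgrading, the bumped constant shown above can serve as a quick sanity check that the new release is the one actually loaded:

```ruby
require 'aws-sdk-sagemaker'

# Expected to print "1.158.0" once the upgraded gem is installed.
puts Aws::SageMaker::GEM_VERSION
```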
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-sagemaker
  version: !ruby/object:Gem::Version
- version: 1.157.0
+ version: 1.158.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-12-19 00:00:00.000000000 Z
+ date: 2022-12-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-core