aws-sdk-sagemaker 1.146.0 → 1.147.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: ebd394d38d55f5b3a6b5e4d066da97a20e1fdbd0b90f18edf5fcd218a868306c
4
- data.tar.gz: 03a3cc6c12079bdd81ea09a83986d71439aa30fe87339c6f206abb763d9fbd59
3
+ metadata.gz: d30133b30a55095ff6f9b9f1c585cd78704f9d6e748e2f22a8bbdb9d7ada92aa
4
+ data.tar.gz: 4571e0f397878b638278db47b599bff5ba339d7a605d979a391f89b3f6d0897d
5
5
  SHA512:
6
- metadata.gz: 349df1460fe17a33087ceb79894a81939098272070d88998fdfcfdcb29f499d32b3c14d965d94c20e0eda329f013b81ecdc139c21e8bc3a501beeaa8cf750d72
7
- data.tar.gz: 26dd2ce5ffde1515f689a466ae0fc2a189b7418aa3b283ce2cdf0cf1fc11bb096a2bc9ac347745a8ea7cb1d6f4f7e74062c7edb3e4fc52565cf4e72736f7fc18
6
+ metadata.gz: 64cf887cbf8db655696b2735a06cb89ad50461721d8459e948a2ae89742b80df17f23b61b2334c02cb4d0df8f52ec26be4c899c239868e0064d7ec3817c72e33
7
+ data.tar.gz: f3641d9e5e67013aef1a7875e07a7c33b13b052a3e9c2aa303655795f701eae83dbcf5358b8b270228d8398a77fc6fab96dbf7c656e84343eb6c50aed4def3ab
data/CHANGELOG.md CHANGED
@@ -1,6 +1,11 @@
1
1
  Unreleased Changes
2
2
  ------------------
3
3
 
4
+ 1.147.0 (2022-10-24)
5
+ ------------------
6
+
7
+ * Feature - SageMaker Inference Recommender now supports a new API ListInferenceRecommendationsJobSteps to return the details of all the benchmarks it creates for an inference recommendation job.
8
+
4
9
  1.146.0 (2022-10-21)
5
10
  ------------------
6
11
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.146.0
1
+ 1.147.0
@@ -8808,7 +8808,7 @@ module Aws::SageMaker
8808
8808
  # resp.best_candidate.candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
8809
8809
  # resp.best_candidate.candidate_properties.candidate_metrics[0].value #=> Float
8810
8810
  # resp.best_candidate.candidate_properties.candidate_metrics[0].set #=> String, one of "Train", "Validation", "Test"
8811
- # resp.best_candidate.candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss"
8811
+ # resp.best_candidate.candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss", "InferenceLatency"
8812
8812
  # resp.auto_ml_job_status #=> String, one of "Completed", "InProgress", "Failed", "Stopped", "Stopping"
8813
8813
  # resp.auto_ml_job_secondary_status #=> String, one of "Starting", "AnalyzingData", "FeatureEngineering", "ModelTuning", "MaxCandidatesReached", "Failed", "Stopped", "MaxAutoMLJobRuntimeReached", "Stopping", "CandidateDefinitionsGenerated", "GeneratingExplainabilityReport", "Completed", "ExplainabilityError", "DeployingModel", "ModelDeploymentError", "GeneratingModelInsightsReport", "ModelInsightsError"
8814
8814
  # resp.generate_candidate_definitions_only #=> Boolean
@@ -13269,7 +13269,7 @@ module Aws::SageMaker
13269
13269
  # resp.candidates[0].candidate_properties.candidate_metrics[0].metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC"
13270
13270
  # resp.candidates[0].candidate_properties.candidate_metrics[0].value #=> Float
13271
13271
  # resp.candidates[0].candidate_properties.candidate_metrics[0].set #=> String, one of "Train", "Validation", "Test"
13272
- # resp.candidates[0].candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss"
13272
+ # resp.candidates[0].candidate_properties.candidate_metrics[0].standard_metric_name #=> String, one of "Accuracy", "MSE", "F1", "F1macro", "AUC", "RMSE", "MAE", "R2", "BalancedAccuracy", "Precision", "PrecisionMacro", "Recall", "RecallMacro", "LogLoss", "InferenceLatency"
13273
13273
  # resp.next_token #=> String
13274
13274
  #
13275
13275
  # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListCandidatesForAutoMLJob AWS API Documentation
@@ -14589,6 +14589,80 @@ module Aws::SageMaker
14589
14589
  req.send_request(options)
14590
14590
  end
14591
14591
 
14592
+ # Returns a list of the subtasks for an Inference Recommender job.
14593
+ #
14594
+ # The supported subtasks are benchmarks, which evaluate the performance
14595
+ # of your model on different instance types.
14596
+ #
14597
+ # @option params [required, String] :job_name
14598
+ # The name for the Inference Recommender job.
14599
+ #
14600
+ # @option params [String] :status
14601
+ # A filter to return benchmarks of a specified status. If this field is
14602
+ # left empty, then all benchmarks are returned.
14603
+ #
14604
+ # @option params [String] :step_type
14605
+ # A filter to return details about the specified type of subtask.
14606
+ #
14607
+ # `BENCHMARK`\: Evaluate the performance of your model on different
14608
+ # instance types.
14609
+ #
14610
+ # @option params [Integer] :max_results
14611
+ # The maximum number of results to return.
14612
+ #
14613
+ # @option params [String] :next_token
14614
+ # A token that you can specify to return more results from the list.
14615
+ # Specify this field if you have a token that was returned from a
14616
+ # previous request.
14617
+ #
14618
+ # @return [Types::ListInferenceRecommendationsJobStepsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
14619
+ #
14620
+ # * {Types::ListInferenceRecommendationsJobStepsResponse#steps #steps} => Array<Types::InferenceRecommendationsJobStep>
14621
+ # * {Types::ListInferenceRecommendationsJobStepsResponse#next_token #next_token} => String
14622
+ #
14623
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
14624
+ #
14625
+ # @example Request syntax with placeholder values
14626
+ #
14627
+ # resp = client.list_inference_recommendations_job_steps({
14628
+ # job_name: "RecommendationJobName", # required
14629
+ # status: "PENDING", # accepts PENDING, IN_PROGRESS, COMPLETED, FAILED, STOPPING, STOPPED
14630
+ # step_type: "BENCHMARK", # accepts BENCHMARK
14631
+ # max_results: 1,
14632
+ # next_token: "NextToken",
14633
+ # })
14634
+ #
14635
+ # @example Response structure
14636
+ #
14637
+ # resp.steps #=> Array
14638
+ # resp.steps[0].step_type #=> String, one of "BENCHMARK"
14639
+ # resp.steps[0].job_name #=> String
14640
+ # resp.steps[0].status #=> String, one of "PENDING", "IN_PROGRESS", "COMPLETED", "FAILED", "STOPPING", "STOPPED"
14641
+ # resp.steps[0].inference_benchmark.metrics.cost_per_hour #=> Float
14642
+ # resp.steps[0].inference_benchmark.metrics.cost_per_inference #=> Float
14643
+ # resp.steps[0].inference_benchmark.metrics.max_invocations #=> Integer
14644
+ # resp.steps[0].inference_benchmark.metrics.model_latency #=> Integer
14645
+ # resp.steps[0].inference_benchmark.endpoint_configuration.endpoint_name #=> String
14646
+ # resp.steps[0].inference_benchmark.endpoint_configuration.variant_name #=> String
14647
+ # resp.steps[0].inference_benchmark.endpoint_configuration.instance_type #=> String, one of "ml.t2.medium", "ml.t2.large", "ml.t2.xlarge", "ml.t2.2xlarge", "ml.m4.xlarge", "ml.m4.2xlarge", "ml.m4.4xlarge", "ml.m4.10xlarge", "ml.m4.16xlarge", "ml.m5.large", "ml.m5.xlarge", "ml.m5.2xlarge", "ml.m5.4xlarge", "ml.m5.12xlarge", "ml.m5.24xlarge", "ml.m5d.large", "ml.m5d.xlarge", "ml.m5d.2xlarge", "ml.m5d.4xlarge", "ml.m5d.12xlarge", "ml.m5d.24xlarge", "ml.c4.large", "ml.c4.xlarge", "ml.c4.2xlarge", "ml.c4.4xlarge", "ml.c4.8xlarge", "ml.p2.xlarge", "ml.p2.8xlarge", "ml.p2.16xlarge", "ml.p3.2xlarge", "ml.p3.8xlarge", "ml.p3.16xlarge", "ml.c5.large", "ml.c5.xlarge", "ml.c5.2xlarge", "ml.c5.4xlarge", "ml.c5.9xlarge", "ml.c5.18xlarge", "ml.c5d.large", "ml.c5d.xlarge", "ml.c5d.2xlarge", "ml.c5d.4xlarge", "ml.c5d.9xlarge", "ml.c5d.18xlarge", "ml.g4dn.xlarge", "ml.g4dn.2xlarge", "ml.g4dn.4xlarge", "ml.g4dn.8xlarge", "ml.g4dn.12xlarge", "ml.g4dn.16xlarge", "ml.r5.large", "ml.r5.xlarge", "ml.r5.2xlarge", "ml.r5.4xlarge", "ml.r5.12xlarge", "ml.r5.24xlarge", "ml.r5d.large", "ml.r5d.xlarge", "ml.r5d.2xlarge", "ml.r5d.4xlarge", "ml.r5d.12xlarge", "ml.r5d.24xlarge", "ml.inf1.xlarge", "ml.inf1.2xlarge", "ml.inf1.6xlarge", "ml.inf1.24xlarge", "ml.c6i.large", "ml.c6i.xlarge", "ml.c6i.2xlarge", "ml.c6i.4xlarge", "ml.c6i.8xlarge", "ml.c6i.12xlarge", "ml.c6i.16xlarge", "ml.c6i.24xlarge", "ml.c6i.32xlarge", "ml.g5.xlarge", "ml.g5.2xlarge", "ml.g5.4xlarge", "ml.g5.8xlarge", "ml.g5.12xlarge", "ml.g5.16xlarge", "ml.g5.24xlarge", "ml.g5.48xlarge", "ml.p4d.24xlarge", "ml.c7g.large", "ml.c7g.xlarge", "ml.c7g.2xlarge", "ml.c7g.4xlarge", "ml.c7g.8xlarge", "ml.c7g.12xlarge", "ml.c7g.16xlarge", "ml.m6g.large", "ml.m6g.xlarge", "ml.m6g.2xlarge", "ml.m6g.4xlarge", "ml.m6g.8xlarge", "ml.m6g.12xlarge", "ml.m6g.16xlarge", "ml.m6gd.large", "ml.m6gd.xlarge", "ml.m6gd.2xlarge", "ml.m6gd.4xlarge", "ml.m6gd.8xlarge", "ml.m6gd.12xlarge", "ml.m6gd.16xlarge", "ml.c6g.large", "ml.c6g.xlarge", "ml.c6g.2xlarge", 
"ml.c6g.4xlarge", "ml.c6g.8xlarge", "ml.c6g.12xlarge", "ml.c6g.16xlarge", "ml.c6gd.large", "ml.c6gd.xlarge", "ml.c6gd.2xlarge", "ml.c6gd.4xlarge", "ml.c6gd.8xlarge", "ml.c6gd.12xlarge", "ml.c6gd.16xlarge", "ml.c6gn.large", "ml.c6gn.xlarge", "ml.c6gn.2xlarge", "ml.c6gn.4xlarge", "ml.c6gn.8xlarge", "ml.c6gn.12xlarge", "ml.c6gn.16xlarge", "ml.r6g.large", "ml.r6g.xlarge", "ml.r6g.2xlarge", "ml.r6g.4xlarge", "ml.r6g.8xlarge", "ml.r6g.12xlarge", "ml.r6g.16xlarge", "ml.r6gd.large", "ml.r6gd.xlarge", "ml.r6gd.2xlarge", "ml.r6gd.4xlarge", "ml.r6gd.8xlarge", "ml.r6gd.12xlarge", "ml.r6gd.16xlarge"
14648
+ # resp.steps[0].inference_benchmark.endpoint_configuration.initial_instance_count #=> Integer
14649
+ # resp.steps[0].inference_benchmark.model_configuration.inference_specification_name #=> String
14650
+ # resp.steps[0].inference_benchmark.model_configuration.environment_parameters #=> Array
14651
+ # resp.steps[0].inference_benchmark.model_configuration.environment_parameters[0].key #=> String
14652
+ # resp.steps[0].inference_benchmark.model_configuration.environment_parameters[0].value_type #=> String
14653
+ # resp.steps[0].inference_benchmark.model_configuration.environment_parameters[0].value #=> String
14654
+ # resp.steps[0].inference_benchmark.failure_reason #=> String
14655
+ # resp.next_token #=> String
14656
+ #
14657
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListInferenceRecommendationsJobSteps AWS API Documentation
14658
+ #
14659
+ # @overload list_inference_recommendations_job_steps(params = {})
14660
+ # @param [Hash] params ({})
14661
+ def list_inference_recommendations_job_steps(params = {}, options = {})
14662
+ req = build_request(:list_inference_recommendations_job_steps, params)
14663
+ req.send_request(options)
14664
+ end
14665
+
14592
14666
  # Lists recommendation jobs that satisfy various filters.
14593
14667
  #
14594
14668
  # @option params [Time,DateTime,Date,Integer,String] :creation_time_after
@@ -20987,7 +21061,7 @@ module Aws::SageMaker
20987
21061
  params: params,
20988
21062
  config: config)
20989
21063
  context[:gem_name] = 'aws-sdk-sagemaker'
20990
- context[:gem_version] = '1.146.0'
21064
+ context[:gem_version] = '1.147.0'
20991
21065
  Seahorse::Client::Request.new(handlers, context)
20992
21066
  end
20993
21067
 
@@ -833,6 +833,8 @@ module Aws::SageMaker
833
833
  InferenceRecommendation = Shapes::StructureShape.new(name: 'InferenceRecommendation')
834
834
  InferenceRecommendations = Shapes::ListShape.new(name: 'InferenceRecommendations')
835
835
  InferenceRecommendationsJob = Shapes::StructureShape.new(name: 'InferenceRecommendationsJob')
836
+ InferenceRecommendationsJobStep = Shapes::StructureShape.new(name: 'InferenceRecommendationsJobStep')
837
+ InferenceRecommendationsJobSteps = Shapes::ListShape.new(name: 'InferenceRecommendationsJobSteps')
836
838
  InferenceRecommendationsJobs = Shapes::ListShape.new(name: 'InferenceRecommendationsJobs')
837
839
  InferenceSpecification = Shapes::StructureShape.new(name: 'InferenceSpecification')
838
840
  InferenceSpecificationName = Shapes::StringShape.new(name: 'InferenceSpecificationName')
@@ -963,6 +965,8 @@ module Aws::SageMaker
963
965
  ListImageVersionsResponse = Shapes::StructureShape.new(name: 'ListImageVersionsResponse')
964
966
  ListImagesRequest = Shapes::StructureShape.new(name: 'ListImagesRequest')
965
967
  ListImagesResponse = Shapes::StructureShape.new(name: 'ListImagesResponse')
968
+ ListInferenceRecommendationsJobStepsRequest = Shapes::StructureShape.new(name: 'ListInferenceRecommendationsJobStepsRequest')
969
+ ListInferenceRecommendationsJobStepsResponse = Shapes::StructureShape.new(name: 'ListInferenceRecommendationsJobStepsResponse')
966
970
  ListInferenceRecommendationsJobsRequest = Shapes::StructureShape.new(name: 'ListInferenceRecommendationsJobsRequest')
967
971
  ListInferenceRecommendationsJobsResponse = Shapes::StructureShape.new(name: 'ListInferenceRecommendationsJobsResponse')
968
972
  ListInferenceRecommendationsJobsSortBy = Shapes::StringShape.new(name: 'ListInferenceRecommendationsJobsSortBy')
@@ -1379,10 +1383,12 @@ module Aws::SageMaker
1379
1383
  RStudioServerProDomainSettingsForUpdate = Shapes::StructureShape.new(name: 'RStudioServerProDomainSettingsForUpdate')
1380
1384
  RStudioServerProUserGroup = Shapes::StringShape.new(name: 'RStudioServerProUserGroup')
1381
1385
  RealtimeInferenceInstanceTypes = Shapes::ListShape.new(name: 'RealtimeInferenceInstanceTypes')
1386
+ RecommendationFailureReason = Shapes::StringShape.new(name: 'RecommendationFailureReason')
1382
1387
  RecommendationJobArn = Shapes::StringShape.new(name: 'RecommendationJobArn')
1383
1388
  RecommendationJobCompiledOutputConfig = Shapes::StructureShape.new(name: 'RecommendationJobCompiledOutputConfig')
1384
1389
  RecommendationJobContainerConfig = Shapes::StructureShape.new(name: 'RecommendationJobContainerConfig')
1385
1390
  RecommendationJobDescription = Shapes::StringShape.new(name: 'RecommendationJobDescription')
1391
+ RecommendationJobInferenceBenchmark = Shapes::StructureShape.new(name: 'RecommendationJobInferenceBenchmark')
1386
1392
  RecommendationJobInputConfig = Shapes::StructureShape.new(name: 'RecommendationJobInputConfig')
1387
1393
  RecommendationJobName = Shapes::StringShape.new(name: 'RecommendationJobName')
1388
1394
  RecommendationJobOutputConfig = Shapes::StructureShape.new(name: 'RecommendationJobOutputConfig')
@@ -1394,6 +1400,7 @@ module Aws::SageMaker
1394
1400
  RecommendationJobSupportedInstanceTypes = Shapes::ListShape.new(name: 'RecommendationJobSupportedInstanceTypes')
1395
1401
  RecommendationJobType = Shapes::StringShape.new(name: 'RecommendationJobType')
1396
1402
  RecommendationMetrics = Shapes::StructureShape.new(name: 'RecommendationMetrics')
1403
+ RecommendationStepType = Shapes::StringShape.new(name: 'RecommendationStepType')
1397
1404
  RecordWrapper = Shapes::StringShape.new(name: 'RecordWrapper')
1398
1405
  RedshiftClusterId = Shapes::StringShape.new(name: 'RedshiftClusterId')
1399
1406
  RedshiftDatabase = Shapes::StringShape.new(name: 'RedshiftDatabase')
@@ -4777,6 +4784,14 @@ module Aws::SageMaker
4777
4784
  InferenceRecommendationsJob.add_member(:failure_reason, Shapes::ShapeRef.new(shape: FailureReason, location_name: "FailureReason"))
4778
4785
  InferenceRecommendationsJob.struct_class = Types::InferenceRecommendationsJob
4779
4786
 
4787
+ InferenceRecommendationsJobStep.add_member(:step_type, Shapes::ShapeRef.new(shape: RecommendationStepType, required: true, location_name: "StepType"))
4788
+ InferenceRecommendationsJobStep.add_member(:job_name, Shapes::ShapeRef.new(shape: RecommendationJobName, required: true, location_name: "JobName"))
4789
+ InferenceRecommendationsJobStep.add_member(:status, Shapes::ShapeRef.new(shape: RecommendationJobStatus, required: true, location_name: "Status"))
4790
+ InferenceRecommendationsJobStep.add_member(:inference_benchmark, Shapes::ShapeRef.new(shape: RecommendationJobInferenceBenchmark, location_name: "InferenceBenchmark"))
4791
+ InferenceRecommendationsJobStep.struct_class = Types::InferenceRecommendationsJobStep
4792
+
4793
+ InferenceRecommendationsJobSteps.member = Shapes::ShapeRef.new(shape: InferenceRecommendationsJobStep)
4794
+
4780
4795
  InferenceRecommendationsJobs.member = Shapes::ShapeRef.new(shape: InferenceRecommendationsJob)
4781
4796
 
4782
4797
  InferenceSpecification.add_member(:containers, Shapes::ShapeRef.new(shape: ModelPackageContainerDefinitionList, required: true, location_name: "Containers"))
@@ -5304,6 +5319,17 @@ module Aws::SageMaker
5304
5319
  ListImagesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
5305
5320
  ListImagesResponse.struct_class = Types::ListImagesResponse
5306
5321
 
5322
+ ListInferenceRecommendationsJobStepsRequest.add_member(:job_name, Shapes::ShapeRef.new(shape: RecommendationJobName, required: true, location_name: "JobName"))
5323
+ ListInferenceRecommendationsJobStepsRequest.add_member(:status, Shapes::ShapeRef.new(shape: RecommendationJobStatus, location_name: "Status"))
5324
+ ListInferenceRecommendationsJobStepsRequest.add_member(:step_type, Shapes::ShapeRef.new(shape: RecommendationStepType, location_name: "StepType"))
5325
+ ListInferenceRecommendationsJobStepsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
5326
+ ListInferenceRecommendationsJobStepsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
5327
+ ListInferenceRecommendationsJobStepsRequest.struct_class = Types::ListInferenceRecommendationsJobStepsRequest
5328
+
5329
+ ListInferenceRecommendationsJobStepsResponse.add_member(:steps, Shapes::ShapeRef.new(shape: InferenceRecommendationsJobSteps, location_name: "Steps"))
5330
+ ListInferenceRecommendationsJobStepsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
5331
+ ListInferenceRecommendationsJobStepsResponse.struct_class = Types::ListInferenceRecommendationsJobStepsResponse
5332
+
5307
5333
  ListInferenceRecommendationsJobsRequest.add_member(:creation_time_after, Shapes::ShapeRef.new(shape: CreationTime, location_name: "CreationTimeAfter"))
5308
5334
  ListInferenceRecommendationsJobsRequest.add_member(:creation_time_before, Shapes::ShapeRef.new(shape: CreationTime, location_name: "CreationTimeBefore"))
5309
5335
  ListInferenceRecommendationsJobsRequest.add_member(:last_modified_time_after, Shapes::ShapeRef.new(shape: LastModifiedTime, location_name: "LastModifiedTimeAfter"))
@@ -6709,6 +6735,12 @@ module Aws::SageMaker
6709
6735
  RecommendationJobContainerConfig.add_member(:supported_instance_types, Shapes::ShapeRef.new(shape: RecommendationJobSupportedInstanceTypes, location_name: "SupportedInstanceTypes"))
6710
6736
  RecommendationJobContainerConfig.struct_class = Types::RecommendationJobContainerConfig
6711
6737
 
6738
+ RecommendationJobInferenceBenchmark.add_member(:metrics, Shapes::ShapeRef.new(shape: RecommendationMetrics, location_name: "Metrics"))
6739
+ RecommendationJobInferenceBenchmark.add_member(:endpoint_configuration, Shapes::ShapeRef.new(shape: EndpointOutputConfiguration, location_name: "EndpointConfiguration"))
6740
+ RecommendationJobInferenceBenchmark.add_member(:model_configuration, Shapes::ShapeRef.new(shape: ModelConfiguration, required: true, location_name: "ModelConfiguration"))
6741
+ RecommendationJobInferenceBenchmark.add_member(:failure_reason, Shapes::ShapeRef.new(shape: RecommendationFailureReason, location_name: "FailureReason"))
6742
+ RecommendationJobInferenceBenchmark.struct_class = Types::RecommendationJobInferenceBenchmark
6743
+
6712
6744
  RecommendationJobInputConfig.add_member(:model_package_version_arn, Shapes::ShapeRef.new(shape: ModelPackageArn, required: true, location_name: "ModelPackageVersionArn"))
6713
6745
  RecommendationJobInputConfig.add_member(:job_duration_in_seconds, Shapes::ShapeRef.new(shape: JobDurationInSeconds, location_name: "JobDurationInSeconds"))
6714
6746
  RecommendationJobInputConfig.add_member(:traffic_pattern, Shapes::ShapeRef.new(shape: TrafficPattern, location_name: "TrafficPattern"))
@@ -9470,6 +9502,20 @@ module Aws::SageMaker
9470
9502
  )
9471
9503
  end)
9472
9504
 
9505
+ api.add_operation(:list_inference_recommendations_job_steps, Seahorse::Model::Operation.new.tap do |o|
9506
+ o.name = "ListInferenceRecommendationsJobSteps"
9507
+ o.http_method = "POST"
9508
+ o.http_request_uri = "/"
9509
+ o.input = Shapes::ShapeRef.new(shape: ListInferenceRecommendationsJobStepsRequest)
9510
+ o.output = Shapes::ShapeRef.new(shape: ListInferenceRecommendationsJobStepsResponse)
9511
+ o[:pager] = Aws::Pager.new(
9512
+ limit_key: "max_results",
9513
+ tokens: {
9514
+ "next_token" => "next_token"
9515
+ }
9516
+ )
9517
+ end)
9518
+
9473
9519
  api.add_operation(:list_inference_recommendations_jobs, Seahorse::Model::Operation.new.tap do |o|
9474
9520
  o.name = "ListInferenceRecommendationsJobs"
9475
9521
  o.http_method = "POST"
@@ -23734,6 +23734,43 @@ module Aws::SageMaker
23734
23734
  include Aws::Structure
23735
23735
  end
23736
23736
 
23737
+ # A returned array object for the `Steps` response field in the
23738
+ # [ListInferenceRecommendationsJobSteps][1] API command.
23739
+ #
23740
+ #
23741
+ #
23742
+ # [1]: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ListInferenceRecommendationsJobSteps.html
23743
+ #
23744
+ # @!attribute [rw] step_type
23745
+ # The type of the subtask.
23746
+ #
23747
+ # `BENCHMARK`\: Evaluate the performance of your model on different
23748
+ # instance types.
23749
+ # @return [String]
23750
+ #
23751
+ # @!attribute [rw] job_name
23752
+ # The name of the Inference Recommender job.
23753
+ # @return [String]
23754
+ #
23755
+ # @!attribute [rw] status
23756
+ # The current status of the benchmark.
23757
+ # @return [String]
23758
+ #
23759
+ # @!attribute [rw] inference_benchmark
23760
+ # The details for a specific benchmark.
23761
+ # @return [Types::RecommendationJobInferenceBenchmark]
23762
+ #
23763
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/InferenceRecommendationsJobStep AWS API Documentation
23764
+ #
23765
+ class InferenceRecommendationsJobStep < Struct.new(
23766
+ :step_type,
23767
+ :job_name,
23768
+ :status,
23769
+ :inference_benchmark)
23770
+ SENSITIVE = []
23771
+ include Aws::Structure
23772
+ end
23773
+
23737
23774
  # Defines how to perform inference generation after a training job is
23738
23775
  # run.
23739
23776
  #
@@ -27259,6 +27296,73 @@ module Aws::SageMaker
27259
27296
  include Aws::Structure
27260
27297
  end
27261
27298
 
27299
+ # @note When making an API call, you may pass ListInferenceRecommendationsJobStepsRequest
27300
+ # data as a hash:
27301
+ #
27302
+ # {
27303
+ # job_name: "RecommendationJobName", # required
27304
+ # status: "PENDING", # accepts PENDING, IN_PROGRESS, COMPLETED, FAILED, STOPPING, STOPPED
27305
+ # step_type: "BENCHMARK", # accepts BENCHMARK
27306
+ # max_results: 1,
27307
+ # next_token: "NextToken",
27308
+ # }
27309
+ #
27310
+ # @!attribute [rw] job_name
27311
+ # The name for the Inference Recommender job.
27312
+ # @return [String]
27313
+ #
27314
+ # @!attribute [rw] status
27315
+ # A filter to return benchmarks of a specified status. If this field
27316
+ # is left empty, then all benchmarks are returned.
27317
+ # @return [String]
27318
+ #
27319
+ # @!attribute [rw] step_type
27320
+ # A filter to return details about the specified type of subtask.
27321
+ #
27322
+ # `BENCHMARK`\: Evaluate the performance of your model on different
27323
+ # instance types.
27324
+ # @return [String]
27325
+ #
27326
+ # @!attribute [rw] max_results
27327
+ # The maximum number of results to return.
27328
+ # @return [Integer]
27329
+ #
27330
+ # @!attribute [rw] next_token
27331
+ # A token that you can specify to return more results from the list.
27332
+ # Specify this field if you have a token that was returned from a
27333
+ # previous request.
27334
+ # @return [String]
27335
+ #
27336
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListInferenceRecommendationsJobStepsRequest AWS API Documentation
27337
+ #
27338
+ class ListInferenceRecommendationsJobStepsRequest < Struct.new(
27339
+ :job_name,
27340
+ :status,
27341
+ :step_type,
27342
+ :max_results,
27343
+ :next_token)
27344
+ SENSITIVE = []
27345
+ include Aws::Structure
27346
+ end
27347
+
27348
+ # @!attribute [rw] steps
27349
+ # A list of all subtask details in Inference Recommender.
27350
+ # @return [Array<Types::InferenceRecommendationsJobStep>]
27351
+ #
27352
+ # @!attribute [rw] next_token
27353
+ # A token that you can specify in your next request to return more
27354
+ # results from the list.
27355
+ # @return [String]
27356
+ #
27357
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListInferenceRecommendationsJobStepsResponse AWS API Documentation
27358
+ #
27359
+ class ListInferenceRecommendationsJobStepsResponse < Struct.new(
27360
+ :steps,
27361
+ :next_token)
27362
+ SENSITIVE = []
27363
+ include Aws::Structure
27364
+ end
27365
+
27262
27366
  # @note When making an API call, you may pass ListInferenceRecommendationsJobsRequest
27263
27367
  # data as a hash:
27264
27368
  #
@@ -36915,6 +37019,38 @@ module Aws::SageMaker
36915
37019
  include Aws::Structure
36916
37020
  end
36917
37021
 
37022
+ # The details for a specific benchmark from an Inference Recommender
37023
+ # job.
37024
+ #
37025
+ # @!attribute [rw] metrics
37026
+ # The metrics of recommendations.
37027
+ # @return [Types::RecommendationMetrics]
37028
+ #
37029
+ # @!attribute [rw] endpoint_configuration
37030
+ # The endpoint configuration made by Inference Recommender during a
37031
+ # recommendation job.
37032
+ # @return [Types::EndpointOutputConfiguration]
37033
+ #
37034
+ # @!attribute [rw] model_configuration
37035
+ # Defines the model configuration. Includes the specification name and
37036
+ # environment parameters.
37037
+ # @return [Types::ModelConfiguration]
37038
+ #
37039
+ # @!attribute [rw] failure_reason
37040
+ # The reason why a benchmark failed.
37041
+ # @return [String]
37042
+ #
37043
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/RecommendationJobInferenceBenchmark AWS API Documentation
37044
+ #
37045
+ class RecommendationJobInferenceBenchmark < Struct.new(
37046
+ :metrics,
37047
+ :endpoint_configuration,
37048
+ :model_configuration,
37049
+ :failure_reason)
37050
+ SENSITIVE = []
37051
+ include Aws::Structure
37052
+ end
37053
+
36918
37054
  # The input configuration of the recommendation job.
36919
37055
  #
36920
37056
  # @note When making an API call, you may pass RecommendationJobInputConfig
@@ -49,6 +49,6 @@ require_relative 'aws-sdk-sagemaker/customizations'
49
49
  # @!group service
50
50
  module Aws::SageMaker
51
51
 
52
- GEM_VERSION = '1.146.0'
52
+ GEM_VERSION = '1.147.0'
53
53
 
54
54
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-sagemaker
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.146.0
4
+ version: 1.147.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2022-10-21 00:00:00.000000000 Z
11
+ date: 2022-10-24 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core