aws-sdk-sagemaker 1.69.0 → 1.70.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/aws-sdk-sagemaker.rb +1 -1
- data/lib/aws-sdk-sagemaker/client.rb +10 -9
- data/lib/aws-sdk-sagemaker/types.rb +83 -3
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 25c8ad1c84f784c05b8e8a1c7ab40c927b85177f54dbdcfd1b3a690d4eb75003
|
4
|
+
data.tar.gz: 4d951953dd6f6f44b113696e2409df8b71058b9264e820f624b2a8979a37ec13
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 4c1b054af1a10a5190579037e6bcb7be93e3fc909c7b8105c8e85408f3bba51af737b5cb9d2f3fccdc26954f9ef38d7516d1ab0d6cfd44a164b75b974b957476
|
7
|
+
data.tar.gz: 3e1fb935f7b6f9368ab687ee70bf12bc96b82c046968a4b281fb007597cb395e8b2e4f55a95229027926ac7754f11263b4fa4af1afe8f5c1bca66d63a9026879
|
data/lib/aws-sdk-sagemaker.rb
CHANGED
@@ -985,7 +985,7 @@ module Aws::SageMaker
|
|
985
985
|
# },
|
986
986
|
# output_config: { # required
|
987
987
|
# s3_output_location: "S3Uri", # required
|
988
|
-
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64
|
988
|
+
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64, coreml
|
989
989
|
# target_platform: {
|
990
990
|
# os: "ANDROID", # required, accepts ANDROID, LINUX
|
991
991
|
# arch: "X86_64", # required, accepts X86_64, X86, ARM64, ARM_EABI, ARM_EABIHF
|
@@ -1028,9 +1028,10 @@ module Aws::SageMaker
|
|
1028
1028
|
#
|
1029
1029
|
# All SageMaker Studio traffic between the domain and the EFS volume is
|
1030
1030
|
# through the specified VPC and subnets. For other Studio traffic, you
|
1031
|
-
# specify the `AppNetworkAccessType` parameter.
|
1032
|
-
# corresponds to the
|
1033
|
-
# The following options are
|
1031
|
+
# can specify the `AppNetworkAccessType` parameter.
|
1032
|
+
# `AppNetworkAccessType` corresponds to the network access type that you
|
1033
|
+
# choose when you onboard to Studio. The following options are
|
1034
|
+
# available:
|
1034
1035
|
#
|
1035
1036
|
# * `PublicInternetOnly` - Non-EFS traffic goes through a VPC managed by
|
1036
1037
|
# Amazon SageMaker, which allows internet access. This is the default
|
@@ -1045,9 +1046,9 @@ module Aws::SageMaker
|
|
1045
1046
|
# or a NAT gateway and your security groups allow outbound
|
1046
1047
|
# connections.
|
1047
1048
|
#
|
1048
|
-
# <b> <code>VpcOnly</code>
|
1049
|
+
# <b> <code>VpcOnly</code> network access type</b>
|
1049
1050
|
#
|
1050
|
-
# When you
|
1051
|
+
# When you choose `VpcOnly`, you must specify the following:
|
1051
1052
|
#
|
1052
1053
|
# * Security group inbound and outbound rules to allow NFS traffic over
|
1053
1054
|
# TCP on port 2049 between the domain and the EFS volume
|
@@ -5145,7 +5146,7 @@ module Aws::SageMaker
|
|
5145
5146
|
# resp.input_config.data_input_config #=> String
|
5146
5147
|
# resp.input_config.framework #=> String, one of "TENSORFLOW", "KERAS", "MXNET", "ONNX", "PYTORCH", "XGBOOST", "TFLITE"
|
5147
5148
|
# resp.output_config.s3_output_location #=> String
|
5148
|
-
# resp.output_config.target_device #=> String, one of "lambda", "ml_m4", "ml_m5", "ml_c4", "ml_c5", "ml_p2", "ml_p3", "ml_g4dn", "ml_inf1", "jetson_tx1", "jetson_tx2", "jetson_nano", "jetson_xavier", "rasp3b", "imx8qm", "deeplens", "rk3399", "rk3288", "aisage", "sbe_c", "qcs605", "qcs603", "sitara_am57x", "amba_cv22", "x86_win32", "x86_win64"
|
5149
|
+
# resp.output_config.target_device #=> String, one of "lambda", "ml_m4", "ml_m5", "ml_c4", "ml_c5", "ml_p2", "ml_p3", "ml_g4dn", "ml_inf1", "jetson_tx1", "jetson_tx2", "jetson_nano", "jetson_xavier", "rasp3b", "imx8qm", "deeplens", "rk3399", "rk3288", "aisage", "sbe_c", "qcs605", "qcs603", "sitara_am57x", "amba_cv22", "x86_win32", "x86_win64", "coreml"
|
5149
5150
|
# resp.output_config.target_platform.os #=> String, one of "ANDROID", "LINUX"
|
5150
5151
|
# resp.output_config.target_platform.arch #=> String, one of "X86_64", "X86", "ARM64", "ARM_EABI", "ARM_EABIHF"
|
5151
5152
|
# resp.output_config.target_platform.accelerator #=> String, one of "INTEL_GRAPHICS", "MALI", "NVIDIA"
|
@@ -7359,7 +7360,7 @@ module Aws::SageMaker
|
|
7359
7360
|
# resp.compilation_job_summaries[0].creation_time #=> Time
|
7360
7361
|
# resp.compilation_job_summaries[0].compilation_start_time #=> Time
|
7361
7362
|
# resp.compilation_job_summaries[0].compilation_end_time #=> Time
|
7362
|
-
# resp.compilation_job_summaries[0].compilation_target_device #=> String, one of "lambda", "ml_m4", "ml_m5", "ml_c4", "ml_c5", "ml_p2", "ml_p3", "ml_g4dn", "ml_inf1", "jetson_tx1", "jetson_tx2", "jetson_nano", "jetson_xavier", "rasp3b", "imx8qm", "deeplens", "rk3399", "rk3288", "aisage", "sbe_c", "qcs605", "qcs603", "sitara_am57x", "amba_cv22", "x86_win32", "x86_win64"
|
7363
|
+
# resp.compilation_job_summaries[0].compilation_target_device #=> String, one of "lambda", "ml_m4", "ml_m5", "ml_c4", "ml_c5", "ml_p2", "ml_p3", "ml_g4dn", "ml_inf1", "jetson_tx1", "jetson_tx2", "jetson_nano", "jetson_xavier", "rasp3b", "imx8qm", "deeplens", "rk3399", "rk3288", "aisage", "sbe_c", "qcs605", "qcs603", "sitara_am57x", "amba_cv22", "x86_win32", "x86_win64", "coreml"
|
7363
7364
|
# resp.compilation_job_summaries[0].compilation_target_platform_os #=> String, one of "ANDROID", "LINUX"
|
7364
7365
|
# resp.compilation_job_summaries[0].compilation_target_platform_arch #=> String, one of "X86_64", "X86", "ARM64", "ARM_EABI", "ARM_EABIHF"
|
7365
7366
|
# resp.compilation_job_summaries[0].compilation_target_platform_accelerator #=> String, one of "INTEL_GRAPHICS", "MALI", "NVIDIA"
|
@@ -11053,7 +11054,7 @@ module Aws::SageMaker
|
|
11053
11054
|
params: params,
|
11054
11055
|
config: config)
|
11055
11056
|
context[:gem_name] = 'aws-sdk-sagemaker'
|
11056
|
-
context[:gem_version] = '1.69.0'
|
11057
|
+
context[:gem_version] = '1.70.0'
|
11057
11058
|
Seahorse::Client::Request.new(handlers, context)
|
11058
11059
|
end
|
11059
11060
|
|
@@ -3040,7 +3040,7 @@ module Aws::SageMaker
|
|
3040
3040
|
# },
|
3041
3041
|
# output_config: { # required
|
3042
3042
|
# s3_output_location: "S3Uri", # required
|
3043
|
-
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64
|
3043
|
+
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64, coreml
|
3044
3044
|
# target_platform: {
|
3045
3045
|
# os: "ANDROID", # required, accepts ANDROID, LINUX
|
3046
3046
|
# arch: "X86_64", # required, accepts X86_64, X86, ARM64, ARM_EABI, ARM_EABIHF
|
@@ -12711,6 +12711,76 @@ module Aws::SageMaker
|
|
12711
12711
|
# [1,3,224,224]]`
|
12712
12712
|
#
|
12713
12713
|
# * `XGBOOST`\: input data name and shape are not needed.
|
12714
|
+
#
|
12715
|
+
# `DataInputConfig` supports the following parameters for `CoreML`
|
12716
|
+
# OutputConfig$TargetDevice (ML Model format):
|
12717
|
+
#
|
12718
|
+
# * `shape`\: Input shape, for example `\{"input_1": \{"shape":
|
12719
|
+
# [1,224,224,3]\}\}`. In addition to static input shapes, CoreML
|
12720
|
+
# converter supports Flexible input shapes:
|
12721
|
+
#
|
12722
|
+
# * Range Dimension. You can use the Range Dimension feature if you
|
12723
|
+
# know the input shape will be within some specific interval in
|
12724
|
+
# that dimension, for example: `\{"input_1": \{"shape": ["1..10",
|
12725
|
+
# 224, 224, 3]\}\}`
|
12726
|
+
#
|
12727
|
+
# * Enumerated shapes. Sometimes, the models are trained to work
|
12728
|
+
# only on a select set of inputs. You can enumerate all supported
|
12729
|
+
# input shapes, for example: `\{"input_1": \{"shape": [[1, 224,
|
12730
|
+
# 224, 3], [1, 160, 160, 3]]\}\}`
|
12731
|
+
#
|
12732
|
+
# * `default_shape`\: Default input shape. You can set a default shape
|
12733
|
+
# during conversion for both Range Dimension and Enumerated Shapes.
|
12734
|
+
# For example `\{"input_1": \{"shape": ["1..10", 224, 224, 3],
|
12735
|
+
# "default_shape": [1, 224, 224, 3]\}\}`
|
12736
|
+
#
|
12737
|
+
# * `type`\: Input type. Allowed values: `Image` and `Tensor`. By
|
12738
|
+
# default, the converter generates an ML Model with inputs of type
|
12739
|
+
# Tensor (MultiArray). User can set input type to be Image. Image
|
12740
|
+
# input type requires additional input parameters such as `bias` and
|
12741
|
+
# `scale`.
|
12742
|
+
#
|
12743
|
+
# * `bias`\: If the input type is an Image, you need to provide the
|
12744
|
+
# bias vector.
|
12745
|
+
#
|
12746
|
+
# * `scale`\: If the input type is an Image, you need to provide a
|
12747
|
+
# scale factor.
|
12748
|
+
#
|
12749
|
+
# CoreML `ClassifierConfig` parameters can be specified using
|
12750
|
+
# OutputConfig$CompilerOptions. CoreML converter supports Tensorflow
|
12751
|
+
# and PyTorch models. CoreML conversion examples:
|
12752
|
+
#
|
12753
|
+
# * Tensor type input:
|
12754
|
+
#
|
12755
|
+
# * `"DataInputConfig": \{"input_1": \{"shape": [[1,224,224,3],
|
12756
|
+
# [1,160,160,3]], "default_shape": [1,224,224,3]\}\}`
|
12757
|
+
#
|
12758
|
+
# ^
|
12759
|
+
#
|
12760
|
+
# * Tensor type input without input name (PyTorch):
|
12761
|
+
#
|
12762
|
+
# * `"DataInputConfig": [\{"shape": [[1,3,224,224], [1,3,160,160]],
|
12763
|
+
# "default_shape": [1,3,224,224]\}]`
|
12764
|
+
#
|
12765
|
+
# ^
|
12766
|
+
#
|
12767
|
+
# * Image type input:
|
12768
|
+
#
|
12769
|
+
# * `"DataInputConfig": \{"input_1": \{"shape": [[1,224,224,3],
|
12770
|
+
# [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image",
|
12771
|
+
# "bias": [-1,-1,-1], "scale": 0.007843137255\}\}`
|
12772
|
+
#
|
12773
|
+
# * `"CompilerOptions": \{"class_labels":
|
12774
|
+
# "imagenet_labels_1000.txt"\}`
|
12775
|
+
#
|
12776
|
+
# * Image type input without input name (PyTorch):
|
12777
|
+
#
|
12778
|
+
# * `"DataInputConfig": [\{"shape": [[1,3,224,224], [1,3,160,160]],
|
12779
|
+
# "default_shape": [1,3,224,224], "type": "Image", "bias":
|
12780
|
+
# [-1,-1,-1], "scale": 0.007843137255\}]`
|
12781
|
+
#
|
12782
|
+
# * `"CompilerOptions": \{"class_labels":
|
12783
|
+
# "imagenet_labels_1000.txt"\}`
|
12714
12784
|
# @return [String]
|
12715
12785
|
#
|
12716
12786
|
# @!attribute [rw] framework
|
@@ -17704,7 +17774,7 @@ module Aws::SageMaker
|
|
17704
17774
|
#
|
17705
17775
|
# {
|
17706
17776
|
# s3_output_location: "S3Uri", # required
|
17707
|
-
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64
|
17777
|
+
# target_device: "lambda", # accepts lambda, ml_m4, ml_m5, ml_c4, ml_c5, ml_p2, ml_p3, ml_g4dn, ml_inf1, jetson_tx1, jetson_tx2, jetson_nano, jetson_xavier, rasp3b, imx8qm, deeplens, rk3399, rk3288, aisage, sbe_c, qcs605, qcs603, sitara_am57x, amba_cv22, x86_win32, x86_win64, coreml
|
17708
17778
|
# target_platform: {
|
17709
17779
|
# os: "ANDROID", # required, accepts ANDROID, LINUX
|
17710
17780
|
# arch: "X86_64", # required, accepts X86_64, X86, ARM64, ARM_EABI, ARM_EABIHF
|
@@ -17778,7 +17848,7 @@ module Aws::SageMaker
|
|
17778
17848
|
# @!attribute [rw] compiler_options
|
17779
17849
|
# Specifies additional parameters for compiler options in JSON format.
|
17780
17850
|
# The compiler options are `TargetPlatform` specific. It is required
|
17781
|
-
# for NVIDIA accelerators and highly recommended for CPU
|
17851
|
+
# for NVIDIA accelerators and highly recommended for CPU compilations.
|
17782
17852
|
# For any other cases, it is optional to specify `CompilerOptions.`
|
17783
17853
|
#
|
17784
17854
|
# * `CPU`\: Compilation for CPU supports the following compiler
|
@@ -17820,6 +17890,16 @@ module Aws::SageMaker
|
|
17820
17890
|
#
|
17821
17891
|
# * `mattr`\: Add `\{'mattr': ['+neon']\}` to compiler options if
|
17822
17892
|
# compiling for ARM 32-bit platform with NEON support.
|
17893
|
+
#
|
17894
|
+
# * `CoreML`\: Compilation for the CoreML OutputConfig$TargetDevice
|
17895
|
+
# supports the following compiler options:
|
17896
|
+
#
|
17897
|
+
# * `class_labels`\: Specifies the classification labels file name
|
17898
|
+
# inside input tar.gz file. For example, `\{"class_labels":
|
17899
|
+
# "imagenet_labels_1000.txt"\}`. Labels inside the txt file should
|
17900
|
+
# be separated by newlines.
|
17901
|
+
#
|
17902
|
+
# ^
|
17823
17903
|
# @return [String]
|
17824
17904
|
#
|
17825
17905
|
# @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OutputConfig AWS API Documentation
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: aws-sdk-sagemaker
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 1.69.0
|
4
|
+
version: 1.70.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Amazon Web Services
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2020-10-
|
11
|
+
date: 2020-10-08 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: aws-sdk-core
|