aws-sdk-machinelearning 1.0.0.rc1 → 1.0.0.rc2

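For orientation, here is a minimal usage sketch around this release candidate (the Gemfile pin, region, credentials, and tag values are assumed placeholders, not part of the diff); the client class and `add_tags` call mirror the generated docs further down:

```ruby
# Gemfile: pin the release candidate this diff describes (assumed setup).
#   gem 'aws-sdk-machinelearning', '1.0.0.rc2'

require 'aws-sdk-machinelearning'

# Placeholder region/credentials; any credential source listed in the
# Client docs below (ENV vars, ~/.aws/credentials, etc.) also works.
client = Aws::MachineLearning::Client.new(
  region: 'us-east-1',
  credentials: Aws::Credentials.new('AKIAEXAMPLE', 'SECRETEXAMPLE')
)

# Tag an existing ML model (IDs are illustrative).
resp = client.add_tags(
  tags: [{ key: 'TagKey', value: 'TagValue' }],
  resource_id: 'exampleModelId',
  resource_type: 'MLModel'
)
puts resp.resource_id
```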
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 9ba71ed1db314c4ecc1e9971bc9ad657ea98a71b
- data.tar.gz: 27b6212254cac811e498620a1602371f12a45755
+ metadata.gz: 3f4ee000c6876c15fcd64a7c6cb65d222a855fa3
+ data.tar.gz: 84f323c08a8b239c4172e2cb3e9399a8ab53b81c
  SHA512:
- metadata.gz: b47aeee82d78fd427d620fdb2d3ee27623893a4001512ef555f5e9825ee30bc0558ae986014b4593ba4a0e11f0ee79f4827c32d54e147166f836a3da03801bbc
- data.tar.gz: 0bb0dae5ea3dbe0c7ab243b1c8790010ff3292c2f6446981b37bd8483e4c53a4b32ea15f7aaf6df677bcc4dbd8de5fc3571e4293f1e7e82e317cc8c1c1fba1dc
+ metadata.gz: 339b0d08d186dd4516067918daaa7cccbc85f080fc16d05fde4d177e41a40d11d7284f95c7e2f02374e51067d6f4c4302cb3f5047bc37a8c2c04c387fc6f2ae6
+ data.tar.gz: 1496358046d7bb00473dec77ee0da31bd82635ff35158031e6552a6cf81f573043a478f81192bf11e22aec2bee0831aaa33dc3be3c05f6db90889e61990e02d9
@@ -1,6 +1,6 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE
@@ -43,6 +43,6 @@ require_relative 'aws-sdk-machinelearning/customizations'
  # @service
  module Aws::MachineLearning

- GEM_VERSION = '1.0.0.rc1'
+ GEM_VERSION = '1.0.0.rc2'

  end
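A quick way to confirm at runtime that the updated release from the hunk above is the one loaded (a sketch; `GEM_VERSION` is the constant changed in this diff):

```ruby
require 'aws-sdk-machinelearning'

# Prints "1.0.0.rc2" once the gem from this diff is installed.
puts Aws::MachineLearning::GEM_VERSION
```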
@@ -1,6 +1,6 @@
  # WARNING ABOUT GENERATED CODE
  #
- # This file is generated. See the contributing for info on making contributions:
+ # This file is generated. See the contributing guide for more information:
  # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
  #
  # WARNING ABOUT GENERATED CODE
@@ -18,2020 +18,2253 @@ require 'aws-sdk-core/plugins/regional_endpoint.rb'
  require 'aws-sdk-core/plugins/response_paging.rb'
  require 'aws-sdk-core/plugins/stub_responses.rb'
  require 'aws-sdk-core/plugins/idempotency_token.rb'
+ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
  require 'aws-sdk-core/plugins/signature_v4.rb'
  require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
  require 'aws-sdk-machinelearning/plugins/predict_endpoint.rb'

  Aws::Plugins::GlobalConfiguration.add_identifier(:machinelearning)

- module Aws
- module MachineLearning
- class Client < Seahorse::Client::Base
+ module Aws::MachineLearning
+ class Client < Seahorse::Client::Base

- include Aws::ClientStubs
+ include Aws::ClientStubs

- @identifier = :machinelearning
+ @identifier = :machinelearning

- set_api(ClientApi::API)
+ set_api(ClientApi::API)

- add_plugin(Seahorse::Client::Plugins::ContentLength)
- add_plugin(Aws::Plugins::CredentialsConfiguration)
- add_plugin(Aws::Plugins::Logging)
- add_plugin(Aws::Plugins::ParamConverter)
- add_plugin(Aws::Plugins::ParamValidator)
- add_plugin(Aws::Plugins::UserAgent)
- add_plugin(Aws::Plugins::HelpfulSocketErrors)
- add_plugin(Aws::Plugins::RetryErrors)
- add_plugin(Aws::Plugins::GlobalConfiguration)
- add_plugin(Aws::Plugins::RegionalEndpoint)
- add_plugin(Aws::Plugins::ResponsePaging)
- add_plugin(Aws::Plugins::StubResponses)
- add_plugin(Aws::Plugins::IdempotencyToken)
- add_plugin(Aws::Plugins::SignatureV4)
- add_plugin(Aws::Plugins::Protocols::JsonRpc)
- add_plugin(Aws::MachineLearning::Plugins::PredictEndpoint)
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
+ add_plugin(Aws::Plugins::Logging)
+ add_plugin(Aws::Plugins::ParamConverter)
+ add_plugin(Aws::Plugins::ParamValidator)
+ add_plugin(Aws::Plugins::UserAgent)
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
+ add_plugin(Aws::Plugins::RetryErrors)
+ add_plugin(Aws::Plugins::GlobalConfiguration)
+ add_plugin(Aws::Plugins::RegionalEndpoint)
+ add_plugin(Aws::Plugins::ResponsePaging)
+ add_plugin(Aws::Plugins::StubResponses)
+ add_plugin(Aws::Plugins::IdempotencyToken)
+ add_plugin(Aws::Plugins::JsonvalueConverter)
+ add_plugin(Aws::Plugins::SignatureV4)
+ add_plugin(Aws::Plugins::Protocols::JsonRpc)
+ add_plugin(Aws::MachineLearning::Plugins::PredictEndpoint)

- # @option options [required, Aws::CredentialProvider] :credentials
- # Your AWS credentials. This can be an instance of any one of the
- # following classes:
- #
- # * `Aws::Credentials` - Used for configuring static, non-refreshing
- # credentials.
- #
- # * `Aws::InstanceProfileCredentials` - Used for loading credentials
- # from an EC2 IMDS on an EC2 instance.
- #
- # * `Aws::SharedCredentials` - Used for loading credentials from a
- # shared file, such as `~/.aws/config`.
- #
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
- #
- # When `:credentials` are not configured directly, the following
- # locations will be searched for credentials:
- #
- # * `Aws.config[:credentials]`
- # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
- # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # * EC2 IMDS instance profile - When used by default, the timeouts are
- # very aggressive. Construct and pass an instance of
- # `Aws::InstanceProfileCredentails` to enable retries and extended
- # timeouts.
- # @option options [required, String] :region
- # The AWS region to connect to. The configured `:region` is
- # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is search for in the following locations:
- #
- # * `Aws.config[:region]`
- # * `ENV['AWS_REGION']`
- # * `ENV['AMAZON_REGION']`
- # * `ENV['AWS_DEFAULT_REGION']`
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- # @option options [String] :access_key_id
- # @option options [Boolean] :convert_params (true)
- # When `true`, an attempt is made to coerce request parameters into
- # the required types.
- # @option options [String] :endpoint
- # The client endpoint is normally constructed from the `:region`
- # option. You should only configure an `:endpoint` when connecting
- # to test endpoints. This should be avalid HTTP(S) URI.
- # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
- # The log formatter.
- # @option options [Symbol] :log_level (:info)
- # The log level to send messages to the `:logger` at.
- # @option options [Logger] :logger
- # The Logger instance to send log messages to. If this option
- # is not set, logging will be disabled.
- # @option options [String] :profile ("default")
- # Used when loading credentials from the shared credentials file
- # at HOME/.aws/credentials. When not specified, 'default' is used.
- # @option options [Integer] :retry_limit (3)
- # The maximum number of times to retry failed requests. Only
- # ~ 500 level server errors and certain ~ 400 level client errors
- # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors and auth
- # errors from expired credentials.
- # @option options [String] :secret_access_key
- # @option options [String] :session_token
- # @option options [Boolean] :simple_json (false)
- # Disables request parameter conversion, validation, and formatting.
- # Also disable response data type conversions. This option is useful
- # when you want to ensure the highest level of performance by
- # avoiding overhead of walking request parameters and response data
- # structures.
- #
- # When `:simple_json` is enabled, the request parameters hash must
- # be formatted exactly as the DynamoDB API expects.
- # @option options [Boolean] :stub_responses (false)
- # Causes the client to return stubbed responses. By default
- # fake responses are generated and returned. You can specify
- # the response data to return or errors to raise by calling
- # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
- #
- # ** Please note ** When response stubbing is enabled, no HTTP
- # requests are made, and retries are disabled.
- # @option options [Boolean] :validate_params (true)
- # When `true`, request parameters are validated before
- # sending the request.
- # def initialize(*args)
- # super
- # end
+ # @option options [required, Aws::CredentialProvider] :credentials
+ # Your AWS credentials. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
+ # credentials.
+ #
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
+ # from an EC2 IMDS on an EC2 instance.
+ #
+ # * `Aws::SharedCredentials` - Used for loading credentials from a
+ # shared file, such as `~/.aws/config`.
+ #
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+ #
+ # When `:credentials` are not configured directly, the following
+ # locations will be searched for credentials:
+ #
+ # * `Aws.config[:credentials]`
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ # * EC2 IMDS instance profile - When used by default, the timeouts are
+ # very aggressive. Construct and pass an instance of
+ # `Aws::InstanceProfileCredentails` to enable retries and extended
+ # timeouts.
+ #
+ # @option options [required, String] :region
+ # The AWS region to connect to. The configured `:region` is
+ # used to determine the service `:endpoint`. When not passed,
+ # a default `:region` is search for in the following locations:
+ #
+ # * `Aws.config[:region]`
+ # * `ENV['AWS_REGION']`
+ # * `ENV['AMAZON_REGION']`
+ # * `ENV['AWS_DEFAULT_REGION']`
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ #
+ # @option options [String] :access_key_id
+ #
+ # @option options [Boolean] :convert_params (true)
+ # When `true`, an attempt is made to coerce request parameters into
+ # the required types.
+ #
+ # @option options [String] :endpoint
+ # The client endpoint is normally constructed from the `:region`
+ # option. You should only configure an `:endpoint` when connecting
+ # to test endpoints. This should be avalid HTTP(S) URI.
+ #
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+ # The log formatter.
+ #
+ # @option options [Symbol] :log_level (:info)
+ # The log level to send messages to the `:logger` at.
+ #
+ # @option options [Logger] :logger
+ # The Logger instance to send log messages to. If this option
+ # is not set, logging will be disabled.
+ #
+ # @option options [String] :profile ("default")
+ # Used when loading credentials from the shared credentials file
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
+ #
+ # @option options [Integer] :retry_limit (3)
+ # The maximum number of times to retry failed requests. Only
+ # ~ 500 level server errors and certain ~ 400 level client errors
+ # are retried. Generally, these are throttling errors, data
+ # checksum errors, networking errors, timeout errors and auth
+ # errors from expired credentials.
+ #
+ # @option options [String] :secret_access_key
+ #
+ # @option options [String] :session_token
+ #
+ # @option options [Boolean] :simple_json (false)
+ # Disables request parameter conversion, validation, and formatting.
+ # Also disable response data type conversions. This option is useful
+ # when you want to ensure the highest level of performance by
+ # avoiding overhead of walking request parameters and response data
+ # structures.
+ #
+ # When `:simple_json` is enabled, the request parameters hash must
+ # be formatted exactly as the DynamoDB API expects.
+ #
+ # @option options [Boolean] :stub_responses (false)
+ # Causes the client to return stubbed responses. By default
+ # fake responses are generated and returned. You can specify
+ # the response data to return or errors to raise by calling
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+ #
+ # ** Please note ** When response stubbing is enabled, no HTTP
+ # requests are made, and retries are disabled.
+ #
+ # @option options [Boolean] :validate_params (true)
+ # When `true`, request parameters are validated before
+ # sending the request.
+ #
+ # def initialize(*args)
+ # super
+ # end
141
157
 
142
- # @!group API Operations
158
+ # @!group API Operations
143
159
 
144
- # Adds one or more tags to an object, up to a limit of 10. Each tag
145
- # consists of a key and an optional value. If you add a tag using a key
146
- # that is already associated with the ML object, `AddTags` updates the
147
- # tag's value.
148
- # @option params [required, Array<Types::Tag>] :tags
149
- # The key-value pairs to use to create tags. If you specify a key
150
- # without specifying a value, Amazon ML creates a tag with the specified
151
- # key and a value of null.
152
- # @option params [required, String] :resource_id
153
- # The ID of the ML object to tag. For example, `exampleModelId`.
154
- # @option params [required, String] :resource_type
155
- # The type of the ML object to tag.
156
- # @return [Types::AddTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
157
- #
158
- # * {Types::AddTagsOutput#resource_id #ResourceId} => String
159
- # * {Types::AddTagsOutput#resource_type #ResourceType} => String
160
- #
161
- # @example Request syntax with placeholder values
162
- # resp = client.add_tags({
163
- # tags: [ # required
164
- # {
165
- # key: "TagKey",
166
- # value: "TagValue",
167
- # },
168
- # ],
169
- # resource_id: "EntityId", # required
170
- # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
171
- # })
172
- #
173
- # @example Response structure
174
- # resp.resource_id #=> String
175
- # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
176
- # @overload add_tags(params = {})
177
- # @param [Hash] params ({})
178
- def add_tags(params = {}, options = {})
179
- req = build_request(:add_tags, params)
180
- req.send_request(options)
181
- end
182
-
183
- # Generates predictions for a group of observations. The observations to
184
- # process exist in one or more data files referenced by a `DataSource`.
185
- # This operation creates a new `BatchPrediction`, and uses an `MLModel`
186
- # and the data files referenced by the `DataSource` as information
187
- # sources.
188
- #
189
- # `CreateBatchPrediction` is an asynchronous operation. In response to
190
- # `CreateBatchPrediction`, Amazon Machine Learning (Amazon ML)
191
- # immediately returns and sets the `BatchPrediction` status to
192
- # `PENDING`. After the `BatchPrediction` completes, Amazon ML sets the
193
- # status to `COMPLETED`.
194
- #
195
- # You can poll for status updates by using the GetBatchPrediction
196
- # operation and checking the `Status` parameter of the result. After the
197
- # `COMPLETED` status appears, the results are available in the location
198
- # specified by the `OutputUri` parameter.
199
- # @option params [required, String] :batch_prediction_id
200
- # A user-supplied ID that uniquely identifies the `BatchPrediction`.
201
- # @option params [String] :batch_prediction_name
202
- # A user-supplied name or description of the `BatchPrediction`.
203
- # `BatchPredictionName` can only use the UTF-8 character set.
204
- # @option params [required, String] :ml_model_id
205
- # The ID of the `MLModel` that will generate predictions for the group
206
- # of observations.
207
- # @option params [required, String] :batch_prediction_data_source_id
208
- # The ID of the `DataSource` that points to the group of observations to
209
- # predict.
210
- # @option params [required, String] :output_uri
211
- # The location of an Amazon Simple Storage Service (Amazon S3) bucket or
212
- # directory to store the batch prediction results. The following
213
- # substrings are not allowed in the `s3 key` portion of the `outputURI`
214
- # field: ':', '//', '/./', '/../'.
215
- #
216
- # Amazon ML needs permissions to store and retrieve the logs on your
217
- # behalf. For information about how to set permissions, see the [Amazon
218
- # Machine Learning Developer Guide][1].
219
- #
220
- #
221
- #
222
- # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
223
- # @return [Types::CreateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
224
- #
225
- # * {Types::CreateBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
226
- #
227
- # @example Request syntax with placeholder values
228
- # resp = client.create_batch_prediction({
229
- # batch_prediction_id: "EntityId", # required
230
- # batch_prediction_name: "EntityName",
231
- # ml_model_id: "EntityId", # required
232
- # batch_prediction_data_source_id: "EntityId", # required
233
- # output_uri: "S3Url", # required
234
- # })
235
- #
236
- # @example Response structure
237
- # resp.batch_prediction_id #=> String
238
- # @overload create_batch_prediction(params = {})
239
- # @param [Hash] params ({})
240
- def create_batch_prediction(params = {}, options = {})
241
- req = build_request(:create_batch_prediction, params)
242
- req.send_request(options)
243
- end
160
+ # Adds one or more tags to an object, up to a limit of 10. Each tag
161
+ # consists of a key and an optional value. If you add a tag using a key
162
+ # that is already associated with the ML object, `AddTags` updates the
163
+ # tag's value.
164
+ #
165
+ # @option params [required, Array<Types::Tag>] :tags
166
+ # The key-value pairs to use to create tags. If you specify a key
167
+ # without specifying a value, Amazon ML creates a tag with the specified
168
+ # key and a value of null.
169
+ #
170
+ # @option params [required, String] :resource_id
171
+ # The ID of the ML object to tag. For example, `exampleModelId`.
172
+ #
173
+ # @option params [required, String] :resource_type
174
+ # The type of the ML object to tag.
175
+ #
176
+ # @return [Types::AddTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
177
+ #
178
+ # * {Types::AddTagsOutput#resource_id #resource_id} => String
179
+ # * {Types::AddTagsOutput#resource_type #resource_type} => String
180
+ #
181
+ # @example Request syntax with placeholder values
182
+ #
183
+ # resp = client.add_tags({
184
+ # tags: [ # required
185
+ # {
186
+ # key: "TagKey",
187
+ # value: "TagValue",
188
+ # },
189
+ # ],
190
+ # resource_id: "EntityId", # required
191
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
192
+ # })
193
+ #
194
+ # @example Response structure
195
+ #
196
+ # resp.resource_id #=> String
197
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
198
+ #
199
+ # @overload add_tags(params = {})
200
+ # @param [Hash] params ({})
201
+ def add_tags(params = {}, options = {})
202
+ req = build_request(:add_tags, params)
203
+ req.send_request(options)
204
+ end
244
205
 
245
- # Creates a `DataSource` object from an [ Amazon Relational Database
246
- # Service][1] (Amazon RDS). A `DataSource` references data that can be
247
- # used to perform `CreateMLModel`, `CreateEvaluation`, or
248
- # `CreateBatchPrediction` operations.
249
- #
250
- # `CreateDataSourceFromRDS` is an asynchronous operation. In response to
251
- # `CreateDataSourceFromRDS`, Amazon Machine Learning (Amazon ML)
252
- # immediately returns and sets the `DataSource` status to `PENDING`.
253
- # After the `DataSource` is created and ready for use, Amazon ML sets
254
- # the `Status` parameter to `COMPLETED`. `DataSource` in the `COMPLETED`
255
- # or `PENDING` state can be used only to perform `>CreateMLModel`&gt;,
256
- # `CreateEvaluation`, or `CreateBatchPrediction` operations.
257
- #
258
- # If Amazon ML cannot accept the input source, it sets the `Status`
259
- # parameter to `FAILED` and includes an error message in the `Message`
260
- # attribute of the `GetDataSource` operation response.
261
- #
262
- #
263
- #
264
- # [1]: http://aws.amazon.com/rds/
265
- # @option params [required, String] :data_source_id
266
- # A user-supplied ID that uniquely identifies the `DataSource`.
267
- # Typically, an Amazon Resource Number (ARN) becomes the ID for a
268
- # `DataSource`.
269
- # @option params [String] :data_source_name
270
- # A user-supplied name or description of the `DataSource`.
271
- # @option params [required, Types::RDSDataSpec] :rds_data
272
- # The data specification of an Amazon RDS `DataSource`\:
273
- #
274
- # * DatabaseInformation - * `DatabaseName` - The name of the Amazon RDS
275
- # database.
276
- # * `InstanceIdentifier ` - A unique identifier for the Amazon RDS
277
- # database instance.
278
- #
279
- # * DatabaseCredentials - AWS Identity and Access Management (IAM)
280
- # credentials that are used to connect to the Amazon RDS database.
281
- #
282
- # * ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by
283
- # an EC2 instance to carry out the copy task from Amazon RDS to Amazon
284
- # Simple Storage Service (Amazon S3). For more information, see [Role
285
- # templates][1] for data pipelines.
286
- #
287
- # * ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS
288
- # Data Pipeline service to monitor the progress of the copy task from
289
- # Amazon RDS to Amazon S3. For more information, see [Role
290
- # templates][1] for data pipelines.
291
- #
292
- # * SecurityInfo - The security information to use to access an RDS DB
293
- # instance. You need to set up appropriate ingress rules for the
294
- # security entity IDs provided to allow access to the Amazon RDS
295
- # instance. Specify a \[`SubnetId`, `SecurityGroupIds`\] pair for a
296
- # VPC-based RDS DB instance.
297
- #
298
- # * SelectSqlQuery - A query that is used to retrieve the observation
299
- # data for the `Datasource`.
300
- #
301
- # * S3StagingLocation - The Amazon S3 location for staging Amazon RDS
302
- # data. The data retrieved from Amazon RDS using `SelectSqlQuery` is
303
- # stored in this location.
304
- #
305
- # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
306
- #
307
- # * DataSchema - A JSON string representing the schema. This is not
308
- # required if `DataSchemaUri` is specified.
309
- #
310
- # * DataRearrangement - A JSON string that represents the splitting and
311
- # rearrangement requirements for the `Datasource`.
312
- #
313
- #
314
- # Sample - `
315
- # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
316
- #
317
- #
318
- #
319
- # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
320
- # @option params [required, String] :role_arn
321
- # The role that Amazon ML assumes on behalf of the user to create and
322
- # activate a data pipeline in the user's account and copy data using
323
- # the `SelectSqlQuery` query from Amazon RDS to Amazon S3.
324
- # @option params [Boolean] :compute_statistics
325
- # The compute statistics for a `DataSource`. The statistics are
326
- # generated from the observation data referenced by a `DataSource`.
327
- # Amazon ML uses the statistics internally during `MLModel` training.
328
- # This parameter must be set to `true` if the ``DataSource`` needs to be
329
- # used for `MLModel` training.
330
- # @return [Types::CreateDataSourceFromRDSOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
331
- #
332
- # * {Types::CreateDataSourceFromRDSOutput#data_source_id #DataSourceId} => String
333
- #
334
- # @example Request syntax with placeholder values
335
- # resp = client.create_data_source_from_rds({
336
- # data_source_id: "EntityId", # required
337
- # data_source_name: "EntityName",
338
- # rds_data: { # required
339
- # database_information: { # required
340
- # instance_identifier: "RDSInstanceIdentifier", # required
341
- # database_name: "RDSDatabaseName", # required
342
- # },
343
- # select_sql_query: "RDSSelectSqlQuery", # required
344
- # database_credentials: { # required
345
- # username: "RDSDatabaseUsername", # required
346
- # password: "RDSDatabasePassword", # required
347
- # },
348
- # s3_staging_location: "S3Url", # required
349
- # data_rearrangement: "DataRearrangement",
350
- # data_schema: "DataSchema",
351
- # data_schema_uri: "S3Url",
352
- # resource_role: "EDPResourceRole", # required
353
- # service_role: "EDPServiceRole", # required
354
- # subnet_id: "EDPSubnetId", # required
355
- # security_group_ids: ["EDPSecurityGroupId"], # required
356
- # },
357
- # role_arn: "RoleARN", # required
358
- # compute_statistics: false,
359
- # })
360
- #
361
- # @example Response structure
362
- # resp.data_source_id #=> String
363
- # @overload create_data_source_from_rds(params = {})
364
- # @param [Hash] params ({})
365
- def create_data_source_from_rds(params = {}, options = {})
366
- req = build_request(:create_data_source_from_rds, params)
367
- req.send_request(options)
368
- end
206
+ # Generates predictions for a group of observations. The observations to
207
+ # process exist in one or more data files referenced by a `DataSource`.
208
+ # This operation creates a new `BatchPrediction`, and uses an `MLModel`
209
+ # and the data files referenced by the `DataSource` as information
210
+ # sources.
211
+ #
212
+ # `CreateBatchPrediction` is an asynchronous operation. In response to
213
+ # `CreateBatchPrediction`, Amazon Machine Learning (Amazon ML)
214
+ # immediately returns and sets the `BatchPrediction` status to
215
+ # `PENDING`. After the `BatchPrediction` completes, Amazon ML sets the
216
+ # status to `COMPLETED`.
217
+ #
218
+ # You can poll for status updates by using the GetBatchPrediction
219
+ # operation and checking the `Status` parameter of the result. After the
220
+ # `COMPLETED` status appears, the results are available in the location
221
+ # specified by the `OutputUri` parameter.
222
+ #
223
+ # @option params [required, String] :batch_prediction_id
224
+ # A user-supplied ID that uniquely identifies the `BatchPrediction`.
225
+ #
226
+ # @option params [String] :batch_prediction_name
227
+ # A user-supplied name or description of the `BatchPrediction`.
228
+ # `BatchPredictionName` can only use the UTF-8 character set.
229
+ #
230
+ # @option params [required, String] :ml_model_id
231
+ # The ID of the `MLModel` that will generate predictions for the group
232
+ # of observations.
233
+ #
234
+ # @option params [required, String] :batch_prediction_data_source_id
235
+ # The ID of the `DataSource` that points to the group of observations to
236
+ # predict.
237
+ #
238
+ # @option params [required, String] :output_uri
239
+ # The location of an Amazon Simple Storage Service (Amazon S3) bucket or
240
+ # directory to store the batch prediction results. The following
241
+ # substrings are not allowed in the `s3 key` portion of the `outputURI`
242
+ # field: ':', '//', '/./', '/../'.
243
+ #
244
+ # Amazon ML needs permissions to store and retrieve the logs on your
245
+ # behalf. For information about how to set permissions, see the [Amazon
246
+ # Machine Learning Developer Guide][1].
247
+ #
248
+ #
249
+ #
250
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
251
+ #
252
+ # @return [Types::CreateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
253
+ #
254
+ # * {Types::CreateBatchPredictionOutput#batch_prediction_id #batch_prediction_id} => String
255
+ #
256
+ # @example Request syntax with placeholder values
257
+ #
258
+ # resp = client.create_batch_prediction({
259
+ # batch_prediction_id: "EntityId", # required
260
+ # batch_prediction_name: "EntityName",
261
+ # ml_model_id: "EntityId", # required
262
+ # batch_prediction_data_source_id: "EntityId", # required
263
+ # output_uri: "S3Url", # required
264
+ # })
265
+ #
266
+ # @example Response structure
267
+ #
268
+ # resp.batch_prediction_id #=> String
269
+ #
270
+ # @overload create_batch_prediction(params = {})
271
+ # @param [Hash] params ({})
272
+ def create_batch_prediction(params = {}, options = {})
273
+ req = build_request(:create_batch_prediction, params)
274
+ req.send_request(options)
275
+ end
369
276
 
370
- # Creates a `DataSource` from a database hosted on an Amazon Redshift
371
- # cluster. A `DataSource` references data that can be used to perform
372
- # either `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
373
- # operations.
374
- #
375
- # `CreateDataSourceFromRedshift` is an asynchronous operation. In
376
- # response to `CreateDataSourceFromRedshift`, Amazon Machine Learning
377
- # (Amazon ML) immediately returns and sets the `DataSource` status to
378
- # `PENDING`. After the `DataSource` is created and ready for use, Amazon
379
- # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in
380
- # `COMPLETED` or `PENDING` states can be used to perform only
381
- # `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
382
- # operations.
383
- #
384
- # If Amazon ML can't accept the input source, it sets the `Status`
385
- # parameter to `FAILED` and includes an error message in the `Message`
386
- # attribute of the `GetDataSource` operation response.
387
- #
388
- # The observations should be contained in the database hosted on an
389
- # Amazon Redshift cluster and should be specified by a `SelectSqlQuery`
390
- # query. Amazon ML executes an `Unload` command in Amazon Redshift to
391
- # transfer the result set of the `SelectSqlQuery` query to
392
- # `S3StagingLocation`.
393
- #
394
- # After the `DataSource` has been created, it's ready for use in
395
- # evaluations and batch predictions. If you plan to use the `DataSource`
396
- # to train an `MLModel`, the `DataSource` also requires a recipe. A
397
- # recipe describes how each input variable will be used in training an
398
- # `MLModel`. Will the variable be included or excluded from training?
399
- # Will the variable be manipulated; for example, will it be combined
400
- # with another variable or will it be split apart into word
401
- # combinations? The recipe provides answers to these questions.
402
- #
403
- # <?oxy\_insert\_start author="laurama" timestamp="20160406T153842-0700">You can't change an existing datasource, but you can copy and modify
404
- # the settings from an existing Amazon Redshift datasource to create a
405
- # new datasource. To do so, call `GetDataSource` for an existing
406
- # datasource and copy the values to a `CreateDataSource` call. Change
407
- # the settings that you want to change and make sure that all required
408
- # fields have the appropriate values.
409
- #
410
- # <?oxy\_insert\_end>
411
- # @option params [required, String] :data_source_id
412
- # A user-supplied ID that uniquely identifies the `DataSource`.
413
- # @option params [String] :data_source_name
414
- # A user-supplied name or description of the `DataSource`.
415
- # @option params [required, Types::RedshiftDataSpec] :data_spec
416
- # The data specification of an Amazon Redshift `DataSource`\:
417
- #
418
- # * DatabaseInformation - * `DatabaseName` - The name of the Amazon
419
- # Redshift database.
420
- # * ` ClusterIdentifier` - The unique ID for the Amazon Redshift
421
- # cluster.
422
- #
423
- # * DatabaseCredentials - The AWS Identity and Access Management (IAM)
424
- # credentials that are used to connect to the Amazon Redshift
425
- # database.
426
- #
427
- # * SelectSqlQuery - The query that is used to retrieve the observation
428
- # data for the `Datasource`.
429
- #
430
- # * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3)
431
- # location for staging Amazon Redshift data. The data retrieved from
432
- # Amazon Redshift using the `SelectSqlQuery` query is stored in this
433
- # location.
434
- #
435
- # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
436
- #
437
- # * DataSchema - A JSON string representing the schema. This is not
438
- # required if `DataSchemaUri` is specified.
439
- #
440
- # * DataRearrangement - A JSON string that represents the splitting and
441
- # rearrangement requirements for the `DataSource`.
442
- #
443
- # Sample - `
444
- # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
445
- # @option params [required, String] :role_arn
446
- # A fully specified role Amazon Resource Name (ARN). Amazon ML assumes
447
- # the role on behalf of the user to create the following:
448
- #
449
- # * A security group to allow Amazon ML to execute the `SelectSqlQuery`
450
- # query on an Amazon Redshift cluster
451
- #
452
- # * An Amazon S3 bucket policy to grant Amazon ML read/write permissions
453
- # on the `S3StagingLocation`
454
- # @option params [Boolean] :compute_statistics
455
- # The compute statistics for a `DataSource`. The statistics are
456
- # generated from the observation data referenced by a `DataSource`.
457
- # Amazon ML uses the statistics internally during `MLModel` training.
458
- # This parameter must be set to `true` if the `DataSource` needs to be
459
- # used for `MLModel` training.
460
- # @return [Types::CreateDataSourceFromRedshiftOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
461
- #
462
- # * {Types::CreateDataSourceFromRedshiftOutput#data_source_id #DataSourceId} => String
463
- #
464
- # @example Request syntax with placeholder values
465
- # resp = client.create_data_source_from_redshift({
466
- # data_source_id: "EntityId", # required
467
- # data_source_name: "EntityName",
468
- # data_spec: { # required
469
- # database_information: { # required
470
- # database_name: "RedshiftDatabaseName", # required
471
- # cluster_identifier: "RedshiftClusterIdentifier", # required
472
- # },
473
- # select_sql_query: "RedshiftSelectSqlQuery", # required
474
- # database_credentials: { # required
475
- # username: "RedshiftDatabaseUsername", # required
476
- # password: "RedshiftDatabasePassword", # required
477
- # },
478
- # s3_staging_location: "S3Url", # required
479
- # data_rearrangement: "DataRearrangement",
480
- # data_schema: "DataSchema",
481
- # data_schema_uri: "S3Url",
482
- # },
483
- # role_arn: "RoleARN", # required
484
- # compute_statistics: false,
485
- # })
486
- #
487
- # @example Response structure
488
- # resp.data_source_id #=> String
489
- # @overload create_data_source_from_redshift(params = {})
490
- # @param [Hash] params ({})
491
- def create_data_source_from_redshift(params = {}, options = {})
492
- req = build_request(:create_data_source_from_redshift, params)
493
- req.send_request(options)
494
- end
277
+ # Creates a `DataSource` object from an [ Amazon Relational Database
278
+ # Service][1] (Amazon RDS). A `DataSource` references data that can be
279
+ # used to perform `CreateMLModel`, `CreateEvaluation`, or
280
+ # `CreateBatchPrediction` operations.
281
+ #
282
+ # `CreateDataSourceFromRDS` is an asynchronous operation. In response to
283
+ # `CreateDataSourceFromRDS`, Amazon Machine Learning (Amazon ML)
284
+ # immediately returns and sets the `DataSource` status to `PENDING`.
285
+ # After the `DataSource` is created and ready for use, Amazon ML sets
286
+ # the `Status` parameter to `COMPLETED`. `DataSource` in the `COMPLETED`
287
+ # or `PENDING` state can be used only to perform `>CreateMLModel`&gt;,
288
+ # `CreateEvaluation`, or `CreateBatchPrediction` operations.
289
+ #
290
+ # If Amazon ML cannot accept the input source, it sets the `Status`
291
+ # parameter to `FAILED` and includes an error message in the `Message`
292
+ # attribute of the `GetDataSource` operation response.
293
+ #
294
+ #
295
+ #
296
+ # [1]: http://aws.amazon.com/rds/
297
+ #
298
+ # @option params [required, String] :data_source_id
299
+ # A user-supplied ID that uniquely identifies the `DataSource`.
300
+ # Typically, an Amazon Resource Number (ARN) becomes the ID for a
301
+ # `DataSource`.
302
+ #
303
+ # @option params [String] :data_source_name
304
+ # A user-supplied name or description of the `DataSource`.
305
+ #
306
+ # @option params [required, Types::RDSDataSpec] :rds_data
307
+ # The data specification of an Amazon RDS `DataSource`\:
308
+ #
309
+ # * DatabaseInformation - * `DatabaseName` - The name of the Amazon RDS
310
+ # database.
311
+ # * `InstanceIdentifier ` - A unique identifier for the Amazon RDS
312
+ # database instance.
313
+ #
314
+ # * DatabaseCredentials - AWS Identity and Access Management (IAM)
315
+ # credentials that are used to connect to the Amazon RDS database.
316
+ #
317
+ # * ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by
318
+ # an EC2 instance to carry out the copy task from Amazon RDS to Amazon
319
+ # Simple Storage Service (Amazon S3). For more information, see [Role
320
+ # templates][1] for data pipelines.
321
+ #
322
+ # * ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS
323
+ # Data Pipeline service to monitor the progress of the copy task from
324
+ # Amazon RDS to Amazon S3. For more information, see [Role
325
+ # templates][1] for data pipelines.
326
+ #
327
+ # * SecurityInfo - The security information to use to access an RDS DB
328
+ # instance. You need to set up appropriate ingress rules for the
329
+ # security entity IDs provided to allow access to the Amazon RDS
330
+ # instance. Specify a \[`SubnetId`, `SecurityGroupIds`\] pair for a
331
+ # VPC-based RDS DB instance.
332
+ #
333
+ # * SelectSqlQuery - A query that is used to retrieve the observation
334
+ # data for the `Datasource`.
335
+ #
336
+ # * S3StagingLocation - The Amazon S3 location for staging Amazon RDS
337
+ # data. The data retrieved from Amazon RDS using `SelectSqlQuery` is
338
+ # stored in this location.
339
+ #
340
+ # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
341
+ #
342
+ # * DataSchema - A JSON string representing the schema. This is not
343
+ # required if `DataSchemaUri` is specified.
344
+ #
345
+ # * DataRearrangement - A JSON string that represents the splitting and
346
+ # rearrangement requirements for the `Datasource`.
347
+ #
348
+ #
349
+ # Sample - `
350
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
351
+ #
352
+ #
353
+ #
354
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
355
+ #
356
+ # @option params [required, String] :role_arn
357
+ # The role that Amazon ML assumes on behalf of the user to create and
358
+ # activate a data pipeline in the user's account and copy data using
359
+ # the `SelectSqlQuery` query from Amazon RDS to Amazon S3.
360
+ #
361
+ # @option params [Boolean] :compute_statistics
362
+ # The compute statistics for a `DataSource`. The statistics are
363
+ # generated from the observation data referenced by a `DataSource`.
364
+ # Amazon ML uses the statistics internally during `MLModel` training.
365
+ # This parameter must be set to `true` if the ``DataSource`` needs to be
366
+ # used for `MLModel` training.
367
+ #
368
+ # @return [Types::CreateDataSourceFromRDSOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
369
+ #
370
+ # * {Types::CreateDataSourceFromRDSOutput#data_source_id #data_source_id} => String
371
+ #
372
+ # @example Request syntax with placeholder values
373
+ #
374
+ # resp = client.create_data_source_from_rds({
375
+ # data_source_id: "EntityId", # required
376
+ # data_source_name: "EntityName",
377
+ # rds_data: { # required
378
+ # database_information: { # required
379
+ # instance_identifier: "RDSInstanceIdentifier", # required
380
+ # database_name: "RDSDatabaseName", # required
381
+ # },
382
+ # select_sql_query: "RDSSelectSqlQuery", # required
383
+ # database_credentials: { # required
384
+ # username: "RDSDatabaseUsername", # required
385
+ # password: "RDSDatabasePassword", # required
386
+ # },
387
+ # s3_staging_location: "S3Url", # required
388
+ # data_rearrangement: "DataRearrangement",
389
+ # data_schema: "DataSchema",
390
+ # data_schema_uri: "S3Url",
391
+ # resource_role: "EDPResourceRole", # required
392
+ # service_role: "EDPServiceRole", # required
393
+ # subnet_id: "EDPSubnetId", # required
394
+ # security_group_ids: ["EDPSecurityGroupId"], # required
395
+ # },
396
+ # role_arn: "RoleARN", # required
397
+ # compute_statistics: false,
398
+ # })
399
+ #
400
+ # @example Response structure
401
+ #
402
+ # resp.data_source_id #=> String
403
+ #
404
+ # @overload create_data_source_from_rds(params = {})
405
+ # @param [Hash] params ({})
406
+ def create_data_source_from_rds(params = {}, options = {})
407
+ req = build_request(:create_data_source_from_rds, params)
408
+ req.send_request(options)
409
+ end
495
410
 
496
- # Creates a `DataSource` object. A `DataSource` references data that can
497
- # be used to perform `CreateMLModel`, `CreateEvaluation`, or
498
- # `CreateBatchPrediction` operations.
499
- #
500
- # `CreateDataSourceFromS3` is an asynchronous operation. In response to
501
- # `CreateDataSourceFromS3`, Amazon Machine Learning (Amazon ML)
502
- # immediately returns and sets the `DataSource` status to `PENDING`.
503
- # After the `DataSource` has been created and is ready for use, Amazon
504
- # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in the
505
- # `COMPLETED` or `PENDING` state can be used to perform only
506
- # `CreateMLModel`, `CreateEvaluation` or `CreateBatchPrediction`
507
- # operations.
508
- #
509
- # If Amazon ML can't accept the input source, it sets the `Status`
510
- # parameter to `FAILED` and includes an error message in the `Message`
511
- # attribute of the `GetDataSource` operation response.
512
- #
513
- # The observation data used in a `DataSource` should be ready to use;
514
- # that is, it should have a consistent structure, and missing data
515
- # values should be kept to a minimum. The observation data must reside
516
- # in one or more .csv files in an Amazon Simple Storage Service (Amazon
517
- # S3) location, along with a schema that describes the data items by
518
- # name and type. The same schema must be used for all of the data files
519
- # referenced by the `DataSource`.
520
- #
521
- # After the `DataSource` has been created, it's ready to use in
522
- # evaluations and batch predictions. If you plan to use the `DataSource`
523
- # to train an `MLModel`, the `DataSource` also needs a recipe. A recipe
524
- # describes how each input variable will be used in training an
525
- # `MLModel`. Will the variable be included or excluded from training?
526
- # Will the variable be manipulated; for example, will it be combined
527
- # with another variable or will it be split apart into word
528
- # combinations? The recipe provides answers to these questions.
529
- # @option params [required, String] :data_source_id
530
- # A user-supplied identifier that uniquely identifies the `DataSource`.
531
- # @option params [String] :data_source_name
532
- # A user-supplied name or description of the `DataSource`.
533
- # @option params [required, Types::S3DataSpec] :data_spec
534
- # The data specification of a `DataSource`\:
535
- #
536
- # * DataLocationS3 - The Amazon S3 location of the observation data.
537
- #
538
- # * DataSchemaLocationS3 - The Amazon S3 location of the `DataSchema`.
539
- #
540
- # * DataSchema - A JSON string representing the schema. This is not
541
- # required if `DataSchemaUri` is specified.
542
- #
543
- # * DataRearrangement - A JSON string that represents the splitting and
544
- # rearrangement requirements for the `Datasource`.
545
- #
546
- # Sample - `
547
- # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
548
- # @option params [Boolean] :compute_statistics
549
- # The compute statistics for a `DataSource`. The statistics are
550
- # generated from the observation data referenced by a `DataSource`.
551
- # Amazon ML uses the statistics internally during `MLModel` training.
552
- # This parameter must be set to `true` if the ``DataSource`` needs to be
553
- # used for `MLModel` training.
554
- # @return [Types::CreateDataSourceFromS3Output] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
555
- #
556
- # * {Types::CreateDataSourceFromS3Output#data_source_id #DataSourceId} => String
557
- #
558
- # @example Request syntax with placeholder values
559
- # resp = client.create_data_source_from_s3({
560
- # data_source_id: "EntityId", # required
561
- # data_source_name: "EntityName",
562
- # data_spec: { # required
563
- # data_location_s3: "S3Url", # required
564
- # data_rearrangement: "DataRearrangement",
565
- # data_schema: "DataSchema",
566
- # data_schema_location_s3: "S3Url",
567
- # },
568
- # compute_statistics: false,
569
- # })
570
- #
571
- # @example Response structure
572
- # resp.data_source_id #=> String
573
- # @overload create_data_source_from_s3(params = {})
574
- # @param [Hash] params ({})
575
- def create_data_source_from_s3(params = {}, options = {})
576
- req = build_request(:create_data_source_from_s3, params)
577
- req.send_request(options)
578
- end
411
+ # Creates a `DataSource` from a database hosted on an Amazon Redshift
412
+ # cluster. A `DataSource` references data that can be used to perform
413
+ # either `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
414
+ # operations.
415
+ #
416
+ # `CreateDataSourceFromRedshift` is an asynchronous operation. In
417
+ # response to `CreateDataSourceFromRedshift`, Amazon Machine Learning
418
+ # (Amazon ML) immediately returns and sets the `DataSource` status to
419
+ # `PENDING`. After the `DataSource` is created and ready for use, Amazon
420
+ # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in
421
+ # `COMPLETED` or `PENDING` states can be used to perform only
422
+ # `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
423
+ # operations.
424
+ #
425
+ # If Amazon ML can't accept the input source, it sets the `Status`
426
+ # parameter to `FAILED` and includes an error message in the `Message`
427
+ # attribute of the `GetDataSource` operation response.
428
+ #
429
+ # The observations should be contained in the database hosted on an
430
+ # Amazon Redshift cluster and should be specified by a `SelectSqlQuery`
431
+ # query. Amazon ML executes an `Unload` command in Amazon Redshift to
432
+ # transfer the result set of the `SelectSqlQuery` query to
433
+ # `S3StagingLocation`.
434
+ #
435
+ # After the `DataSource` has been created, it's ready for use in
436
+ # evaluations and batch predictions. If you plan to use the `DataSource`
437
+ # to train an `MLModel`, the `DataSource` also requires a recipe. A
438
+ # recipe describes how each input variable will be used in training an
439
+ # `MLModel`. Will the variable be included or excluded from training?
440
+ # Will the variable be manipulated; for example, will it be combined
441
+ # with another variable or will it be split apart into word
442
+ # combinations? The recipe provides answers to these questions.
443
+ #
444
+ # <?oxy\_insert\_start author="laurama" timestamp="20160406T153842-0700">You can't change an existing datasource, but you can copy and modify
445
+ # the settings from an existing Amazon Redshift datasource to create a
446
+ # new datasource. To do so, call `GetDataSource` for an existing
447
+ # datasource and copy the values to a `CreateDataSource` call. Change
448
+ # the settings that you want to change and make sure that all required
449
+ # fields have the appropriate values.
450
+ #
451
+ # <?oxy\_insert\_end>
452
+ #
453
+ # @option params [required, String] :data_source_id
454
+ # A user-supplied ID that uniquely identifies the `DataSource`.
455
+ #
456
+ # @option params [String] :data_source_name
457
+ # A user-supplied name or description of the `DataSource`.
458
+ #
459
+ # @option params [required, Types::RedshiftDataSpec] :data_spec
460
+ # The data specification of an Amazon Redshift `DataSource`\:
461
+ #
462
+ # * DatabaseInformation - * `DatabaseName` - The name of the Amazon
463
+ # Redshift database.
464
+ # * ` ClusterIdentifier` - The unique ID for the Amazon Redshift
465
+ # cluster.
466
+ #
467
+ # * DatabaseCredentials - The AWS Identity and Access Management (IAM)
468
+ # credentials that are used to connect to the Amazon Redshift
469
+ # database.
470
+ #
471
+ # * SelectSqlQuery - The query that is used to retrieve the observation
472
+ # data for the `Datasource`.
473
+ #
474
+ # * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3)
475
+ # location for staging Amazon Redshift data. The data retrieved from
476
+ # Amazon Redshift using the `SelectSqlQuery` query is stored in this
477
+ # location.
478
+ #
479
+ # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
480
+ #
481
+ # * DataSchema - A JSON string representing the schema. This is not
482
+ # required if `DataSchemaUri` is specified.
483
+ #
484
+ # * DataRearrangement - A JSON string that represents the splitting and
485
+ # rearrangement requirements for the `DataSource`.
486
+ #
487
+ # Sample - `
488
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
489
+ #
490
+ # @option params [required, String] :role_arn
491
+ # A fully specified role Amazon Resource Name (ARN). Amazon ML assumes
492
+ # the role on behalf of the user to create the following:
493
+ #
494
+ # * A security group to allow Amazon ML to execute the `SelectSqlQuery`
495
+ # query on an Amazon Redshift cluster
496
+ #
497
+ # * An Amazon S3 bucket policy to grant Amazon ML read/write permissions
498
+ # on the `S3StagingLocation`
499
+ #
500
+ # @option params [Boolean] :compute_statistics
501
+ # The compute statistics for a `DataSource`. The statistics are
502
+ # generated from the observation data referenced by a `DataSource`.
503
+ # Amazon ML uses the statistics internally during `MLModel` training.
504
+ # This parameter must be set to `true` if the `DataSource` needs to be
505
+ # used for `MLModel` training.
506
+ #
507
+ # @return [Types::CreateDataSourceFromRedshiftOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
508
+ #
509
+ # * {Types::CreateDataSourceFromRedshiftOutput#data_source_id #data_source_id} => String
510
+ #
511
+ # @example Request syntax with placeholder values
512
+ #
513
+ # resp = client.create_data_source_from_redshift({
514
+ # data_source_id: "EntityId", # required
515
+ # data_source_name: "EntityName",
516
+ # data_spec: { # required
517
+ # database_information: { # required
518
+ # database_name: "RedshiftDatabaseName", # required
519
+ # cluster_identifier: "RedshiftClusterIdentifier", # required
520
+ # },
521
+ # select_sql_query: "RedshiftSelectSqlQuery", # required
522
+ # database_credentials: { # required
523
+ # username: "RedshiftDatabaseUsername", # required
524
+ # password: "RedshiftDatabasePassword", # required
525
+ # },
526
+ # s3_staging_location: "S3Url", # required
527
+ # data_rearrangement: "DataRearrangement",
528
+ # data_schema: "DataSchema",
529
+ # data_schema_uri: "S3Url",
530
+ # },
531
+ # role_arn: "RoleARN", # required
532
+ # compute_statistics: false,
533
+ # })
534
+ #
535
+ # @example Response structure
536
+ #
537
+ # resp.data_source_id #=> String
538
+ #
539
+ # @overload create_data_source_from_redshift(params = {})
540
+ # @param [Hash] params ({})
541
+ def create_data_source_from_redshift(params = {}, options = {})
542
+ req = build_request(:create_data_source_from_redshift, params)
543
+ req.send_request(options)
544
+ end
579
545
 
580
- # Creates a new `Evaluation` of an `MLModel`. An `MLModel` is evaluated
581
- # on a set of observations associated to a `DataSource`. Like a
582
- # `DataSource` for an `MLModel`, the `DataSource` for an `Evaluation`
583
- # contains values for the `Target Variable`. The `Evaluation` compares
584
- # the predicted result for each observation to the actual outcome and
585
- # provides a summary so that you know how effective the `MLModel`
586
- # functions on the test data. Evaluation generates a relevant
587
- # performance metric, such as BinaryAUC, RegressionRMSE or
588
- # MulticlassAvgFScore based on the corresponding `MLModelType`\:
589
- # `BINARY`, `REGRESSION` or `MULTICLASS`.
590
- #
591
- # `CreateEvaluation` is an asynchronous operation. In response to
592
- # `CreateEvaluation`, Amazon Machine Learning (Amazon ML) immediately
593
- # returns and sets the evaluation status to `PENDING`. After the
594
- # `Evaluation` is created and ready for use, Amazon ML sets the status
595
- # to `COMPLETED`.
596
- #
597
- # You can use the `GetEvaluation` operation to check progress of the
598
- # evaluation during the creation operation.
599
- # @option params [required, String] :evaluation_id
600
- # A user-supplied ID that uniquely identifies the `Evaluation`.
601
- # @option params [String] :evaluation_name
602
- # A user-supplied name or description of the `Evaluation`.
603
- # @option params [required, String] :ml_model_id
604
- # The ID of the `MLModel` to evaluate.
605
- #
606
- # The schema used in creating the `MLModel` must match the schema of the
607
- # `DataSource` used in the `Evaluation`.
608
- # @option params [required, String] :evaluation_data_source_id
609
- # The ID of the `DataSource` for the evaluation. The schema of the
610
- # `DataSource` must match the schema used to create the `MLModel`.
611
- # @return [Types::CreateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
612
- #
613
- # * {Types::CreateEvaluationOutput#evaluation_id #EvaluationId} => String
614
- #
615
- # @example Request syntax with placeholder values
616
- # resp = client.create_evaluation({
617
- # evaluation_id: "EntityId", # required
618
- # evaluation_name: "EntityName",
619
- # ml_model_id: "EntityId", # required
620
- # evaluation_data_source_id: "EntityId", # required
621
- # })
622
- #
623
- # @example Response structure
624
- # resp.evaluation_id #=> String
625
- # @overload create_evaluation(params = {})
626
- # @param [Hash] params ({})
627
- def create_evaluation(params = {}, options = {})
628
- req = build_request(:create_evaluation, params)
629
- req.send_request(options)
630
- end
546
+ # Creates a `DataSource` object. A `DataSource` references data that can
547
+ # be used to perform `CreateMLModel`, `CreateEvaluation`, or
548
+ # `CreateBatchPrediction` operations.
549
+ #
550
+ # `CreateDataSourceFromS3` is an asynchronous operation. In response to
551
+ # `CreateDataSourceFromS3`, Amazon Machine Learning (Amazon ML)
552
+ # immediately returns and sets the `DataSource` status to `PENDING`.
553
+ # After the `DataSource` has been created and is ready for use, Amazon
554
+ # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in the
555
+ # `COMPLETED` or `PENDING` state can be used to perform only
556
+ # `CreateMLModel`, `CreateEvaluation` or `CreateBatchPrediction`
557
+ # operations.
558
+ #
559
+ # If Amazon ML can't accept the input source, it sets the `Status`
560
+ # parameter to `FAILED` and includes an error message in the `Message`
561
+ # attribute of the `GetDataSource` operation response.
562
+ #
563
+ # The observation data used in a `DataSource` should be ready to use;
564
+ # that is, it should have a consistent structure, and missing data
565
+ # values should be kept to a minimum. The observation data must reside
566
+ # in one or more .csv files in an Amazon Simple Storage Service (Amazon
567
+ # S3) location, along with a schema that describes the data items by
568
+ # name and type. The same schema must be used for all of the data files
569
+ # referenced by the `DataSource`.
570
+ #
571
+ # After the `DataSource` has been created, it's ready to use in
572
+ # evaluations and batch predictions. If you plan to use the `DataSource`
573
+ # to train an `MLModel`, the `DataSource` also needs a recipe. A recipe
574
+ # describes how each input variable will be used in training an
575
+ # `MLModel`. Will the variable be included or excluded from training?
576
+ # Will the variable be manipulated; for example, will it be combined
577
+ # with another variable or will it be split apart into word
578
+ # combinations? The recipe provides answers to these questions.
579
+ #
580
+ # @option params [required, String] :data_source_id
581
+ # A user-supplied identifier that uniquely identifies the `DataSource`.
582
+ #
583
+ # @option params [String] :data_source_name
584
+ # A user-supplied name or description of the `DataSource`.
585
+ #
586
+ # @option params [required, Types::S3DataSpec] :data_spec
587
+ # The data specification of a `DataSource`\:
588
+ #
589
+ # * DataLocationS3 - The Amazon S3 location of the observation data.
590
+ #
591
+ # * DataSchemaLocationS3 - The Amazon S3 location of the `DataSchema`.
592
+ #
593
+ # * DataSchema - A JSON string representing the schema. This is not
594
+ # required if `DataSchemaUri` is specified.
595
+ #
596
+ # * DataRearrangement - A JSON string that represents the splitting and
597
+ # rearrangement requirements for the `Datasource`.
598
+ #
599
+ # Sample - `
600
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
601
+ #
602
+ # @option params [Boolean] :compute_statistics
603
+ # The compute statistics for a `DataSource`. The statistics are
604
+ # generated from the observation data referenced by a `DataSource`.
605
+ # Amazon ML uses the statistics internally during `MLModel` training.
606
+ # This parameter must be set to `true` if the `DataSource` needs to be
607
+ # used for `MLModel` training.
608
+ #
609
+ # @return [Types::CreateDataSourceFromS3Output] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
610
+ #
611
+ # * {Types::CreateDataSourceFromS3Output#data_source_id #data_source_id} => String
612
+ #
613
+ # @example Request syntax with placeholder values
614
+ #
615
+ # resp = client.create_data_source_from_s3({
616
+ # data_source_id: "EntityId", # required
617
+ # data_source_name: "EntityName",
618
+ # data_spec: { # required
619
+ # data_location_s3: "S3Url", # required
620
+ # data_rearrangement: "DataRearrangement",
621
+ # data_schema: "DataSchema",
622
+ # data_schema_location_s3: "S3Url",
623
+ # },
624
+ # compute_statistics: false,
625
+ # })
626
+ #
627
+ # @example Response structure
628
+ #
629
+ # resp.data_source_id #=> String
630
+ #
631
+ # @overload create_data_source_from_s3(params = {})
632
+ # @param [Hash] params ({})
633
+ def create_data_source_from_s3(params = {}, options = {})
634
+ req = build_request(:create_data_source_from_s3, params)
635
+ req.send_request(options)
636
+ end
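A minimal usage sketch for the operation above (not part of the generated client). The region, IDs, bucket, and schema location are hypothetical placeholders, and the polling interval is arbitrary.

    require 'aws-sdk-machinelearning'

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    # Hypothetical S3 locations; the schema file describes each CSV column by name and type.
    resp = ml.create_data_source_from_s3(
      data_source_id: 'example-banking-ds-01',
      data_source_name: 'Banking observations',
      data_spec: {
        data_location_s3: 's3://example-bucket/banking.csv',
        data_schema_location_s3: 's3://example-bucket/banking.csv.schema',
      },
      compute_statistics: true # required if the DataSource will train an MLModel
    )

    # The call returns immediately with status PENDING; poll GetDataSource
    # until Amazon ML marks the DataSource COMPLETED (or FAILED).
    loop do
      status = ml.get_data_source(data_source_id: resp.data_source_id).status
      break if %w[COMPLETED FAILED].include?(status)
      sleep 30
    end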
631
637
 
632
- # Creates a new `MLModel` using the `DataSource` and the recipe as
633
- # information sources.
634
- #
635
- # An `MLModel` is nearly immutable. Users can update only the
636
- # `MLModelName` and the `ScoreThreshold` in an `MLModel` without
637
- # creating a new `MLModel`.
638
- #
639
- # `CreateMLModel` is an asynchronous operation. In response to
640
- # `CreateMLModel`, Amazon Machine Learning (Amazon ML) immediately
641
- # returns and sets the `MLModel` status to `PENDING`. After the
642
- # `MLModel` has been created and is ready for use, Amazon ML sets the
643
- # status to `COMPLETED`.
644
- #
645
- # You can use the `GetMLModel` operation to check the progress of the
646
- # `MLModel` during the creation operation.
647
- #
648
- # `CreateMLModel` requires a `DataSource` with computed statistics,
649
- # which can be created by setting `ComputeStatistics` to `true` in
650
- # `CreateDataSourceFromRDS`, `CreateDataSourceFromS3`, or
651
- # `CreateDataSourceFromRedshift` operations.
652
- # @option params [required, String] :ml_model_id
653
- # A user-supplied ID that uniquely identifies the `MLModel`.
654
- # @option params [String] :ml_model_name
655
- # A user-supplied name or description of the `MLModel`.
656
- # @option params [required, String] :ml_model_type
657
- # The category of supervised learning that this `MLModel` will address.
658
- # Choose from the following types:
659
- #
660
- # * Choose `REGRESSION` if the `MLModel` will be used to predict a
661
- # numeric value.
662
- # * Choose `BINARY` if the `MLModel` result has two possible values.
663
- # * Choose `MULTICLASS` if the `MLModel` result has a limited number of
664
- # values.
665
- #
666
- # For more information, see the [Amazon Machine Learning Developer
667
- # Guide][1].
668
- #
669
- #
670
- #
671
- # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
672
- # @option params [Hash<String,String>] :parameters
673
- # A list of the training parameters in the `MLModel`. The list is
674
- # implemented as a map of key-value pairs.
675
- #
676
- # The following is the current set of training parameters:
677
- #
678
- # * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the model.
679
- # Depending on the input data, the size of the model might affect its
680
- # performance.
681
- #
682
- # The value is an integer that ranges from `100000` to `2147483648`.
683
- # The default value is `33554432`.
684
- #
685
- # * `sgd.maxPasses` - The number of times that the training process
686
- # traverses the observations to build the `MLModel`. The value is an
687
- # integer that ranges from `1` to `10000`. The default value is `10`.
688
- #
689
- # * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
690
- # Shuffling the data improves a model's ability to find the optimal
691
- # solution for a variety of data types. The valid values are `auto`
692
- # and `none`. The default value is `none`. We
693
- # strongly recommend that you shuffle your
694
- # data.
695
- #
696
- # * `sgd.l1RegularizationAmount` - The coefficient regularization L1
697
- # norm. It controls overfitting the data by penalizing large
698
- # coefficients. This tends to drive coefficients to zero, resulting in
699
- # a sparse feature set. If you use this parameter, start by specifying
700
- # a small value, such as `1.0E-08`.
701
- #
702
- # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
703
- # default is to not use L1 normalization. This parameter can't be
704
- # used when `L2` is specified. Use this parameter sparingly.
705
- #
706
- # * `sgd.l2RegularizationAmount` - The coefficient regularization L2
707
- # norm. It controls overfitting the data by penalizing large
708
- # coefficients. This tends to drive coefficients to small, nonzero
709
- # values. If you use this parameter, start by specifying a small
710
- # value, such as `1.0E-08`.
711
- #
712
- # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
713
- # default is to not use L2 normalization. This parameter can't be
714
- # used when `L1` is specified. Use this parameter sparingly.
715
- # @option params [required, String] :training_data_source_id
716
- # The `DataSource` that points to the training data.
717
- # @option params [String] :recipe
718
- # The data recipe for creating the `MLModel`. You must specify either
719
- # the recipe or its URI. If you don't specify a recipe or its URI,
720
- # Amazon ML creates a default.
721
- # @option params [String] :recipe_uri
722
- # The Amazon Simple Storage Service (Amazon S3) location and file name
723
- # that contains the `MLModel` recipe. You must specify either the recipe
724
- # or its URI. If you don't specify a recipe or its URI, Amazon ML
725
- # creates a default.
726
- # @return [Types::CreateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
727
- #
728
- # * {Types::CreateMLModelOutput#ml_model_id #MLModelId} => String
729
- #
730
- # @example Request syntax with placeholder values
731
- # resp = client.create_ml_model({
732
- # ml_model_id: "EntityId", # required
733
- # ml_model_name: "EntityName",
734
- # ml_model_type: "REGRESSION", # required, accepts REGRESSION, BINARY, MULTICLASS
735
- # parameters: {
736
- # "StringType" => "StringType",
737
- # },
738
- # training_data_source_id: "EntityId", # required
739
- # recipe: "Recipe",
740
- # recipe_uri: "S3Url",
741
- # })
742
- #
743
- # @example Response structure
744
- # resp.ml_model_id #=> String
745
- # @overload create_ml_model(params = {})
746
- # @param [Hash] params ({})
747
- def create_ml_model(params = {}, options = {})
748
- req = build_request(:create_ml_model, params)
749
- req.send_request(options)
750
- end
638
+ # Creates a new `Evaluation` of an `MLModel`. An `MLModel` is evaluated
639
+ # on a set of observations associated to a `DataSource`. Like a
640
+ # `DataSource` for an `MLModel`, the `DataSource` for an `Evaluation`
641
+ # contains values for the `Target Variable`. The `Evaluation` compares
642
+ # the predicted result for each observation to the actual outcome and
643
+ # provides a summary so that you know how effective the `MLModel`
644
+ # functions on the test data. Evaluation generates a relevant
645
+ # performance metric, such as BinaryAUC, RegressionRMSE or
646
+ # MulticlassAvgFScore based on the corresponding `MLModelType`\:
647
+ # `BINARY`, `REGRESSION` or `MULTICLASS`.
648
+ #
649
+ # `CreateEvaluation` is an asynchronous operation. In response to
650
+ # `CreateEvaluation`, Amazon Machine Learning (Amazon ML) immediately
651
+ # returns and sets the evaluation status to `PENDING`. After the
652
+ # `Evaluation` is created and ready for use, Amazon ML sets the status
653
+ # to `COMPLETED`.
654
+ #
655
+ # You can use the `GetEvaluation` operation to check progress of the
656
+ # evaluation during the creation operation.
657
+ #
658
+ # @option params [required, String] :evaluation_id
659
+ # A user-supplied ID that uniquely identifies the `Evaluation`.
660
+ #
661
+ # @option params [String] :evaluation_name
662
+ # A user-supplied name or description of the `Evaluation`.
663
+ #
664
+ # @option params [required, String] :ml_model_id
665
+ # The ID of the `MLModel` to evaluate.
666
+ #
667
+ # The schema used in creating the `MLModel` must match the schema of the
668
+ # `DataSource` used in the `Evaluation`.
669
+ #
670
+ # @option params [required, String] :evaluation_data_source_id
671
+ # The ID of the `DataSource` for the evaluation. The schema of the
672
+ # `DataSource` must match the schema used to create the `MLModel`.
673
+ #
674
+ # @return [Types::CreateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
675
+ #
676
+ # * {Types::CreateEvaluationOutput#evaluation_id #evaluation_id} => String
677
+ #
678
+ # @example Request syntax with placeholder values
679
+ #
680
+ # resp = client.create_evaluation({
681
+ # evaluation_id: "EntityId", # required
682
+ # evaluation_name: "EntityName",
683
+ # ml_model_id: "EntityId", # required
684
+ # evaluation_data_source_id: "EntityId", # required
685
+ # })
686
+ #
687
+ # @example Response structure
688
+ #
689
+ # resp.evaluation_id #=> String
690
+ #
691
+ # @overload create_evaluation(params = {})
692
+ # @param [Hash] params ({})
693
+ def create_evaluation(params = {}, options = {})
694
+ req = build_request(:create_evaluation, params)
695
+ req.send_request(options)
696
+ end
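As a rough illustration of the workflow described above (hypothetical IDs; the model and the held-out DataSource are assumed to already exist and to share a schema):

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    ml.create_evaluation(
      evaluation_id: 'example-eval-01',
      evaluation_name: 'Holdout evaluation',
      ml_model_id: 'example-model-01',
      evaluation_data_source_id: 'example-holdout-ds-01'
    )

    # Once the status reaches COMPLETED, the metric matching the model type
    # (for example BinaryAUC for a BINARY model) is available on the evaluation.
    evaluation = ml.get_evaluation(evaluation_id: 'example-eval-01')
    puts evaluation.status
    puts evaluation.performance_metrics.properties if evaluation.status == 'COMPLETED'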
751
697
 
752
- # Creates a real-time endpoint for the `MLModel`. The endpoint contains
753
- # the URI of the `MLModel`; that is, the location to send real-time
754
- # prediction requests for the specified `MLModel`.
755
- # @option params [required, String] :ml_model_id
756
- # The ID assigned to the `MLModel` during creation.
757
- # @return [Types::CreateRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
758
- #
759
- # * {Types::CreateRealtimeEndpointOutput#ml_model_id #MLModelId} => String
760
- # * {Types::CreateRealtimeEndpointOutput#realtime_endpoint_info #RealtimeEndpointInfo} => Types::RealtimeEndpointInfo
761
- #
762
- # @example Request syntax with placeholder values
763
- # resp = client.create_realtime_endpoint({
764
- # ml_model_id: "EntityId", # required
765
- # })
766
- #
767
- # @example Response structure
768
- # resp.ml_model_id #=> String
769
- # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
770
- # resp.realtime_endpoint_info.created_at #=> Time
771
- # resp.realtime_endpoint_info.endpoint_url #=> String
772
- # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
773
- # @overload create_realtime_endpoint(params = {})
774
- # @param [Hash] params ({})
775
- def create_realtime_endpoint(params = {}, options = {})
776
- req = build_request(:create_realtime_endpoint, params)
777
- req.send_request(options)
778
- end
698
+ # Creates a new `MLModel` using the `DataSource` and the recipe as
699
+ # information sources.
700
+ #
701
+ # An `MLModel` is nearly immutable. Users can update only the
702
+ # `MLModelName` and the `ScoreThreshold` in an `MLModel` without
703
+ # creating a new `MLModel`.
704
+ #
705
+ # `CreateMLModel` is an asynchronous operation. In response to
706
+ # `CreateMLModel`, Amazon Machine Learning (Amazon ML) immediately
707
+ # returns and sets the `MLModel` status to `PENDING`. After the
708
+ # `MLModel` has been created and is ready for use, Amazon ML sets the
709
+ # status to `COMPLETED`.
710
+ #
711
+ # You can use the `GetMLModel` operation to check the progress of the
712
+ # `MLModel` during the creation operation.
713
+ #
714
+ # `CreateMLModel` requires a `DataSource` with computed statistics,
715
+ # which can be created by setting `ComputeStatistics` to `true` in
716
+ # `CreateDataSourceFromRDS`, `CreateDataSourceFromS3`, or
717
+ # `CreateDataSourceFromRedshift` operations.
718
+ #
719
+ # @option params [required, String] :ml_model_id
720
+ # A user-supplied ID that uniquely identifies the `MLModel`.
721
+ #
722
+ # @option params [String] :ml_model_name
723
+ # A user-supplied name or description of the `MLModel`.
724
+ #
725
+ # @option params [required, String] :ml_model_type
726
+ # The category of supervised learning that this `MLModel` will address.
727
+ # Choose from the following types:
728
+ #
729
+ # * Choose `REGRESSION` if the `MLModel` will be used to predict a
730
+ # numeric value.
731
+ # * Choose `BINARY` if the `MLModel` result has two possible values.
732
+ # * Choose `MULTICLASS` if the `MLModel` result has a limited number of
733
+ # values.
734
+ #
735
+ # For more information, see the [Amazon Machine Learning Developer
736
+ # Guide][1].
737
+ #
738
+ #
739
+ #
740
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
741
+ #
742
+ # @option params [Hash<String,String>] :parameters
743
+ # A list of the training parameters in the `MLModel`. The list is
744
+ # implemented as a map of key-value pairs.
745
+ #
746
+ # The following is the current set of training parameters:
747
+ #
748
+ # * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the model.
749
+ # Depending on the input data, the size of the model might affect its
750
+ # performance.
751
+ #
752
+ # The value is an integer that ranges from `100000` to `2147483648`.
753
+ # The default value is `33554432`.
754
+ #
755
+ # * `sgd.maxPasses` - The number of times that the training process
756
+ # traverses the observations to build the `MLModel`. The value is an
757
+ # integer that ranges from `1` to `10000`. The default value is `10`.
758
+ #
759
+ # * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
760
+ # Shuffling the data improves a model's ability to find the optimal
761
+ # solution for a variety of data types. The valid values are `auto`
762
+ # and `none`. The default value is `none`. We
763
+ # strongly recommend that you shuffle your
764
+ # data.
765
+ #
766
+ # * `sgd.l1RegularizationAmount` - The coefficient regularization L1
767
+ # norm. It controls overfitting the data by penalizing large
768
+ # coefficients. This tends to drive coefficients to zero, resulting in
769
+ # a sparse feature set. If you use this parameter, start by specifying
770
+ # a small value, such as `1.0E-08`.
771
+ #
772
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
773
+ # default is to not use L1 normalization. This parameter can't be
774
+ # used when `L2` is specified. Use this parameter sparingly.
775
+ #
776
+ # * `sgd.l2RegularizationAmount` - The coefficient regularization L2
777
+ # norm. It controls overfitting the data by penalizing large
778
+ # coefficients. This tends to drive coefficients to small, nonzero
779
+ # values. If you use this parameter, start by specifying a small
780
+ # value, such as `1.0E-08`.
781
+ #
782
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
783
+ # default is to not use L2 normalization. This parameter can't be
784
+ # used when `L1` is specified. Use this parameter sparingly.
785
+ #
786
+ # @option params [required, String] :training_data_source_id
787
+ # The `DataSource` that points to the training data.
788
+ #
789
+ # @option params [String] :recipe
790
+ # The data recipe for creating the `MLModel`. You must specify either
791
+ # the recipe or its URI. If you don't specify a recipe or its URI,
792
+ # Amazon ML creates a default.
793
+ #
794
+ # @option params [String] :recipe_uri
795
+ # The Amazon Simple Storage Service (Amazon S3) location and file name
796
+ # that contains the `MLModel` recipe. You must specify either the recipe
797
+ # or its URI. If you don't specify a recipe or its URI, Amazon ML
798
+ # creates a default.
799
+ #
800
+ # @return [Types::CreateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
801
+ #
802
+ # * {Types::CreateMLModelOutput#ml_model_id #ml_model_id} => String
803
+ #
804
+ # @example Request syntax with placeholder values
805
+ #
806
+ # resp = client.create_ml_model({
807
+ # ml_model_id: "EntityId", # required
808
+ # ml_model_name: "EntityName",
809
+ # ml_model_type: "REGRESSION", # required, accepts REGRESSION, BINARY, MULTICLASS
810
+ # parameters: {
811
+ # "StringType" => "StringType",
812
+ # },
813
+ # training_data_source_id: "EntityId", # required
814
+ # recipe: "Recipe",
815
+ # recipe_uri: "S3Url",
816
+ # })
817
+ #
818
+ # @example Response structure
819
+ #
820
+ # resp.ml_model_id #=> String
821
+ #
822
+ # @overload create_ml_model(params = {})
823
+ # @param [Hash] params ({})
824
+ def create_ml_model(params = {}, options = {})
825
+ req = build_request(:create_ml_model, params)
826
+ req.send_request(options)
827
+ end
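A hedged sketch of a typical call (hypothetical IDs; the training DataSource is assumed to have been created with compute_statistics: true). Note that training parameter values are passed as strings:

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    ml.create_ml_model(
      ml_model_id: 'example-model-01',
      ml_model_name: 'Campaign response model',
      ml_model_type: 'BINARY',
      training_data_source_id: 'example-banking-ds-01',
      parameters: {
        'sgd.maxPasses'              => '10',
        'sgd.shuffleType'            => 'auto',
        'sgd.l2RegularizationAmount' => '1.0E-08',
      }
      # No recipe or recipe_uri given, so Amazon ML falls back to its default recipe.
    )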
779
828
 
780
- # Assigns the DELETED status to a `BatchPrediction`, rendering it
781
- # unusable.
782
- #
783
- # After using the `DeleteBatchPrediction` operation, you can use the
784
- # GetBatchPrediction operation to verify that the status of the
785
- # `BatchPrediction` changed to DELETED.
786
- #
787
- # **Caution:** The result of the `DeleteBatchPrediction` operation is
788
- # irreversible.
789
- # @option params [required, String] :batch_prediction_id
790
- # A user-supplied ID that uniquely identifies the `BatchPrediction`.
791
- # @return [Types::DeleteBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
792
- #
793
- # * {Types::DeleteBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
794
- #
795
- # @example Request syntax with placeholder values
796
- # resp = client.delete_batch_prediction({
797
- # batch_prediction_id: "EntityId", # required
798
- # })
799
- #
800
- # @example Response structure
801
- # resp.batch_prediction_id #=> String
802
- # @overload delete_batch_prediction(params = {})
803
- # @param [Hash] params ({})
804
- def delete_batch_prediction(params = {}, options = {})
805
- req = build_request(:delete_batch_prediction, params)
806
- req.send_request(options)
807
- end
829
+ # Creates a real-time endpoint for the `MLModel`. The endpoint contains
830
+ # the URI of the `MLModel`; that is, the location to send real-time
831
+ # prediction requests for the specified `MLModel`.
832
+ #
833
+ # @option params [required, String] :ml_model_id
834
+ # The ID assigned to the `MLModel` during creation.
835
+ #
836
+ # @return [Types::CreateRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
837
+ #
838
+ # * {Types::CreateRealtimeEndpointOutput#ml_model_id #ml_model_id} => String
839
+ # * {Types::CreateRealtimeEndpointOutput#realtime_endpoint_info #realtime_endpoint_info} => Types::RealtimeEndpointInfo
840
+ #
841
+ # @example Request syntax with placeholder values
842
+ #
843
+ # resp = client.create_realtime_endpoint({
844
+ # ml_model_id: "EntityId", # required
845
+ # })
846
+ #
847
+ # @example Response structure
848
+ #
849
+ # resp.ml_model_id #=> String
850
+ # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
851
+ # resp.realtime_endpoint_info.created_at #=> Time
852
+ # resp.realtime_endpoint_info.endpoint_url #=> String
853
+ # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
854
+ #
855
+ # @overload create_realtime_endpoint(params = {})
856
+ # @param [Hash] params ({})
857
+ def create_realtime_endpoint(params = {}, options = {})
858
+ req = build_request(:create_realtime_endpoint, params)
859
+ req.send_request(options)
860
+ end
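A sketch of creating the endpoint and then scoring a single record with the client's predict operation (hypothetical model ID and record attributes; the endpoint must report READY before predictions succeed):

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    resp = ml.create_realtime_endpoint(ml_model_id: 'example-model-01')
    endpoint_url = resp.realtime_endpoint_info.endpoint_url

    # Record keys must match the variable names in the DataSource schema;
    # all values are passed as strings.
    prediction = ml.predict(
      ml_model_id: 'example-model-01',
      record: { 'age' => '42', 'balance' => '1500' },
      predict_endpoint: endpoint_url
    )
    puts prediction.prediction.predicted_label # for a BINARY model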
808
861
 
809
- # Assigns the DELETED status to a `DataSource`, rendering it unusable.
810
- #
811
- # After using the `DeleteDataSource` operation, you can use the
812
- # GetDataSource operation to verify that the status of the `DataSource`
813
- # changed to DELETED.
814
- #
815
- # **Caution:** The results of the `DeleteDataSource` operation are
816
- # irreversible.
817
- # @option params [required, String] :data_source_id
818
- # A user-supplied ID that uniquely identifies the `DataSource`.
819
- # @return [Types::DeleteDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
820
- #
821
- # * {Types::DeleteDataSourceOutput#data_source_id #DataSourceId} => String
822
- #
823
- # @example Request syntax with placeholder values
824
- # resp = client.delete_data_source({
825
- # data_source_id: "EntityId", # required
826
- # })
827
- #
828
- # @example Response structure
829
- # resp.data_source_id #=> String
830
- # @overload delete_data_source(params = {})
831
- # @param [Hash] params ({})
832
- def delete_data_source(params = {}, options = {})
833
- req = build_request(:delete_data_source, params)
834
- req.send_request(options)
835
- end
862
+ # Assigns the DELETED status to a `BatchPrediction`, rendering it
863
+ # unusable.
864
+ #
865
+ # After using the `DeleteBatchPrediction` operation, you can use the
866
+ # GetBatchPrediction operation to verify that the status of the
867
+ # `BatchPrediction` changed to DELETED.
868
+ #
869
+ # **Caution:** The result of the `DeleteBatchPrediction` operation is
870
+ # irreversible.
871
+ #
872
+ # @option params [required, String] :batch_prediction_id
873
+ # A user-supplied ID that uniquely identifies the `BatchPrediction`.
874
+ #
875
+ # @return [Types::DeleteBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
876
+ #
877
+ # * {Types::DeleteBatchPredictionOutput#batch_prediction_id #batch_prediction_id} => String
878
+ #
879
+ # @example Request syntax with placeholder values
880
+ #
881
+ # resp = client.delete_batch_prediction({
882
+ # batch_prediction_id: "EntityId", # required
883
+ # })
884
+ #
885
+ # @example Response structure
886
+ #
887
+ # resp.batch_prediction_id #=> String
888
+ #
889
+ # @overload delete_batch_prediction(params = {})
890
+ # @param [Hash] params ({})
891
+ def delete_batch_prediction(params = {}, options = {})
892
+ req = build_request(:delete_batch_prediction, params)
893
+ req.send_request(options)
894
+ end
836
895
 
837
- # Assigns the `DELETED` status to an `Evaluation`, rendering it
838
- # unusable.
839
- #
840
- # After invoking the `DeleteEvaluation` operation, you can use the
841
- # `GetEvaluation` operation to verify that the status of the
842
- # `Evaluation` changed to `DELETED`.
843
- #
844
- # <caution markdown="1"><title>Caution</title> The results of the `DeleteEvaluation` operation are irreversible.
845
- #
846
- # </caution>
847
- # @option params [required, String] :evaluation_id
848
- # A user-supplied ID that uniquely identifies the `Evaluation` to
849
- # delete.
850
- # @return [Types::DeleteEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
851
- #
852
- # * {Types::DeleteEvaluationOutput#evaluation_id #EvaluationId} => String
853
- #
854
- # @example Request syntax with placeholder values
855
- # resp = client.delete_evaluation({
856
- # evaluation_id: "EntityId", # required
857
- # })
858
- #
859
- # @example Response structure
860
- # resp.evaluation_id #=> String
861
- # @overload delete_evaluation(params = {})
862
- # @param [Hash] params ({})
863
- def delete_evaluation(params = {}, options = {})
864
- req = build_request(:delete_evaluation, params)
865
- req.send_request(options)
866
- end
896
+ # Assigns the DELETED status to a `DataSource`, rendering it unusable.
897
+ #
898
+ # After using the `DeleteDataSource` operation, you can use the
899
+ # GetDataSource operation to verify that the status of the `DataSource`
900
+ # changed to DELETED.
901
+ #
902
+ # **Caution:** The results of the `DeleteDataSource` operation are
903
+ # irreversible.
904
+ #
905
+ # @option params [required, String] :data_source_id
906
+ # A user-supplied ID that uniquely identifies the `DataSource`.
907
+ #
908
+ # @return [Types::DeleteDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
909
+ #
910
+ # * {Types::DeleteDataSourceOutput#data_source_id #data_source_id} => String
911
+ #
912
+ # @example Request syntax with placeholder values
913
+ #
914
+ # resp = client.delete_data_source({
915
+ # data_source_id: "EntityId", # required
916
+ # })
917
+ #
918
+ # @example Response structure
919
+ #
920
+ # resp.data_source_id #=> String
921
+ #
922
+ # @overload delete_data_source(params = {})
923
+ # @param [Hash] params ({})
924
+ def delete_data_source(params = {}, options = {})
925
+ req = build_request(:delete_data_source, params)
926
+ req.send_request(options)
927
+ end
867
928
 
868
- # Assigns the `DELETED` status to an `MLModel`, rendering it unusable.
869
- #
870
- # After using the `DeleteMLModel` operation, you can use the
871
- # `GetMLModel` operation to verify that the status of the `MLModel`
872
- # changed to DELETED.
873
- #
874
- # **Caution:** The result of the `DeleteMLModel` operation is
875
- # irreversible.
876
- # @option params [required, String] :ml_model_id
877
- # A user-supplied ID that uniquely identifies the `MLModel`.
878
- # @return [Types::DeleteMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
879
- #
880
- # * {Types::DeleteMLModelOutput#ml_model_id #MLModelId} => String
881
- #
882
- # @example Request syntax with placeholder values
883
- # resp = client.delete_ml_model({
884
- # ml_model_id: "EntityId", # required
885
- # })
886
- #
887
- # @example Response structure
888
- # resp.ml_model_id #=> String
889
- # @overload delete_ml_model(params = {})
890
- # @param [Hash] params ({})
891
- def delete_ml_model(params = {}, options = {})
892
- req = build_request(:delete_ml_model, params)
893
- req.send_request(options)
894
- end
929
+ # Assigns the `DELETED` status to an `Evaluation`, rendering it
930
+ # unusable.
931
+ #
932
+ # After invoking the `DeleteEvaluation` operation, you can use the
933
+ # `GetEvaluation` operation to verify that the status of the
934
+ # `Evaluation` changed to `DELETED`.
935
+ #
936
+ # <caution markdown="1"><title>Caution</title> The results of the `DeleteEvaluation` operation are irreversible.
937
+ #
938
+ # </caution>
939
+ #
940
+ # @option params [required, String] :evaluation_id
941
+ # A user-supplied ID that uniquely identifies the `Evaluation` to
942
+ # delete.
943
+ #
944
+ # @return [Types::DeleteEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
945
+ #
946
+ # * {Types::DeleteEvaluationOutput#evaluation_id #evaluation_id} => String
947
+ #
948
+ # @example Request syntax with placeholder values
949
+ #
950
+ # resp = client.delete_evaluation({
951
+ # evaluation_id: "EntityId", # required
952
+ # })
953
+ #
954
+ # @example Response structure
955
+ #
956
+ # resp.evaluation_id #=> String
957
+ #
958
+ # @overload delete_evaluation(params = {})
959
+ # @param [Hash] params ({})
960
+ def delete_evaluation(params = {}, options = {})
961
+ req = build_request(:delete_evaluation, params)
962
+ req.send_request(options)
963
+ end
895
964
 
896
- # Deletes a real time endpoint of an `MLModel`.
897
- # @option params [required, String] :ml_model_id
898
- # The ID assigned to the `MLModel` during creation.
899
- # @return [Types::DeleteRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
900
- #
901
- # * {Types::DeleteRealtimeEndpointOutput#ml_model_id #MLModelId} => String
902
- # * {Types::DeleteRealtimeEndpointOutput#realtime_endpoint_info #RealtimeEndpointInfo} => Types::RealtimeEndpointInfo
903
- #
904
- # @example Request syntax with placeholder values
905
- # resp = client.delete_realtime_endpoint({
906
- # ml_model_id: "EntityId", # required
907
- # })
908
- #
909
- # @example Response structure
910
- # resp.ml_model_id #=> String
911
- # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
912
- # resp.realtime_endpoint_info.created_at #=> Time
913
- # resp.realtime_endpoint_info.endpoint_url #=> String
914
- # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
915
- # @overload delete_realtime_endpoint(params = {})
916
- # @param [Hash] params ({})
917
- def delete_realtime_endpoint(params = {}, options = {})
918
- req = build_request(:delete_realtime_endpoint, params)
919
- req.send_request(options)
920
- end
965
+ # Assigns the `DELETED` status to an `MLModel`, rendering it unusable.
966
+ #
967
+ # After using the `DeleteMLModel` operation, you can use the
968
+ # `GetMLModel` operation to verify that the status of the `MLModel`
969
+ # changed to DELETED.
970
+ #
971
+ # **Caution:** The result of the `DeleteMLModel` operation is
972
+ # irreversible.
973
+ #
974
+ # @option params [required, String] :ml_model_id
975
+ # A user-supplied ID that uniquely identifies the `MLModel`.
976
+ #
977
+ # @return [Types::DeleteMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
978
+ #
979
+ # * {Types::DeleteMLModelOutput#ml_model_id #ml_model_id} => String
980
+ #
981
+ # @example Request syntax with placeholder values
982
+ #
983
+ # resp = client.delete_ml_model({
984
+ # ml_model_id: "EntityId", # required
985
+ # })
986
+ #
987
+ # @example Response structure
988
+ #
989
+ # resp.ml_model_id #=> String
990
+ #
991
+ # @overload delete_ml_model(params = {})
992
+ # @param [Hash] params ({})
993
+ def delete_ml_model(params = {}, options = {})
994
+ req = build_request(:delete_ml_model, params)
995
+ req.send_request(options)
996
+ end
921
997
 
922
- # Deletes the specified tags associated with an ML object. After this
923
- # operation is complete, you can't recover deleted tags.
924
- #
925
- # If you specify a tag that doesn't exist, Amazon ML ignores it.
926
- # @option params [required, Array<String>] :tag_keys
927
- # One or more tags to delete.
928
- # @option params [required, String] :resource_id
929
- # The ID of the tagged ML object. For example, `exampleModelId`.
930
- # @option params [required, String] :resource_type
931
- # The type of the tagged ML object.
932
- # @return [Types::DeleteTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
933
- #
934
- # * {Types::DeleteTagsOutput#resource_id #ResourceId} => String
935
- # * {Types::DeleteTagsOutput#resource_type #ResourceType} => String
936
- #
937
- # @example Request syntax with placeholder values
938
- # resp = client.delete_tags({
939
- # tag_keys: ["TagKey"], # required
940
- # resource_id: "EntityId", # required
941
- # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
942
- # })
943
- #
944
- # @example Response structure
945
- # resp.resource_id #=> String
946
- # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
947
- # @overload delete_tags(params = {})
948
- # @param [Hash] params ({})
949
- def delete_tags(params = {}, options = {})
950
- req = build_request(:delete_tags, params)
951
- req.send_request(options)
952
- end
998
+ # Deletes a real time endpoint of an `MLModel`.
999
+ #
1000
+ # @option params [required, String] :ml_model_id
1001
+ # The ID assigned to the `MLModel` during creation.
1002
+ #
1003
+ # @return [Types::DeleteRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1004
+ #
1005
+ # * {Types::DeleteRealtimeEndpointOutput#ml_model_id #ml_model_id} => String
1006
+ # * {Types::DeleteRealtimeEndpointOutput#realtime_endpoint_info #realtime_endpoint_info} => Types::RealtimeEndpointInfo
1007
+ #
1008
+ # @example Request syntax with placeholder values
1009
+ #
1010
+ # resp = client.delete_realtime_endpoint({
1011
+ # ml_model_id: "EntityId", # required
1012
+ # })
1013
+ #
1014
+ # @example Response structure
1015
+ #
1016
+ # resp.ml_model_id #=> String
1017
+ # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
1018
+ # resp.realtime_endpoint_info.created_at #=> Time
1019
+ # resp.realtime_endpoint_info.endpoint_url #=> String
1020
+ # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1021
+ #
1022
+ # @overload delete_realtime_endpoint(params = {})
1023
+ # @param [Hash] params ({})
1024
+ def delete_realtime_endpoint(params = {}, options = {})
1025
+ req = build_request(:delete_realtime_endpoint, params)
1026
+ req.send_request(options)
1027
+ end
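A combined clean-up sketch for the two delete operations above (hypothetical model ID; both deletions are irreversible):

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    # Remove the real-time endpoint first, then mark the model itself DELETED.
    ml.delete_realtime_endpoint(ml_model_id: 'example-model-01')
    ml.delete_ml_model(ml_model_id: 'example-model-01')

    # GetMLModel confirms the new status.
    puts ml.get_ml_model(ml_model_id: 'example-model-01').status #=> "DELETED"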
953
1028
 
954
- # Returns a list of `BatchPrediction` operations that match the search
955
- # criteria in the request.
956
- # @option params [String] :filter_variable
957
- # Use one of the following variables to filter a list of
958
- # `BatchPrediction`\:
959
- #
960
- # * `CreatedAt` - Sets the search criteria to the `BatchPrediction`
961
- # creation date.
962
- # * `Status` - Sets the search criteria to the `BatchPrediction` status.
963
- # * `Name` - Sets the search criteria to the contents of the
964
- # `BatchPrediction`<b> </b> `Name`.
965
- # * `IAMUser` - Sets the search criteria to the user account that
966
- # invoked the `BatchPrediction` creation.
967
- # * `MLModelId` - Sets the search criteria to the `MLModel` used in the
968
- # `BatchPrediction`.
969
- # * `DataSourceId` - Sets the search criteria to the `DataSource` used
970
- # in the `BatchPrediction`.
971
- # * `DataURI` - Sets the search criteria to the data file(s) used in the
972
- # `BatchPrediction`. The URL can identify either a file or an Amazon
973
- # Simple Storage Service (Amazon S3) bucket or directory.
974
- # @option params [String] :eq
975
- # The equal to operator. The `BatchPrediction` results will have
976
- # `FilterVariable` values that exactly match the value specified with
977
- # `EQ`.
978
- # @option params [String] :gt
979
- # The greater than operator. The `BatchPrediction` results will have
980
- # `FilterVariable` values that are greater than the value specified with
981
- # `GT`.
982
- # @option params [String] :lt
983
- # The less than operator. The `BatchPrediction` results will have
984
- # `FilterVariable` values that are less than the value specified with
985
- # `LT`.
986
- # @option params [String] :ge
987
- # The greater than or equal to operator. The `BatchPrediction` results
988
- # will have `FilterVariable` values that are greater than or equal to
989
- # the value specified with `GE`.
990
- # @option params [String] :le
991
- # The less than or equal to operator. The `BatchPrediction` results will
992
- # have `FilterVariable` values that are less than or equal to the value
993
- # specified with `LE`.
994
- # @option params [String] :ne
995
- # The not equal to operator. The `BatchPrediction` results will have
996
- # `FilterVariable` values not equal to the value specified with `NE`.
997
- # @option params [String] :prefix
998
- # A string that is found at the beginning of a variable, such as `Name`
999
- # or `Id`.
1000
- #
1001
- # For example, a `Batch Prediction` operation could have the `Name`
1002
- # `2014-09-09-HolidayGiftMailer`. To search for this `BatchPrediction`,
1003
- # select `Name` for the `FilterVariable` and any of the following
1004
- # strings for the `Prefix`\:
1005
- #
1006
- # * 2014-09
1007
- #
1008
- # * 2014-09-09
1009
- #
1010
- # * 2014-09-09-Holiday
1011
- # @option params [String] :sort_order
1012
- # A two-value parameter that determines the sequence of the resulting
1013
- # list of `BatchPrediction`s.
1014
- #
1015
- # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1016
- # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1017
- #
1018
- # Results are sorted by `FilterVariable`.
1019
- # @option params [String] :next_token
1020
- # An ID of the page in the paginated results.
1021
- # @option params [Integer] :limit
1022
- # The number of pages of information to include in the result. The range
1023
- # of acceptable values is `1` through `100`. The default value is `100`.
1024
- # @return [Types::DescribeBatchPredictionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1025
- #
1026
- # * {Types::DescribeBatchPredictionsOutput#results #Results} => Array&lt;Types::BatchPrediction&gt;
1027
- # * {Types::DescribeBatchPredictionsOutput#next_token #NextToken} => String
1028
- #
1029
- # @example Request syntax with placeholder values
1030
- # resp = client.describe_batch_predictions({
1031
- # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1032
- # eq: "ComparatorValue",
1033
- # gt: "ComparatorValue",
1034
- # lt: "ComparatorValue",
1035
- # ge: "ComparatorValue",
1036
- # le: "ComparatorValue",
1037
- # ne: "ComparatorValue",
1038
- # prefix: "ComparatorValue",
1039
- # sort_order: "asc", # accepts asc, dsc
1040
- # next_token: "StringType",
1041
- # limit: 1,
1042
- # })
1043
- #
1044
- # @example Response structure
1045
- # resp.results #=> Array
1046
- # resp.results[0].batch_prediction_id #=> String
1047
- # resp.results[0].ml_model_id #=> String
1048
- # resp.results[0].batch_prediction_data_source_id #=> String
1049
- # resp.results[0].input_data_location_s3 #=> String
1050
- # resp.results[0].created_by_iam_user #=> String
1051
- # resp.results[0].created_at #=> Time
1052
- # resp.results[0].last_updated_at #=> Time
1053
- # resp.results[0].name #=> String
1054
- # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1055
- # resp.results[0].output_uri #=> String
1056
- # resp.results[0].message #=> String
1057
- # resp.results[0].compute_time #=> Integer
1058
- # resp.results[0].finished_at #=> Time
1059
- # resp.results[0].started_at #=> Time
1060
- # resp.results[0].total_record_count #=> Integer
1061
- # resp.results[0].invalid_record_count #=> Integer
1062
- # resp.next_token #=> String
1063
- # @overload describe_batch_predictions(params = {})
1064
- # @param [Hash] params ({})
1065
- def describe_batch_predictions(params = {}, options = {})
1066
- req = build_request(:describe_batch_predictions, params)
1067
- req.send_request(options)
1068
- end
1029
+ # Deletes the specified tags associated with an ML object. After this
1030
+ # operation is complete, you can't recover deleted tags.
1031
+ #
1032
+ # If you specify a tag that doesn't exist, Amazon ML ignores it.
1033
+ #
1034
+ # @option params [required, Array<String>] :tag_keys
1035
+ # One or more tags to delete.
1036
+ #
1037
+ # @option params [required, String] :resource_id
1038
+ # The ID of the tagged ML object. For example, `exampleModelId`.
1039
+ #
1040
+ # @option params [required, String] :resource_type
1041
+ # The type of the tagged ML object.
1042
+ #
1043
+ # @return [Types::DeleteTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1044
+ #
1045
+ # * {Types::DeleteTagsOutput#resource_id #resource_id} => String
1046
+ # * {Types::DeleteTagsOutput#resource_type #resource_type} => String
1047
+ #
1048
+ # @example Request syntax with placeholder values
1049
+ #
1050
+ # resp = client.delete_tags({
1051
+ # tag_keys: ["TagKey"], # required
1052
+ # resource_id: "EntityId", # required
1053
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
1054
+ # })
1055
+ #
1056
+ # @example Response structure
1057
+ #
1058
+ # resp.resource_id #=> String
1059
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
1060
+ #
1061
+ # @overload delete_tags(params = {})
1062
+ # @param [Hash] params ({})
1063
+ def delete_tags(params = {}, options = {})
1064
+ req = build_request(:delete_tags, params)
1065
+ req.send_request(options)
1066
+ end
1069
1067
 
1070
- # Returns a list of `DataSource` objects that match the search criteria in the
1071
- # request.
1072
- # @option params [String] :filter_variable
1073
- # Use one of the following variables to filter a list of `DataSource`\:
1074
- #
1075
- # * `CreatedAt` - Sets the search criteria to `DataSource` creation
1076
- # dates.
1077
- # * `Status` - Sets the search criteria to `DataSource` statuses.
1078
- # * `Name` - Sets the search criteria to the contents of `DataSource`
1079
- # <b> </b> `Name`.
1080
- # * `DataUri` - Sets the search criteria to the URI of data files used
1081
- # to create the `DataSource`. The URI can identify either a file or an
1082
- # Amazon Simple Storage Service (Amazon S3) bucket or directory.
1083
- # * `IAMUser` - Sets the search criteria to the user account that
1084
- # invoked the `DataSource` creation.
1085
- # @option params [String] :eq
1086
- # The equal to operator. The `DataSource` results will have
1087
- # `FilterVariable` values that exactly match the value specified with
1088
- # `EQ`.
1089
- # @option params [String] :gt
1090
- # The greater than operator. The `DataSource` results will have
1091
- # `FilterVariable` values that are greater than the value specified with
1092
- # `GT`.
1093
- # @option params [String] :lt
1094
- # The less than operator. The `DataSource` results will have
1095
- # `FilterVariable` values that are less than the value specified with
1096
- # `LT`.
1097
- # @option params [String] :ge
1098
- # The greater than or equal to operator. The `DataSource` results will
1099
- # have `FilterVariable` values that are greater than or equal to the
1100
- # value specified with `GE`.
1101
- # @option params [String] :le
1102
- # The less than or equal to operator. The `DataSource` results will have
1103
- # `FilterVariable` values that are less than or equal to the value
1104
- # specified with `LE`.
1105
- # @option params [String] :ne
1106
- # The not equal to operator. The `DataSource` results will have
1107
- # `FilterVariable` values not equal to the value specified with `NE`.
1108
- # @option params [String] :prefix
1109
- # A string that is found at the beginning of a variable, such as `Name`
1110
- # or `Id`.
1111
- #
1112
- # For example, a `DataSource` could have the `Name`
1113
- # `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
1114
- # select `Name` for the `FilterVariable` and any of the following
1115
- # strings for the `Prefix`\:
1116
- #
1117
- # * 2014-09
1118
- #
1119
- # * 2014-09-09
1120
- #
1121
- # * 2014-09-09-Holiday
1122
- # @option params [String] :sort_order
1123
- # A two-value parameter that determines the sequence of the resulting
1124
- # list of `DataSource`.
1125
- #
1126
- # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1127
- # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1128
- #
1129
- # Results are sorted by `FilterVariable`.
1130
- # @option params [String] :next_token
1131
- # The ID of the page in the paginated results.
1132
- # @option params [Integer] :limit
1133
- # The maximum number of `DataSource` to include in the result.
1134
- # @return [Types::DescribeDataSourcesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1135
- #
1136
- # * {Types::DescribeDataSourcesOutput#results #Results} => Array&lt;Types::DataSource&gt;
1137
- # * {Types::DescribeDataSourcesOutput#next_token #NextToken} => String
1138
- #
1139
- # @example Request syntax with placeholder values
1140
- # resp = client.describe_data_sources({
1141
- # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, DataLocationS3, IAMUser
1142
- # eq: "ComparatorValue",
1143
- # gt: "ComparatorValue",
1144
- # lt: "ComparatorValue",
1145
- # ge: "ComparatorValue",
1146
- # le: "ComparatorValue",
1147
- # ne: "ComparatorValue",
1148
- # prefix: "ComparatorValue",
1149
- # sort_order: "asc", # accepts asc, dsc
1150
- # next_token: "StringType",
1151
- # limit: 1,
1152
- # })
1153
- #
1154
- # @example Response structure
1155
- # resp.results #=> Array
1156
- # resp.results[0].data_source_id #=> String
1157
- # resp.results[0].data_location_s3 #=> String
1158
- # resp.results[0].data_rearrangement #=> String
1159
- # resp.results[0].created_by_iam_user #=> String
1160
- # resp.results[0].created_at #=> Time
1161
- # resp.results[0].last_updated_at #=> Time
1162
- # resp.results[0].data_size_in_bytes #=> Integer
1163
- # resp.results[0].number_of_files #=> Integer
1164
- # resp.results[0].name #=> String
1165
- # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1166
- # resp.results[0].message #=> String
1167
- # resp.results[0].redshift_metadata.redshift_database.database_name #=> String
1168
- # resp.results[0].redshift_metadata.redshift_database.cluster_identifier #=> String
1169
- # resp.results[0].redshift_metadata.database_user_name #=> String
1170
- # resp.results[0].redshift_metadata.select_sql_query #=> String
1171
- # resp.results[0].rds_metadata.database.instance_identifier #=> String
1172
- # resp.results[0].rds_metadata.database.database_name #=> String
1173
- # resp.results[0].rds_metadata.database_user_name #=> String
1174
- # resp.results[0].rds_metadata.select_sql_query #=> String
1175
- # resp.results[0].rds_metadata.resource_role #=> String
1176
- # resp.results[0].rds_metadata.service_role #=> String
1177
- # resp.results[0].rds_metadata.data_pipeline_id #=> String
1178
- # resp.results[0].role_arn #=> String
1179
- # resp.results[0].compute_statistics #=> Boolean
1180
- # resp.results[0].compute_time #=> Integer
1181
- # resp.results[0].finished_at #=> Time
1182
- # resp.results[0].started_at #=> Time
1183
- # resp.next_token #=> String
1184
- # @overload describe_data_sources(params = {})
1185
- # @param [Hash] params ({})
1186
- def describe_data_sources(params = {}, options = {})
1187
- req = build_request(:describe_data_sources, params)
1188
- req.send_request(options)
1189
- end
1068
+ # Returns a list of `BatchPrediction` operations that match the search
1069
+ # criteria in the request.
1070
+ #
1071
+ # @option params [String] :filter_variable
1072
+ # Use one of the following variables to filter a list of
1073
+ # `BatchPrediction`\:
1074
+ #
1075
+ # * `CreatedAt` - Sets the search criteria to the `BatchPrediction`
1076
+ # creation date.
1077
+ # * `Status` - Sets the search criteria to the `BatchPrediction` status.
1078
+ # * `Name` - Sets the search criteria to the contents of the
1079
+ # `BatchPrediction`<b> </b> `Name`.
1080
+ # * `IAMUser` - Sets the search criteria to the user account that
1081
+ # invoked the `BatchPrediction` creation.
1082
+ # * `MLModelId` - Sets the search criteria to the `MLModel` used in the
1083
+ # `BatchPrediction`.
1084
+ # * `DataSourceId` - Sets the search criteria to the `DataSource` used
1085
+ # in the `BatchPrediction`.
1086
+ # * `DataURI` - Sets the search criteria to the data file(s) used in the
1087
+ # `BatchPrediction`. The URL can identify either a file or an Amazon
1088
+ # Simple Storage Service (Amazon S3) bucket or directory.
1089
+ #
1090
+ # @option params [String] :eq
1091
+ # The equal to operator. The `BatchPrediction` results will have
1092
+ # `FilterVariable` values that exactly match the value specified with
1093
+ # `EQ`.
1094
+ #
1095
+ # @option params [String] :gt
1096
+ # The greater than operator. The `BatchPrediction` results will have
1097
+ # `FilterVariable` values that are greater than the value specified with
1098
+ # `GT`.
1099
+ #
1100
+ # @option params [String] :lt
1101
+ # The less than operator. The `BatchPrediction` results will have
1102
+ # `FilterVariable` values that are less than the value specified with
1103
+ # `LT`.
1104
+ #
1105
+ # @option params [String] :ge
1106
+ # The greater than or equal to operator. The `BatchPrediction` results
1107
+ # will have `FilterVariable` values that are greater than or equal to
1108
+ # the value specified with `GE`.
1109
+ #
1110
+ # @option params [String] :le
1111
+ # The less than or equal to operator. The `BatchPrediction` results will
1112
+ # have `FilterVariable` values that are less than or equal to the value
1113
+ # specified with `LE`.
1114
+ #
1115
+ # @option params [String] :ne
1116
+ # The not equal to operator. The `BatchPrediction` results will have
1117
+ # `FilterVariable` values not equal to the value specified with `NE`.
1118
+ #
1119
+ # @option params [String] :prefix
1120
+ # A string that is found at the beginning of a variable, such as `Name`
1121
+ # or `Id`.
1122
+ #
1123
+ # For example, a `Batch Prediction` operation could have the `Name`
1124
+ # `2014-09-09-HolidayGiftMailer`. To search for this `BatchPrediction`,
1125
+ # select `Name` for the `FilterVariable` and any of the following
1126
+ # strings for the `Prefix`\:
1127
+ #
1128
+ # * 2014-09
1129
+ #
1130
+ # * 2014-09-09
1131
+ #
1132
+ # * 2014-09-09-Holiday
1133
+ #
1134
+ # @option params [String] :sort_order
1135
+ # A two-value parameter that determines the sequence of the resulting
1136
+ # list of `BatchPrediction`s.
1137
+ #
1138
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1139
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1140
+ #
1141
+ # Results are sorted by `FilterVariable`.
1142
+ #
1143
+ # @option params [String] :next_token
1144
+ # An ID of the page in the paginated results.
1145
+ #
1146
+ # @option params [Integer] :limit
1147
+ # The number of pages of information to include in the result. The range
1148
+ # of acceptable values is `1` through `100`. The default value is `100`.
1149
+ #
1150
+ # @return [Types::DescribeBatchPredictionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1151
+ #
1152
+ # * {Types::DescribeBatchPredictionsOutput#results #results} => Array&lt;Types::BatchPrediction&gt;
1153
+ # * {Types::DescribeBatchPredictionsOutput#next_token #next_token} => String
1154
+ #
1155
+ # @example Request syntax with placeholder values
1156
+ #
1157
+ # resp = client.describe_batch_predictions({
1158
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1159
+ # eq: "ComparatorValue",
1160
+ # gt: "ComparatorValue",
1161
+ # lt: "ComparatorValue",
1162
+ # ge: "ComparatorValue",
1163
+ # le: "ComparatorValue",
1164
+ # ne: "ComparatorValue",
1165
+ # prefix: "ComparatorValue",
1166
+ # sort_order: "asc", # accepts asc, dsc
1167
+ # next_token: "StringType",
1168
+ # limit: 1,
1169
+ # })
1170
+ #
1171
+ # @example Response structure
1172
+ #
1173
+ # resp.results #=> Array
1174
+ # resp.results[0].batch_prediction_id #=> String
1175
+ # resp.results[0].ml_model_id #=> String
1176
+ # resp.results[0].batch_prediction_data_source_id #=> String
1177
+ # resp.results[0].input_data_location_s3 #=> String
1178
+ # resp.results[0].created_by_iam_user #=> String
1179
+ # resp.results[0].created_at #=> Time
1180
+ # resp.results[0].last_updated_at #=> Time
1181
+ # resp.results[0].name #=> String
1182
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1183
+ # resp.results[0].output_uri #=> String
1184
+ # resp.results[0].message #=> String
1185
+ # resp.results[0].compute_time #=> Integer
1186
+ # resp.results[0].finished_at #=> Time
1187
+ # resp.results[0].started_at #=> Time
1188
+ # resp.results[0].total_record_count #=> Integer
1189
+ # resp.results[0].invalid_record_count #=> Integer
1190
+ # resp.next_token #=> String
1191
+ #
1192
+ # @overload describe_batch_predictions(params = {})
1193
+ # @param [Hash] params ({})
1194
+ def describe_batch_predictions(params = {}, options = {})
1195
+ req = build_request(:describe_batch_predictions, params)
1196
+ req.send_request(options)
1197
+ end
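A paging sketch for the operation above: filter on Status, then follow next_token until the listing is exhausted (region and filter values are hypothetical):

    ml = Aws::MachineLearning::Client.new(region: 'us-east-1')

    token = nil
    loop do
      params = { filter_variable: 'Status', eq: 'COMPLETED', sort_order: 'dsc', limit: 100 }
      params[:next_token] = token if token
      page = ml.describe_batch_predictions(params)
      page.results.each { |bp| puts "#{bp.batch_prediction_id} #{bp.output_uri}" }
      token = page.next_token
      break if token.nil? || token.empty?
    end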
1190
1198
 
1191
- # Returns a list of `Evaluation` objects that match the search criteria
1192
- # in the request.
1193
- # @option params [String] :filter_variable
1194
- # Use one of the following variable to filter a list of `Evaluation`
1195
- # objects:
1196
- #
1197
- # * `CreatedAt` - Sets the search criteria to the `Evaluation` creation
1198
- # date.
1199
- # * `Status` - Sets the search criteria to the `Evaluation` status.
1200
- # * `Name` - Sets the search criteria to the contents of `Evaluation`
1201
- # <b> </b> `Name`.
1202
- # * `IAMUser` - Sets the search criteria to the user account that
1203
- # invoked an `Evaluation`.
1204
- # * `MLModelId` - Sets the search criteria to the `MLModel` that was
1205
- # evaluated.
1206
- # * `DataSourceId` - Sets the search criteria to the `DataSource` used
1207
- # in `Evaluation`.
1208
- # * `DataUri` - Sets the search criteria to the data file(s) used in
1209
- # `Evaluation`. The URL can identify either a file or an Amazon Simple
1210
- # Storage Service (Amazon S3) bucket or directory.
1211
- # @option params [String] :eq
1212
- # The equal to operator. The `Evaluation` results will have
1213
- # `FilterVariable` values that exactly match the value specified with
1214
- # `EQ`.
1215
- # @option params [String] :gt
1216
- # The greater than operator. The `Evaluation` results will have
1217
- # `FilterVariable` values that are greater than the value specified with
1218
- # `GT`.
1219
- # @option params [String] :lt
1220
- # The less than operator. The `Evaluation` results will have
1221
- # `FilterVariable` values that are less than the value specified with
1222
- # `LT`.
1223
- # @option params [String] :ge
1224
- # The greater than or equal to operator. The `Evaluation` results will
1225
- # have `FilterVariable` values that are greater than or equal to the
1226
- # value specified with `GE`.
1227
- # @option params [String] :le
1228
- # The less than or equal to operator. The `Evaluation` results will have
1229
- # `FilterVariable` values that are less than or equal to the value
1230
- # specified with `LE`.
1231
- # @option params [String] :ne
1232
- # The not equal to operator. The `Evaluation` results will have
1233
- # `FilterVariable` values not equal to the value specified with `NE`.
1234
- # @option params [String] :prefix
1235
- # A string that is found at the beginning of a variable, such as `Name`
1236
- # or `Id`.
1237
- #
1238
- # For example, an `Evaluation` could have the `Name`
1239
- # `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
1240
- # select `Name` for the `FilterVariable` and any of the following
1241
- # strings for the `Prefix`\:
1242
- #
1243
- # * 2014-09
1244
- #
1245
- # * 2014-09-09
1246
- #
1247
- # * 2014-09-09-Holiday
1248
- # @option params [String] :sort_order
1249
- # A two-value parameter that determines the sequence of the resulting
1250
- # list of `Evaluation`.
1251
- #
1252
- # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1253
- # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1254
- #
1255
- # Results are sorted by `FilterVariable`.
1256
- # @option params [String] :next_token
1257
- # The ID of the page in the paginated results.
1258
- # @option params [Integer] :limit
1259
- # The maximum number of `Evaluation` to include in the result.
1260
- # @return [Types::DescribeEvaluationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1261
- #
1262
- # * {Types::DescribeEvaluationsOutput#results #Results} => Array&lt;Types::Evaluation&gt;
1263
- # * {Types::DescribeEvaluationsOutput#next_token #NextToken} => String
1264
- #
1265
- # @example Request syntax with placeholder values
1266
- # resp = client.describe_evaluations({
1267
- # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1268
- # eq: "ComparatorValue",
1269
- # gt: "ComparatorValue",
1270
- # lt: "ComparatorValue",
1271
- # ge: "ComparatorValue",
1272
- # le: "ComparatorValue",
1273
- # ne: "ComparatorValue",
1274
- # prefix: "ComparatorValue",
1275
- # sort_order: "asc", # accepts asc, dsc
1276
- # next_token: "StringType",
1277
- # limit: 1,
1278
- # })
1279
- #
1280
- # @example Response structure
1281
- # resp.results #=> Array
1282
- # resp.results[0].evaluation_id #=> String
1283
- # resp.results[0].ml_model_id #=> String
1284
- # resp.results[0].evaluation_data_source_id #=> String
1285
- # resp.results[0].input_data_location_s3 #=> String
1286
- # resp.results[0].created_by_iam_user #=> String
1287
- # resp.results[0].created_at #=> Time
1288
- # resp.results[0].last_updated_at #=> Time
1289
- # resp.results[0].name #=> String
1290
- # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1291
- # resp.results[0].performance_metrics.properties #=> Hash
1292
- # resp.results[0].performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1293
- # resp.results[0].message #=> String
1294
- # resp.results[0].compute_time #=> Integer
1295
- # resp.results[0].finished_at #=> Time
1296
- # resp.results[0].started_at #=> Time
1297
- # resp.next_token #=> String
1298
- # @overload describe_evaluations(params = {})
1299
- # @param [Hash] params ({})
1300
- def describe_evaluations(params = {}, options = {})
1301
- req = build_request(:describe_evaluations, params)
1302
- req.send_request(options)
1303
- end
1199
+ # Returns a list of `DataSource` that match the search criteria in the
1200
+ # request.
1201
+ #
1202
+ # @option params [String] :filter_variable
1203
+ # Use one of the following variables to filter a list of `DataSource`\:
1204
+ #
1205
+ # * `CreatedAt` - Sets the search criteria to `DataSource` creation
1206
+ # dates.
1207
+ # * `Status` - Sets the search criteria to `DataSource` statuses.
1208
+ # * `Name` - Sets the search criteria to the contents of `DataSource`
1209
+ # <b> </b> `Name`.
1210
+ # * `DataUri` - Sets the search criteria to the URI of data files used
1211
+ # to create the `DataSource`. The URI can identify either a file or an
1212
+ # Amazon Simple Storage Service (Amazon S3) bucket or directory.
1213
+ # * `IAMUser` - Sets the search criteria to the user account that
1214
+ # invoked the `DataSource` creation.
1215
+ #
1216
+ # @option params [String] :eq
1217
+ # The equal to operator. The `DataSource` results will have
1218
+ # `FilterVariable` values that exactly match the value specified with
1219
+ # `EQ`.
1220
+ #
1221
+ # @option params [String] :gt
1222
+ # The greater than operator. The `DataSource` results will have
1223
+ # `FilterVariable` values that are greater than the value specified with
1224
+ # `GT`.
1225
+ #
1226
+ # @option params [String] :lt
1227
+ # The less than operator. The `DataSource` results will have
1228
+ # `FilterVariable` values that are less than the value specified with
1229
+ # `LT`.
1230
+ #
1231
+ # @option params [String] :ge
1232
+ # The greater than or equal to operator. The `DataSource` results will
1233
+ # have `FilterVariable` values that are greater than or equal to the
1234
+ # value specified with `GE`.
1235
+ #
1236
+ # @option params [String] :le
1237
+ # The less than or equal to operator. The `DataSource` results will have
1238
+ # `FilterVariable` values that are less than or equal to the value
1239
+ # specified with `LE`.
1240
+ #
1241
+ # @option params [String] :ne
1242
+ # The not equal to operator. The `DataSource` results will have
1243
+ # `FilterVariable` values not equal to the value specified with `NE`.
1244
+ #
1245
+ # @option params [String] :prefix
1246
+ # A string that is found at the beginning of a variable, such as `Name`
1247
+ # or `Id`.
1248
+ #
1249
+ # For example, a `DataSource` could have the `Name`
1250
+ # `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
1251
+ # select `Name` for the `FilterVariable` and any of the following
1252
+ # strings for the `Prefix`\:
1253
+ #
1254
+ # * 2014-09
1255
+ #
1256
+ # * 2014-09-09
1257
+ #
1258
+ # * 2014-09-09-Holiday
1259
+ #
1260
+ # @option params [String] :sort_order
1261
+ # A two-value parameter that determines the sequence of the resulting
1262
+ # list of `DataSource`.
1263
+ #
1264
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1265
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1266
+ #
1267
+ # Results are sorted by `FilterVariable`.
1268
+ #
1269
+ # @option params [String] :next_token
1270
+ # The ID of the page in the paginated results.
1271
+ #
1272
+ # @option params [Integer] :limit
1273
+ # The maximum number of `DataSource` to include in the result.
1274
+ #
1275
+ # @return [Types::DescribeDataSourcesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1276
+ #
1277
+ # * {Types::DescribeDataSourcesOutput#results #results} => Array&lt;Types::DataSource&gt;
1278
+ # * {Types::DescribeDataSourcesOutput#next_token #next_token} => String
1279
+ #
1280
+ # @example Request syntax with placeholder values
1281
+ #
1282
+ # resp = client.describe_data_sources({
1283
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, DataLocationS3, IAMUser
1284
+ # eq: "ComparatorValue",
1285
+ # gt: "ComparatorValue",
1286
+ # lt: "ComparatorValue",
1287
+ # ge: "ComparatorValue",
1288
+ # le: "ComparatorValue",
1289
+ # ne: "ComparatorValue",
1290
+ # prefix: "ComparatorValue",
1291
+ # sort_order: "asc", # accepts asc, dsc
1292
+ # next_token: "StringType",
1293
+ # limit: 1,
1294
+ # })
1295
+ #
1296
+ # @example Response structure
1297
+ #
1298
+ # resp.results #=> Array
1299
+ # resp.results[0].data_source_id #=> String
1300
+ # resp.results[0].data_location_s3 #=> String
1301
+ # resp.results[0].data_rearrangement #=> String
1302
+ # resp.results[0].created_by_iam_user #=> String
1303
+ # resp.results[0].created_at #=> Time
1304
+ # resp.results[0].last_updated_at #=> Time
1305
+ # resp.results[0].data_size_in_bytes #=> Integer
1306
+ # resp.results[0].number_of_files #=> Integer
1307
+ # resp.results[0].name #=> String
1308
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1309
+ # resp.results[0].message #=> String
1310
+ # resp.results[0].redshift_metadata.redshift_database.database_name #=> String
1311
+ # resp.results[0].redshift_metadata.redshift_database.cluster_identifier #=> String
1312
+ # resp.results[0].redshift_metadata.database_user_name #=> String
1313
+ # resp.results[0].redshift_metadata.select_sql_query #=> String
1314
+ # resp.results[0].rds_metadata.database.instance_identifier #=> String
1315
+ # resp.results[0].rds_metadata.database.database_name #=> String
1316
+ # resp.results[0].rds_metadata.database_user_name #=> String
1317
+ # resp.results[0].rds_metadata.select_sql_query #=> String
1318
+ # resp.results[0].rds_metadata.resource_role #=> String
1319
+ # resp.results[0].rds_metadata.service_role #=> String
1320
+ # resp.results[0].rds_metadata.data_pipeline_id #=> String
1321
+ # resp.results[0].role_arn #=> String
1322
+ # resp.results[0].compute_statistics #=> Boolean
1323
+ # resp.results[0].compute_time #=> Integer
1324
+ # resp.results[0].finished_at #=> Time
1325
+ # resp.results[0].started_at #=> Time
1326
+ # resp.next_token #=> String
1327
+ #
1328
+ # @overload describe_data_sources(params = {})
1329
+ # @param [Hash] params ({})
1330
+ def describe_data_sources(params = {}, options = {})
1331
+ req = build_request(:describe_data_sources, params)
1332
+ req.send_request(options)
1333
+ end
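# Editor's note: a hedged sketch (not part of the generated file) that uses
# the Name/Prefix filter documented above; the prefix string and region are
# illustrative assumptions.
require 'aws-sdk-machinelearning'

client = Aws::MachineLearning::Client.new(region: 'us-east-1') # assumed region
resp = client.describe_data_sources(
  filter_variable: 'Name',
  prefix: '2014-09-09',  # illustrative prefix, matching the example above
  sort_order: 'asc',
  limit: 25
)
resp.results.each do |ds|
  puts "#{ds.name} (#{ds.status}) - #{ds.data_size_in_bytes} bytes"
end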
1304
1334
 
1305
- # Returns a list of `MLModel` that match the search criteria in the
1306
- # request.
1307
- # @option params [String] :filter_variable
1308
- # Use one of the following variables to filter a list of `MLModel`\:
1309
- #
1310
- # * `CreatedAt` - Sets the search criteria to `MLModel` creation date.
1311
- # * `Status` - Sets the search criteria to `MLModel` status.
1312
- # * `Name` - Sets the search criteria to the contents of `MLModel`<b>
1313
- # </b> `Name`.
1314
- # * `IAMUser` - Sets the search criteria to the user account that
1315
- # invoked the `MLModel` creation.
1316
- # * `TrainingDataSourceId` - Sets the search criteria to the
1317
- # `DataSource` used to train one or more `MLModel`.
1318
- # * `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel`
1319
- # real-time endpoint status.
1320
- # * `MLModelType` - Sets the search criteria to `MLModel` type: binary,
1321
- # regression, or multi-class.
1322
- # * `Algorithm` - Sets the search criteria to the algorithm that the
1323
- # `MLModel` uses.
1324
- # * `TrainingDataURI` - Sets the search criteria to the data file(s)
1325
- # used in training a `MLModel`. The URL can identify either a file or
1326
- # an Amazon Simple Storage Service (Amazon S3) bucket or directory.
1327
- # @option params [String] :eq
1328
- # The equal to operator. The `MLModel` results will have
1329
- # `FilterVariable` values that exactly match the value specified with
1330
- # `EQ`.
1331
- # @option params [String] :gt
1332
- # The greater than operator. The `MLModel` results will have
1333
- # `FilterVariable` values that are greater than the value specified with
1334
- # `GT`.
1335
- # @option params [String] :lt
1336
- # The less than operator. The `MLModel` results will have
1337
- # `FilterVariable` values that are less than the value specified with
1338
- # `LT`.
1339
- # @option params [String] :ge
1340
- # The greater than or equal to operator. The `MLModel` results will have
1341
- # `FilterVariable` values that are greater than or equal to the value
1342
- # specified with `GE`.
1343
- # @option params [String] :le
1344
- # The less than or equal to operator. The `MLModel` results will have
1345
- # `FilterVariable` values that are less than or equal to the value
1346
- # specified with `LE`.
1347
- # @option params [String] :ne
1348
- # The not equal to operator. The `MLModel` results will have
1349
- # `FilterVariable` values not equal to the value specified with `NE`.
1350
- # @option params [String] :prefix
1351
- # A string that is found at the beginning of a variable, such as `Name`
1352
- # or `Id`.
1353
- #
1354
- # For example, an `MLModel` could have the `Name`
1355
- # `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, select
1356
- # `Name` for the `FilterVariable` and any of the following strings for
1357
- # the `Prefix`\:
1358
- #
1359
- # * 2014-09
1360
- #
1361
- # * 2014-09-09
1362
- #
1363
- # * 2014-09-09-Holiday
1364
- # @option params [String] :sort_order
1365
- # A two-value parameter that determines the sequence of the resulting
1366
- # list of `MLModel`.
1367
- #
1368
- # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1369
- # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1370
- #
1371
- # Results are sorted by `FilterVariable`.
1372
- # @option params [String] :next_token
1373
- # The ID of the page in the paginated results.
1374
- # @option params [Integer] :limit
1375
- # The number of pages of information to include in the result. The range
1376
- # of acceptable values is `1` through `100`. The default value is `100`.
1377
- # @return [Types::DescribeMLModelsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1378
- #
1379
- # * {Types::DescribeMLModelsOutput#results #Results} => Array&lt;Types::MLModel&gt;
1380
- # * {Types::DescribeMLModelsOutput#next_token #NextToken} => String
1381
- #
1382
- # @example Request syntax with placeholder values
1383
- # resp = client.describe_ml_models({
1384
- # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, TrainingDataSourceId, RealtimeEndpointStatus, MLModelType, Algorithm, TrainingDataURI
1385
- # eq: "ComparatorValue",
1386
- # gt: "ComparatorValue",
1387
- # lt: "ComparatorValue",
1388
- # ge: "ComparatorValue",
1389
- # le: "ComparatorValue",
1390
- # ne: "ComparatorValue",
1391
- # prefix: "ComparatorValue",
1392
- # sort_order: "asc", # accepts asc, dsc
1393
- # next_token: "StringType",
1394
- # limit: 1,
1395
- # })
1396
- #
1397
- # @example Response structure
1398
- # resp.results #=> Array
1399
- # resp.results[0].ml_model_id #=> String
1400
- # resp.results[0].training_data_source_id #=> String
1401
- # resp.results[0].created_by_iam_user #=> String
1402
- # resp.results[0].created_at #=> Time
1403
- # resp.results[0].last_updated_at #=> Time
1404
- # resp.results[0].name #=> String
1405
- # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1406
- # resp.results[0].size_in_bytes #=> Integer
1407
- # resp.results[0].endpoint_info.peak_requests_per_second #=> Integer
1408
- # resp.results[0].endpoint_info.created_at #=> Time
1409
- # resp.results[0].endpoint_info.endpoint_url #=> String
1410
- # resp.results[0].endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1411
- # resp.results[0].training_parameters #=> Hash
1412
- # resp.results[0].training_parameters["StringType"] #=> String
1413
- # resp.results[0].input_data_location_s3 #=> String
1414
- # resp.results[0].algorithm #=> String, one of "sgd"
1415
- # resp.results[0].ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1416
- # resp.results[0].score_threshold #=> Float
1417
- # resp.results[0].score_threshold_last_updated_at #=> Time
1418
- # resp.results[0].message #=> String
1419
- # resp.results[0].compute_time #=> Integer
1420
- # resp.results[0].finished_at #=> Time
1421
- # resp.results[0].started_at #=> Time
1422
- # resp.next_token #=> String
1423
- # @overload describe_ml_models(params = {})
1424
- # @param [Hash] params ({})
1425
- def describe_ml_models(params = {}, options = {})
1426
- req = build_request(:describe_ml_models, params)
1427
- req.send_request(options)
1428
- end
1335
+ # Returns a list of `Evaluation` objects that match the search criteria
1336
+ # in the request.
1337
+ #
1338
+ # @option params [String] :filter_variable
1339
+ # Use one of the following variables to filter a list of `Evaluation`
1340
+ # objects:
1341
+ #
1342
+ # * `CreatedAt` - Sets the search criteria to the `Evaluation` creation
1343
+ # date.
1344
+ # * `Status` - Sets the search criteria to the `Evaluation` status.
1345
+ # * `Name` - Sets the search criteria to the contents of `Evaluation`
1346
+ # <b> </b> `Name`.
1347
+ # * `IAMUser` - Sets the search criteria to the user account that
1348
+ # invoked an `Evaluation`.
1349
+ # * `MLModelId` - Sets the search criteria to the `MLModel` that was
1350
+ # evaluated.
1351
+ # * `DataSourceId` - Sets the search criteria to the `DataSource` used
1352
+ # in `Evaluation`.
1353
+ # * `DataUri` - Sets the search criteria to the data file(s) used in
1354
+ # `Evaluation`. The URL can identify either a file or an Amazon Simple
1355
+ # Storage Service (Amazon S3) bucket or directory.
1356
+ #
1357
+ # @option params [String] :eq
1358
+ # The equal to operator. The `Evaluation` results will have
1359
+ # `FilterVariable` values that exactly match the value specified with
1360
+ # `EQ`.
1361
+ #
1362
+ # @option params [String] :gt
1363
+ # The greater than operator. The `Evaluation` results will have
1364
+ # `FilterVariable` values that are greater than the value specified with
1365
+ # `GT`.
1366
+ #
1367
+ # @option params [String] :lt
1368
+ # The less than operator. The `Evaluation` results will have
1369
+ # `FilterVariable` values that are less than the value specified with
1370
+ # `LT`.
1371
+ #
1372
+ # @option params [String] :ge
1373
+ # The greater than or equal to operator. The `Evaluation` results will
1374
+ # have `FilterVariable` values that are greater than or equal to the
1375
+ # value specified with `GE`.
1376
+ #
1377
+ # @option params [String] :le
1378
+ # The less than or equal to operator. The `Evaluation` results will have
1379
+ # `FilterVariable` values that are less than or equal to the value
1380
+ # specified with `LE`.
1381
+ #
1382
+ # @option params [String] :ne
1383
+ # The not equal to operator. The `Evaluation` results will have
1384
+ # `FilterVariable` values not equal to the value specified with `NE`.
1385
+ #
1386
+ # @option params [String] :prefix
1387
+ # A string that is found at the beginning of a variable, such as `Name`
1388
+ # or `Id`.
1389
+ #
1390
+ # For example, an `Evaluation` could have the `Name`
1391
+ # `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
1392
+ # select `Name` for the `FilterVariable` and any of the following
1393
+ # strings for the `Prefix`\:
1394
+ #
1395
+ # * 2014-09
1396
+ #
1397
+ # * 2014-09-09
1398
+ #
1399
+ # * 2014-09-09-Holiday
1400
+ #
1401
+ # @option params [String] :sort_order
1402
+ # A two-value parameter that determines the sequence of the resulting
1403
+ # list of `Evaluation`.
1404
+ #
1405
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1406
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1407
+ #
1408
+ # Results are sorted by `FilterVariable`.
1409
+ #
1410
+ # @option params [String] :next_token
1411
+ # The ID of the page in the paginated results.
1412
+ #
1413
+ # @option params [Integer] :limit
1414
+ # The maximum number of `Evaluation` to include in the result.
1415
+ #
1416
+ # @return [Types::DescribeEvaluationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1417
+ #
1418
+ # * {Types::DescribeEvaluationsOutput#results #results} => Array&lt;Types::Evaluation&gt;
1419
+ # * {Types::DescribeEvaluationsOutput#next_token #next_token} => String
1420
+ #
1421
+ # @example Request syntax with placeholder values
1422
+ #
1423
+ # resp = client.describe_evaluations({
1424
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1425
+ # eq: "ComparatorValue",
1426
+ # gt: "ComparatorValue",
1427
+ # lt: "ComparatorValue",
1428
+ # ge: "ComparatorValue",
1429
+ # le: "ComparatorValue",
1430
+ # ne: "ComparatorValue",
1431
+ # prefix: "ComparatorValue",
1432
+ # sort_order: "asc", # accepts asc, dsc
1433
+ # next_token: "StringType",
1434
+ # limit: 1,
1435
+ # })
1436
+ #
1437
+ # @example Response structure
1438
+ #
1439
+ # resp.results #=> Array
1440
+ # resp.results[0].evaluation_id #=> String
1441
+ # resp.results[0].ml_model_id #=> String
1442
+ # resp.results[0].evaluation_data_source_id #=> String
1443
+ # resp.results[0].input_data_location_s3 #=> String
1444
+ # resp.results[0].created_by_iam_user #=> String
1445
+ # resp.results[0].created_at #=> Time
1446
+ # resp.results[0].last_updated_at #=> Time
1447
+ # resp.results[0].name #=> String
1448
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1449
+ # resp.results[0].performance_metrics.properties #=> Hash
1450
+ # resp.results[0].performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1451
+ # resp.results[0].message #=> String
1452
+ # resp.results[0].compute_time #=> Integer
1453
+ # resp.results[0].finished_at #=> Time
1454
+ # resp.results[0].started_at #=> Time
1455
+ # resp.next_token #=> String
1456
+ #
1457
+ # @overload describe_evaluations(params = {})
1458
+ # @param [Hash] params ({})
1459
+ def describe_evaluations(params = {}, options = {})
1460
+ req = build_request(:describe_evaluations, params)
1461
+ req.send_request(options)
1462
+ end
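# Editor's note: a hedged sketch (not part of the generated file) that lists
# the evaluations of one model and prints whatever performance metrics were
# recorded; the model ID is a placeholder.
require 'aws-sdk-machinelearning'

client = Aws::MachineLearning::Client.new(region: 'us-east-1') # assumed region
resp = client.describe_evaluations(
  filter_variable: 'MLModelId',
  eq: 'ml-exampleModelId'  # placeholder model ID
)
resp.results.each do |ev|
  puts "#{ev.evaluation_id} (#{ev.status})"
  metrics = ev.performance_metrics ? ev.performance_metrics.properties : nil
  (metrics || {}).each { |k, v| puts "  #{k} = #{v}" }
end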
1429
1463
 
1430
- # Describes one or more of the tags for your Amazon ML object.
1431
- # @option params [required, String] :resource_id
1432
- # The ID of the ML object. For example, `exampleModelId`.
1433
- # @option params [required, String] :resource_type
1434
- # The type of the ML object.
1435
- # @return [Types::DescribeTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1436
- #
1437
- # * {Types::DescribeTagsOutput#resource_id #ResourceId} => String
1438
- # * {Types::DescribeTagsOutput#resource_type #ResourceType} => String
1439
- # * {Types::DescribeTagsOutput#tags #Tags} => Array&lt;Types::Tag&gt;
1440
- #
1441
- # @example Request syntax with placeholder values
1442
- # resp = client.describe_tags({
1443
- # resource_id: "EntityId", # required
1444
- # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
1445
- # })
1446
- #
1447
- # @example Response structure
1448
- # resp.resource_id #=> String
1449
- # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
1450
- # resp.tags #=> Array
1451
- # resp.tags[0].key #=> String
1452
- # resp.tags[0].value #=> String
1453
- # @overload describe_tags(params = {})
1454
- # @param [Hash] params ({})
1455
- def describe_tags(params = {}, options = {})
1456
- req = build_request(:describe_tags, params)
1457
- req.send_request(options)
1458
- end
1464
+ # Returns a list of `MLModel` that match the search criteria in the
1465
+ # request.
1466
+ #
1467
+ # @option params [String] :filter_variable
1468
+ # Use one of the following variables to filter a list of `MLModel`\:
1469
+ #
1470
+ # * `CreatedAt` - Sets the search criteria to `MLModel` creation date.
1471
+ # * `Status` - Sets the search criteria to `MLModel` status.
1472
+ # * `Name` - Sets the search criteria to the contents of `MLModel`<b>
1473
+ # </b> `Name`.
1474
+ # * `IAMUser` - Sets the search criteria to the user account that
1475
+ # invoked the `MLModel` creation.
1476
+ # * `TrainingDataSourceId` - Sets the search criteria to the
1477
+ # `DataSource` used to train one or more `MLModel`.
1478
+ # * `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel`
1479
+ # real-time endpoint status.
1480
+ # * `MLModelType` - Sets the search criteria to `MLModel` type: binary,
1481
+ # regression, or multi-class.
1482
+ # * `Algorithm` - Sets the search criteria to the algorithm that the
1483
+ # `MLModel` uses.
1484
+ # * `TrainingDataURI` - Sets the search criteria to the data file(s)
1485
+ # used in training a `MLModel`. The URL can identify either a file or
1486
+ # an Amazon Simple Storage Service (Amazon S3) bucket or directory.
1487
+ #
1488
+ # @option params [String] :eq
1489
+ # The equal to operator. The `MLModel` results will have
1490
+ # `FilterVariable` values that exactly match the value specified with
1491
+ # `EQ`.
1492
+ #
1493
+ # @option params [String] :gt
1494
+ # The greater than operator. The `MLModel` results will have
1495
+ # `FilterVariable` values that are greater than the value specified with
1496
+ # `GT`.
1497
+ #
1498
+ # @option params [String] :lt
1499
+ # The less than operator. The `MLModel` results will have
1500
+ # `FilterVariable` values that are less than the value specified with
1501
+ # `LT`.
1502
+ #
1503
+ # @option params [String] :ge
1504
+ # The greater than or equal to operator. The `MLModel` results will have
1505
+ # `FilterVariable` values that are greater than or equal to the value
1506
+ # specified with `GE`.
1507
+ #
1508
+ # @option params [String] :le
1509
+ # The less than or equal to operator. The `MLModel` results will have
1510
+ # `FilterVariable` values that are less than or equal to the value
1511
+ # specified with `LE`.
1512
+ #
1513
+ # @option params [String] :ne
1514
+ # The not equal to operator. The `MLModel` results will have
1515
+ # `FilterVariable` values not equal to the value specified with `NE`.
1516
+ #
1517
+ # @option params [String] :prefix
1518
+ # A string that is found at the beginning of a variable, such as `Name`
1519
+ # or `Id`.
1520
+ #
1521
+ # For example, an `MLModel` could have the `Name`
1522
+ # `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, select
1523
+ # `Name` for the `FilterVariable` and any of the following strings for
1524
+ # the `Prefix`\:
1525
+ #
1526
+ # * 2014-09
1527
+ #
1528
+ # * 2014-09-09
1529
+ #
1530
+ # * 2014-09-09-Holiday
1531
+ #
1532
+ # @option params [String] :sort_order
1533
+ # A two-value parameter that determines the sequence of the resulting
1534
+ # list of `MLModel`.
1535
+ #
1536
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1537
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1538
+ #
1539
+ # Results are sorted by `FilterVariable`.
1540
+ #
1541
+ # @option params [String] :next_token
1542
+ # The ID of the page in the paginated results.
1543
+ #
1544
+ # @option params [Integer] :limit
1545
+ # The number of pages of information to include in the result. The range
1546
+ # of acceptable values is `1` through `100`. The default value is `100`.
1547
+ #
1548
+ # @return [Types::DescribeMLModelsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1549
+ #
1550
+ # * {Types::DescribeMLModelsOutput#results #results} => Array&lt;Types::MLModel&gt;
1551
+ # * {Types::DescribeMLModelsOutput#next_token #next_token} => String
1552
+ #
1553
+ # @example Request syntax with placeholder values
1554
+ #
1555
+ # resp = client.describe_ml_models({
1556
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, TrainingDataSourceId, RealtimeEndpointStatus, MLModelType, Algorithm, TrainingDataURI
1557
+ # eq: "ComparatorValue",
1558
+ # gt: "ComparatorValue",
1559
+ # lt: "ComparatorValue",
1560
+ # ge: "ComparatorValue",
1561
+ # le: "ComparatorValue",
1562
+ # ne: "ComparatorValue",
1563
+ # prefix: "ComparatorValue",
1564
+ # sort_order: "asc", # accepts asc, dsc
1565
+ # next_token: "StringType",
1566
+ # limit: 1,
1567
+ # })
1568
+ #
1569
+ # @example Response structure
1570
+ #
1571
+ # resp.results #=> Array
1572
+ # resp.results[0].ml_model_id #=> String
1573
+ # resp.results[0].training_data_source_id #=> String
1574
+ # resp.results[0].created_by_iam_user #=> String
1575
+ # resp.results[0].created_at #=> Time
1576
+ # resp.results[0].last_updated_at #=> Time
1577
+ # resp.results[0].name #=> String
1578
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1579
+ # resp.results[0].size_in_bytes #=> Integer
1580
+ # resp.results[0].endpoint_info.peak_requests_per_second #=> Integer
1581
+ # resp.results[0].endpoint_info.created_at #=> Time
1582
+ # resp.results[0].endpoint_info.endpoint_url #=> String
1583
+ # resp.results[0].endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1584
+ # resp.results[0].training_parameters #=> Hash
1585
+ # resp.results[0].training_parameters["StringType"] #=> String
1586
+ # resp.results[0].input_data_location_s3 #=> String
1587
+ # resp.results[0].algorithm #=> String, one of "sgd"
1588
+ # resp.results[0].ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1589
+ # resp.results[0].score_threshold #=> Float
1590
+ # resp.results[0].score_threshold_last_updated_at #=> Time
1591
+ # resp.results[0].message #=> String
1592
+ # resp.results[0].compute_time #=> Integer
1593
+ # resp.results[0].finished_at #=> Time
1594
+ # resp.results[0].started_at #=> Time
1595
+ # resp.next_token #=> String
1596
+ #
1597
+ # @overload describe_ml_models(params = {})
1598
+ # @param [Hash] params ({})
1599
+ def describe_ml_models(params = {}, options = {})
1600
+ req = build_request(:describe_ml_models, params)
1601
+ req.send_request(options)
1602
+ end
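# Editor's note: a hedged sketch (not part of the generated file) that lists
# BINARY models and reports each model's real-time endpoint status; the
# region and filter values are assumptions.
require 'aws-sdk-machinelearning'

client = Aws::MachineLearning::Client.new(region: 'us-east-1') # assumed region
resp = client.describe_ml_models(
  filter_variable: 'MLModelType',
  eq: 'BINARY',
  sort_order: 'dsc'
)
resp.results.each do |m|
  endpoint = m.endpoint_info && m.endpoint_info.endpoint_status
  puts "#{m.ml_model_id} #{m.status} endpoint=#{endpoint || 'NONE'}"
end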
1459
1603
 
1460
- # Returns a `BatchPrediction` that includes detailed metadata, status,
1461
- # and data file information for a `Batch Prediction` request.
1462
- # @option params [required, String] :batch_prediction_id
1463
- # An ID assigned to the `BatchPrediction` at creation.
1464
- # @return [Types::GetBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1465
- #
1466
- # * {Types::GetBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
1467
- # * {Types::GetBatchPredictionOutput#ml_model_id #MLModelId} => String
1468
- # * {Types::GetBatchPredictionOutput#batch_prediction_data_source_id #BatchPredictionDataSourceId} => String
1469
- # * {Types::GetBatchPredictionOutput#input_data_location_s3 #InputDataLocationS3} => String
1470
- # * {Types::GetBatchPredictionOutput#created_by_iam_user #CreatedByIamUser} => String
1471
- # * {Types::GetBatchPredictionOutput#created_at #CreatedAt} => Time
1472
- # * {Types::GetBatchPredictionOutput#last_updated_at #LastUpdatedAt} => Time
1473
- # * {Types::GetBatchPredictionOutput#name #Name} => String
1474
- # * {Types::GetBatchPredictionOutput#status #Status} => String
1475
- # * {Types::GetBatchPredictionOutput#output_uri #OutputUri} => String
1476
- # * {Types::GetBatchPredictionOutput#log_uri #LogUri} => String
1477
- # * {Types::GetBatchPredictionOutput#message #Message} => String
1478
- # * {Types::GetBatchPredictionOutput#compute_time #ComputeTime} => Integer
1479
- # * {Types::GetBatchPredictionOutput#finished_at #FinishedAt} => Time
1480
- # * {Types::GetBatchPredictionOutput#started_at #StartedAt} => Time
1481
- # * {Types::GetBatchPredictionOutput#total_record_count #TotalRecordCount} => Integer
1482
- # * {Types::GetBatchPredictionOutput#invalid_record_count #InvalidRecordCount} => Integer
1483
- #
1484
- # @example Request syntax with placeholder values
1485
- # resp = client.get_batch_prediction({
1486
- # batch_prediction_id: "EntityId", # required
1487
- # })
1488
- #
1489
- # @example Response structure
1490
- # resp.batch_prediction_id #=> String
1491
- # resp.ml_model_id #=> String
1492
- # resp.batch_prediction_data_source_id #=> String
1493
- # resp.input_data_location_s3 #=> String
1494
- # resp.created_by_iam_user #=> String
1495
- # resp.created_at #=> Time
1496
- # resp.last_updated_at #=> Time
1497
- # resp.name #=> String
1498
- # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1499
- # resp.output_uri #=> String
1500
- # resp.log_uri #=> String
1501
- # resp.message #=> String
1502
- # resp.compute_time #=> Integer
1503
- # resp.finished_at #=> Time
1504
- # resp.started_at #=> Time
1505
- # resp.total_record_count #=> Integer
1506
- # resp.invalid_record_count #=> Integer
1507
- # @overload get_batch_prediction(params = {})
1508
- # @param [Hash] params ({})
1509
- def get_batch_prediction(params = {}, options = {})
1510
- req = build_request(:get_batch_prediction, params)
1511
- req.send_request(options)
1512
- end
1604
+ # Describes one or more of the tags for your Amazon ML object.
1605
+ #
1606
+ # @option params [required, String] :resource_id
1607
+ # The ID of the ML object. For example, `exampleModelId`.
1608
+ #
1609
+ # @option params [required, String] :resource_type
1610
+ # The type of the ML object.
1611
+ #
1612
+ # @return [Types::DescribeTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1613
+ #
1614
+ # * {Types::DescribeTagsOutput#resource_id #resource_id} => String
1615
+ # * {Types::DescribeTagsOutput#resource_type #resource_type} => String
1616
+ # * {Types::DescribeTagsOutput#tags #tags} => Array&lt;Types::Tag&gt;
1617
+ #
1618
+ # @example Request syntax with placeholder values
1619
+ #
1620
+ # resp = client.describe_tags({
1621
+ # resource_id: "EntityId", # required
1622
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
1623
+ # })
1624
+ #
1625
+ # @example Response structure
1626
+ #
1627
+ # resp.resource_id #=> String
1628
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
1629
+ # resp.tags #=> Array
1630
+ # resp.tags[0].key #=> String
1631
+ # resp.tags[0].value #=> String
1632
+ #
1633
+ # @overload describe_tags(params = {})
1634
+ # @param [Hash] params ({})
1635
+ def describe_tags(params = {}, options = {})
1636
+ req = build_request(:describe_tags, params)
1637
+ req.send_request(options)
1638
+ end
1513
1639
 
1514
- # Returns a `DataSource` that includes metadata and data file
1515
- # information, as well as the current status of the `DataSource`.
1516
- #
1517
- # `GetDataSource` provides results in normal or verbose format. The
1518
- # verbose format adds the schema description and the list of files
1519
- # pointed to by the DataSource to the normal format.
1520
- # @option params [required, String] :data_source_id
1521
- # The ID assigned to the `DataSource` at creation.
1522
- # @option params [Boolean] :verbose
1523
- # Specifies whether the `GetDataSource` operation should return
1524
- # `DataSourceSchema`.
1525
- #
1526
- # If true, `DataSourceSchema` is returned.
1527
- #
1528
- # If false, `DataSourceSchema` is not returned.
1529
- # @return [Types::GetDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1530
- #
1531
- # * {Types::GetDataSourceOutput#data_source_id #DataSourceId} => String
1532
- # * {Types::GetDataSourceOutput#data_location_s3 #DataLocationS3} => String
1533
- # * {Types::GetDataSourceOutput#data_rearrangement #DataRearrangement} => String
1534
- # * {Types::GetDataSourceOutput#created_by_iam_user #CreatedByIamUser} => String
1535
- # * {Types::GetDataSourceOutput#created_at #CreatedAt} => Time
1536
- # * {Types::GetDataSourceOutput#last_updated_at #LastUpdatedAt} => Time
1537
- # * {Types::GetDataSourceOutput#data_size_in_bytes #DataSizeInBytes} => Integer
1538
- # * {Types::GetDataSourceOutput#number_of_files #NumberOfFiles} => Integer
1539
- # * {Types::GetDataSourceOutput#name #Name} => String
1540
- # * {Types::GetDataSourceOutput#status #Status} => String
1541
- # * {Types::GetDataSourceOutput#log_uri #LogUri} => String
1542
- # * {Types::GetDataSourceOutput#message #Message} => String
1543
- # * {Types::GetDataSourceOutput#redshift_metadata #RedshiftMetadata} => Types::RedshiftMetadata
1544
- # * {Types::GetDataSourceOutput#rds_metadata #RDSMetadata} => Types::RDSMetadata
1545
- # * {Types::GetDataSourceOutput#role_arn #RoleARN} => String
1546
- # * {Types::GetDataSourceOutput#compute_statistics #ComputeStatistics} => Boolean
1547
- # * {Types::GetDataSourceOutput#compute_time #ComputeTime} => Integer
1548
- # * {Types::GetDataSourceOutput#finished_at #FinishedAt} => Time
1549
- # * {Types::GetDataSourceOutput#started_at #StartedAt} => Time
1550
- # * {Types::GetDataSourceOutput#data_source_schema #DataSourceSchema} => String
1551
- #
1552
- # @example Request syntax with placeholder values
1553
- # resp = client.get_data_source({
1554
- # data_source_id: "EntityId", # required
1555
- # verbose: false,
1556
- # })
1557
- #
1558
- # @example Response structure
1559
- # resp.data_source_id #=> String
1560
- # resp.data_location_s3 #=> String
1561
- # resp.data_rearrangement #=> String
1562
- # resp.created_by_iam_user #=> String
1563
- # resp.created_at #=> Time
1564
- # resp.last_updated_at #=> Time
1565
- # resp.data_size_in_bytes #=> Integer
1566
- # resp.number_of_files #=> Integer
1567
- # resp.name #=> String
1568
- # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1569
- # resp.log_uri #=> String
1570
- # resp.message #=> String
1571
- # resp.redshift_metadata.redshift_database.database_name #=> String
1572
- # resp.redshift_metadata.redshift_database.cluster_identifier #=> String
1573
- # resp.redshift_metadata.database_user_name #=> String
1574
- # resp.redshift_metadata.select_sql_query #=> String
1575
- # resp.rds_metadata.database.instance_identifier #=> String
1576
- # resp.rds_metadata.database.database_name #=> String
1577
- # resp.rds_metadata.database_user_name #=> String
1578
- # resp.rds_metadata.select_sql_query #=> String
1579
- # resp.rds_metadata.resource_role #=> String
1580
- # resp.rds_metadata.service_role #=> String
1581
- # resp.rds_metadata.data_pipeline_id #=> String
1582
- # resp.role_arn #=> String
1583
- # resp.compute_statistics #=> Boolean
1584
- # resp.compute_time #=> Integer
1585
- # resp.finished_at #=> Time
1586
- # resp.started_at #=> Time
1587
- # resp.data_source_schema #=> String
1588
- # @overload get_data_source(params = {})
1589
- # @param [Hash] params ({})
1590
- def get_data_source(params = {}, options = {})
1591
- req = build_request(:get_data_source, params)
1592
- req.send_request(options)
1593
- end
1640
+ # Returns a `BatchPrediction` that includes detailed metadata, status,
1641
+ # and data file information for a `Batch Prediction` request.
1642
+ #
1643
+ # @option params [required, String] :batch_prediction_id
1644
+ # An ID assigned to the `BatchPrediction` at creation.
1645
+ #
1646
+ # @return [Types::GetBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1647
+ #
1648
+ # * {Types::GetBatchPredictionOutput#batch_prediction_id #batch_prediction_id} => String
1649
+ # * {Types::GetBatchPredictionOutput#ml_model_id #ml_model_id} => String
1650
+ # * {Types::GetBatchPredictionOutput#batch_prediction_data_source_id #batch_prediction_data_source_id} => String
1651
+ # * {Types::GetBatchPredictionOutput#input_data_location_s3 #input_data_location_s3} => String
1652
+ # * {Types::GetBatchPredictionOutput#created_by_iam_user #created_by_iam_user} => String
1653
+ # * {Types::GetBatchPredictionOutput#created_at #created_at} => Time
1654
+ # * {Types::GetBatchPredictionOutput#last_updated_at #last_updated_at} => Time
1655
+ # * {Types::GetBatchPredictionOutput#name #name} => String
1656
+ # * {Types::GetBatchPredictionOutput#status #status} => String
1657
+ # * {Types::GetBatchPredictionOutput#output_uri #output_uri} => String
1658
+ # * {Types::GetBatchPredictionOutput#log_uri #log_uri} => String
1659
+ # * {Types::GetBatchPredictionOutput#message #message} => String
1660
+ # * {Types::GetBatchPredictionOutput#compute_time #compute_time} => Integer
1661
+ # * {Types::GetBatchPredictionOutput#finished_at #finished_at} => Time
1662
+ # * {Types::GetBatchPredictionOutput#started_at #started_at} => Time
1663
+ # * {Types::GetBatchPredictionOutput#total_record_count #total_record_count} => Integer
1664
+ # * {Types::GetBatchPredictionOutput#invalid_record_count #invalid_record_count} => Integer
1665
+ #
1666
+ # @example Request syntax with placeholder values
1667
+ #
1668
+ # resp = client.get_batch_prediction({
1669
+ # batch_prediction_id: "EntityId", # required
1670
+ # })
1671
+ #
1672
+ # @example Response structure
1673
+ #
1674
+ # resp.batch_prediction_id #=> String
1675
+ # resp.ml_model_id #=> String
1676
+ # resp.batch_prediction_data_source_id #=> String
1677
+ # resp.input_data_location_s3 #=> String
1678
+ # resp.created_by_iam_user #=> String
1679
+ # resp.created_at #=> Time
1680
+ # resp.last_updated_at #=> Time
1681
+ # resp.name #=> String
1682
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1683
+ # resp.output_uri #=> String
1684
+ # resp.log_uri #=> String
1685
+ # resp.message #=> String
1686
+ # resp.compute_time #=> Integer
1687
+ # resp.finished_at #=> Time
1688
+ # resp.started_at #=> Time
1689
+ # resp.total_record_count #=> Integer
1690
+ # resp.invalid_record_count #=> Integer
1691
+ #
1692
+ # @overload get_batch_prediction(params = {})
1693
+ # @param [Hash] params ({})
1694
+ def get_batch_prediction(params = {}, options = {})
1695
+ req = build_request(:get_batch_prediction, params)
1696
+ req.send_request(options)
1697
+ end
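# Editor's note: a hedged polling sketch (not part of the generated file)
# built on get_batch_prediction; the batch prediction ID and the polling
# interval are assumptions.
require 'aws-sdk-machinelearning'

client = Aws::MachineLearning::Client.new(region: 'us-east-1') # assumed region
bp_id = 'bp-examplePredictionId'  # placeholder ID
loop do
  resp = client.get_batch_prediction(batch_prediction_id: bp_id)
  puts "#{bp_id}: #{resp.status}"
  break if %w[COMPLETED FAILED DELETED].include?(resp.status)
  sleep 30  # arbitrary polling interval
end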
1594
1698
 
1595
- # Returns an `Evaluation` that includes metadata as well as the current
1596
- # status of the `Evaluation`.
1597
- # @option params [required, String] :evaluation_id
1598
- # The ID of the `Evaluation` to retrieve. The evaluation of each
1599
- # `MLModel` is recorded and cataloged. The ID provides the means to
1600
- # access the information.
1601
- # @return [Types::GetEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1602
- #
1603
- # * {Types::GetEvaluationOutput#evaluation_id #EvaluationId} => String
1604
- # * {Types::GetEvaluationOutput#ml_model_id #MLModelId} => String
1605
- # * {Types::GetEvaluationOutput#evaluation_data_source_id #EvaluationDataSourceId} => String
1606
- # * {Types::GetEvaluationOutput#input_data_location_s3 #InputDataLocationS3} => String
1607
- # * {Types::GetEvaluationOutput#created_by_iam_user #CreatedByIamUser} => String
1608
- # * {Types::GetEvaluationOutput#created_at #CreatedAt} => Time
1609
- # * {Types::GetEvaluationOutput#last_updated_at #LastUpdatedAt} => Time
1610
- # * {Types::GetEvaluationOutput#name #Name} => String
1611
- # * {Types::GetEvaluationOutput#status #Status} => String
1612
- # * {Types::GetEvaluationOutput#performance_metrics #PerformanceMetrics} => Types::PerformanceMetrics
1613
- # * {Types::GetEvaluationOutput#log_uri #LogUri} => String
1614
- # * {Types::GetEvaluationOutput#message #Message} => String
1615
- # * {Types::GetEvaluationOutput#compute_time #ComputeTime} => Integer
1616
- # * {Types::GetEvaluationOutput#finished_at #FinishedAt} => Time
1617
- # * {Types::GetEvaluationOutput#started_at #StartedAt} => Time
1618
- #
1619
- # @example Request syntax with placeholder values
1620
- # resp = client.get_evaluation({
1621
- # evaluation_id: "EntityId", # required
1622
- # })
1623
- #
1624
- # @example Response structure
1625
- # resp.evaluation_id #=> String
1626
- # resp.ml_model_id #=> String
1627
- # resp.evaluation_data_source_id #=> String
1628
- # resp.input_data_location_s3 #=> String
1629
- # resp.created_by_iam_user #=> String
1630
- # resp.created_at #=> Time
1631
- # resp.last_updated_at #=> Time
1632
- # resp.name #=> String
1633
- # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1634
- # resp.performance_metrics.properties #=> Hash
1635
- # resp.performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1636
- # resp.log_uri #=> String
1637
- # resp.message #=> String
1638
- # resp.compute_time #=> Integer
1639
- # resp.finished_at #=> Time
1640
- # resp.started_at #=> Time
1641
- # @overload get_evaluation(params = {})
1642
- # @param [Hash] params ({})
1643
- def get_evaluation(params = {}, options = {})
1644
- req = build_request(:get_evaluation, params)
1645
- req.send_request(options)
1646
- end
1699
+ # Returns a `DataSource` that includes metadata and data file
1700
+ # information, as well as the current status of the `DataSource`.
1701
+ #
1702
+ # `GetDataSource` provides results in normal or verbose format. The
1703
+ # verbose format adds the schema description and the list of files
1704
+ # pointed to by the DataSource to the normal format.
1705
+ #
1706
+ # @option params [required, String] :data_source_id
1707
+ # The ID assigned to the `DataSource` at creation.
1708
+ #
1709
+ # @option params [Boolean] :verbose
1710
+ # Specifies whether the `GetDataSource` operation should return
1711
+ # `DataSourceSchema`.
1712
+ #
1713
+ # If true, `DataSourceSchema` is returned.
1714
+ #
1715
+ # If false, `DataSourceSchema` is not returned.
1716
+ #
1717
+ # @return [Types::GetDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1718
+ #
1719
+ # * {Types::GetDataSourceOutput#data_source_id #data_source_id} => String
1720
+ # * {Types::GetDataSourceOutput#data_location_s3 #data_location_s3} => String
1721
+ # * {Types::GetDataSourceOutput#data_rearrangement #data_rearrangement} => String
1722
+ # * {Types::GetDataSourceOutput#created_by_iam_user #created_by_iam_user} => String
1723
+ # * {Types::GetDataSourceOutput#created_at #created_at} => Time
1724
+ # * {Types::GetDataSourceOutput#last_updated_at #last_updated_at} => Time
1725
+ # * {Types::GetDataSourceOutput#data_size_in_bytes #data_size_in_bytes} => Integer
1726
+ # * {Types::GetDataSourceOutput#number_of_files #number_of_files} => Integer
1727
+ # * {Types::GetDataSourceOutput#name #name} => String
1728
+ # * {Types::GetDataSourceOutput#status #status} => String
1729
+ # * {Types::GetDataSourceOutput#log_uri #log_uri} => String
1730
+ # * {Types::GetDataSourceOutput#message #message} => String
1731
+ # * {Types::GetDataSourceOutput#redshift_metadata #redshift_metadata} => Types::RedshiftMetadata
1732
+ # * {Types::GetDataSourceOutput#rds_metadata #rds_metadata} => Types::RDSMetadata
1733
+ # * {Types::GetDataSourceOutput#role_arn #role_arn} => String
1734
+ # * {Types::GetDataSourceOutput#compute_statistics #compute_statistics} => Boolean
1735
+ # * {Types::GetDataSourceOutput#compute_time #compute_time} => Integer
1736
+ # * {Types::GetDataSourceOutput#finished_at #finished_at} => Time
1737
+ # * {Types::GetDataSourceOutput#started_at #started_at} => Time
1738
+ # * {Types::GetDataSourceOutput#data_source_schema #data_source_schema} => String
1739
+ #
1740
+ # @example Request syntax with placeholder values
1741
+ #
1742
+ # resp = client.get_data_source({
1743
+ # data_source_id: "EntityId", # required
1744
+ # verbose: false,
1745
+ # })
1746
+ #
1747
+ # @example Response structure
1748
+ #
1749
+ # resp.data_source_id #=> String
1750
+ # resp.data_location_s3 #=> String
1751
+ # resp.data_rearrangement #=> String
1752
+ # resp.created_by_iam_user #=> String
1753
+ # resp.created_at #=> Time
1754
+ # resp.last_updated_at #=> Time
1755
+ # resp.data_size_in_bytes #=> Integer
1756
+ # resp.number_of_files #=> Integer
1757
+ # resp.name #=> String
1758
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1759
+ # resp.log_uri #=> String
1760
+ # resp.message #=> String
1761
+ # resp.redshift_metadata.redshift_database.database_name #=> String
1762
+ # resp.redshift_metadata.redshift_database.cluster_identifier #=> String
1763
+ # resp.redshift_metadata.database_user_name #=> String
1764
+ # resp.redshift_metadata.select_sql_query #=> String
1765
+ # resp.rds_metadata.database.instance_identifier #=> String
1766
+ # resp.rds_metadata.database.database_name #=> String
1767
+ # resp.rds_metadata.database_user_name #=> String
1768
+ # resp.rds_metadata.select_sql_query #=> String
1769
+ # resp.rds_metadata.resource_role #=> String
1770
+ # resp.rds_metadata.service_role #=> String
1771
+ # resp.rds_metadata.data_pipeline_id #=> String
1772
+ # resp.role_arn #=> String
1773
+ # resp.compute_statistics #=> Boolean
1774
+ # resp.compute_time #=> Integer
1775
+ # resp.finished_at #=> Time
1776
+ # resp.started_at #=> Time
1777
+ # resp.data_source_schema #=> String
1778
+ #
1779
+ # @overload get_data_source(params = {})
1780
+ # @param [Hash] params ({})
1781
+ def get_data_source(params = {}, options = {})
1782
+ req = build_request(:get_data_source, params)
1783
+ req.send_request(options)
1784
+ end
1647
1785
 
1648
- # Returns an `MLModel` that includes detailed metadata, data source
1649
- # information, and the current status of the `MLModel`.
1650
- #
1651
- # `GetMLModel` provides results in normal or verbose format.
1652
- # @option params [required, String] :ml_model_id
1653
- # The ID assigned to the `MLModel` at creation.
1654
- # @option params [Boolean] :verbose
1655
- # Specifies whether the `GetMLModel` operation should return `Recipe`.
1656
- #
1657
- # If true, `Recipe` is returned.
1658
- #
1659
- # If false, `Recipe` is not returned.
1660
- # @return [Types::GetMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1661
- #
1662
- # * {Types::GetMLModelOutput#ml_model_id #MLModelId} => String
1663
- # * {Types::GetMLModelOutput#training_data_source_id #TrainingDataSourceId} => String
1664
- # * {Types::GetMLModelOutput#created_by_iam_user #CreatedByIamUser} => String
1665
- # * {Types::GetMLModelOutput#created_at #CreatedAt} => Time
1666
- # * {Types::GetMLModelOutput#last_updated_at #LastUpdatedAt} => Time
1667
- # * {Types::GetMLModelOutput#name #Name} => String
1668
- # * {Types::GetMLModelOutput#status #Status} => String
1669
- # * {Types::GetMLModelOutput#size_in_bytes #SizeInBytes} => Integer
1670
- # * {Types::GetMLModelOutput#endpoint_info #EndpointInfo} => Types::RealtimeEndpointInfo
1671
- # * {Types::GetMLModelOutput#training_parameters #TrainingParameters} => Hash&lt;String,String&gt;
1672
- # * {Types::GetMLModelOutput#input_data_location_s3 #InputDataLocationS3} => String
1673
- # * {Types::GetMLModelOutput#ml_model_type #MLModelType} => String
1674
- # * {Types::GetMLModelOutput#score_threshold #ScoreThreshold} => Float
1675
- # * {Types::GetMLModelOutput#score_threshold_last_updated_at #ScoreThresholdLastUpdatedAt} => Time
1676
- # * {Types::GetMLModelOutput#log_uri #LogUri} => String
1677
- # * {Types::GetMLModelOutput#message #Message} => String
1678
- # * {Types::GetMLModelOutput#compute_time #ComputeTime} => Integer
1679
- # * {Types::GetMLModelOutput#finished_at #FinishedAt} => Time
1680
- # * {Types::GetMLModelOutput#started_at #StartedAt} => Time
1681
- # * {Types::GetMLModelOutput#recipe #Recipe} => String
1682
- # * {Types::GetMLModelOutput#schema #Schema} => String
1683
- #
1684
- # @example Request syntax with placeholder values
1685
- # resp = client.get_ml_model({
1686
- # ml_model_id: "EntityId", # required
1687
- # verbose: false,
1688
- # })
1689
- #
1690
- # @example Response structure
1691
- # resp.ml_model_id #=> String
1692
- # resp.training_data_source_id #=> String
1693
- # resp.created_by_iam_user #=> String
1694
- # resp.created_at #=> Time
1695
- # resp.last_updated_at #=> Time
1696
- # resp.name #=> String
1697
- # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1698
- # resp.size_in_bytes #=> Integer
1699
- # resp.endpoint_info.peak_requests_per_second #=> Integer
1700
- # resp.endpoint_info.created_at #=> Time
1701
- # resp.endpoint_info.endpoint_url #=> String
1702
- # resp.endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1703
- # resp.training_parameters #=> Hash
1704
- # resp.training_parameters["StringType"] #=> String
1705
- # resp.input_data_location_s3 #=> String
1706
- # resp.ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1707
- # resp.score_threshold #=> Float
1708
- # resp.score_threshold_last_updated_at #=> Time
1709
- # resp.log_uri #=> String
1710
- # resp.message #=> String
1711
- # resp.compute_time #=> Integer
1712
- # resp.finished_at #=> Time
1713
- # resp.started_at #=> Time
1714
- # resp.recipe #=> String
1715
- # resp.schema #=> String
1716
- # @overload get_ml_model(params = {})
1717
- # @param [Hash] params ({})
1718
- def get_ml_model(params = {}, options = {})
1719
- req = build_request(:get_ml_model, params)
1720
- req.send_request(options)
1721
- end
1786
+ # Returns an `Evaluation` that includes metadata as well as the current
1787
+ # status of the `Evaluation`.
1788
+ #
1789
+ # @option params [required, String] :evaluation_id
1790
+ # The ID of the `Evaluation` to retrieve. The evaluation of each
1791
+ # `MLModel` is recorded and cataloged. The ID provides the means to
1792
+ # access the information.
1793
+ #
1794
+ # @return [Types::GetEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1795
+ #
1796
+ # * {Types::GetEvaluationOutput#evaluation_id #evaluation_id} => String
1797
+ # * {Types::GetEvaluationOutput#ml_model_id #ml_model_id} => String
1798
+ # * {Types::GetEvaluationOutput#evaluation_data_source_id #evaluation_data_source_id} => String
1799
+ # * {Types::GetEvaluationOutput#input_data_location_s3 #input_data_location_s3} => String
1800
+ # * {Types::GetEvaluationOutput#created_by_iam_user #created_by_iam_user} => String
1801
+ # * {Types::GetEvaluationOutput#created_at #created_at} => Time
1802
+ # * {Types::GetEvaluationOutput#last_updated_at #last_updated_at} => Time
1803
+ # * {Types::GetEvaluationOutput#name #name} => String
1804
+ # * {Types::GetEvaluationOutput#status #status} => String
1805
+ # * {Types::GetEvaluationOutput#performance_metrics #performance_metrics} => Types::PerformanceMetrics
1806
+ # * {Types::GetEvaluationOutput#log_uri #log_uri} => String
1807
+ # * {Types::GetEvaluationOutput#message #message} => String
1808
+ # * {Types::GetEvaluationOutput#compute_time #compute_time} => Integer
1809
+ # * {Types::GetEvaluationOutput#finished_at #finished_at} => Time
1810
+ # * {Types::GetEvaluationOutput#started_at #started_at} => Time
1811
+ #
1812
+ # @example Request syntax with placeholder values
1813
+ #
1814
+ # resp = client.get_evaluation({
1815
+ # evaluation_id: "EntityId", # required
1816
+ # })
1817
+ #
1818
+ # @example Response structure
1819
+ #
1820
+ # resp.evaluation_id #=> String
1821
+ # resp.ml_model_id #=> String
1822
+ # resp.evaluation_data_source_id #=> String
1823
+ # resp.input_data_location_s3 #=> String
1824
+ # resp.created_by_iam_user #=> String
1825
+ # resp.created_at #=> Time
1826
+ # resp.last_updated_at #=> Time
1827
+ # resp.name #=> String
1828
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1829
+ # resp.performance_metrics.properties #=> Hash
1830
+ # resp.performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1831
+ # resp.log_uri #=> String
1832
+ # resp.message #=> String
1833
+ # resp.compute_time #=> Integer
1834
+ # resp.finished_at #=> Time
1835
+ # resp.started_at #=> Time
1836
+ #
1837
+ # @overload get_evaluation(params = {})
1838
+ # @param [Hash] params ({})
1839
+ def get_evaluation(params = {}, options = {})
1840
+ req = build_request(:get_evaluation, params)
1841
+ req.send_request(options)
1842
+ end
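# Editor's note: a hedged sketch (not part of the generated file) that fetches
# one Evaluation and looks up a single metric. The evaluation ID is a
# placeholder, and the "BinaryAUC" key is an assumption that only applies to
# evaluations of binary models.
require 'aws-sdk-machinelearning'

client = Aws::MachineLearning::Client.new(region: 'us-east-1') # assumed region
resp = client.get_evaluation(evaluation_id: 'ev-exampleEvaluationId') # placeholder ID
props = resp.performance_metrics ? resp.performance_metrics.properties : {}
puts "status=#{resp.status} BinaryAUC=#{props['BinaryAUC']}"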
1722
1843
 
1723
- # Generates a prediction for the observation using the specified `ML
1724
- # Model`.
1725
- #
1726
- # <note markdown="1"><title>Note</title> Not all response parameters will be populated. Whether a response
1727
- # parameter is populated depends on the type of model requested.
1728
- #
1729
- # </note>
1730
- # @option params [required, String] :ml_model_id
1731
- # A unique identifier of the `MLModel`.
1732
- # @option params [required, Hash<String,String>] :record
1733
- # A map of variable name-value pairs that represent an observation.
1734
- # @option params [required, String] :predict_endpoint
1735
- # @return [Types::PredictOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1736
- #
1737
- # * {Types::PredictOutput#prediction #Prediction} => Types::Prediction
1738
- #
1739
- # @example Request syntax with placeholder values
1740
- # resp = client.predict({
1741
- # ml_model_id: "EntityId", # required
1742
- # record: { # required
1743
- # "VariableName" => "VariableValue",
1744
- # },
1745
- # predict_endpoint: "VipURL", # required
1746
- # })
1747
- #
1748
- # @example Response structure
1749
- # resp.prediction.predicted_label #=> String
1750
- # resp.prediction.predicted_value #=> Float
1751
- # resp.prediction.predicted_scores #=> Hash
1752
- # resp.prediction.predicted_scores["Label"] #=> Float
1753
- # resp.prediction.details #=> Hash
1754
- # resp.prediction.details["DetailsAttributes"] #=> String
1755
- # @overload predict(params = {})
1756
- # @param [Hash] params ({})
1757
- def predict(params = {}, options = {})
1758
- req = build_request(:predict, params)
1759
- req.send_request(options)
1760
- end
1844
+ # Returns an `MLModel` that includes detailed metadata, data source
1845
+ # information, and the current status of the `MLModel`.
1846
+ #
1847
+ # `GetMLModel` provides results in normal or verbose format.
1848
+ #
1849
+ # @option params [required, String] :ml_model_id
1850
+ # The ID assigned to the `MLModel` at creation.
1851
+ #
1852
+ # @option params [Boolean] :verbose
1853
+ # Specifies whether the `GetMLModel` operation should return `Recipe`.
1854
+ #
1855
+ # If true, `Recipe` is returned.
1856
+ #
1857
+ # If false, `Recipe` is not returned.
1858
+ #
1859
+ # @return [Types::GetMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1860
+ #
1861
+ # * {Types::GetMLModelOutput#ml_model_id #ml_model_id} => String
1862
+ # * {Types::GetMLModelOutput#training_data_source_id #training_data_source_id} => String
1863
+ # * {Types::GetMLModelOutput#created_by_iam_user #created_by_iam_user} => String
1864
+ # * {Types::GetMLModelOutput#created_at #created_at} => Time
1865
+ # * {Types::GetMLModelOutput#last_updated_at #last_updated_at} => Time
1866
+ # * {Types::GetMLModelOutput#name #name} => String
1867
+ # * {Types::GetMLModelOutput#status #status} => String
1868
+ # * {Types::GetMLModelOutput#size_in_bytes #size_in_bytes} => Integer
1869
+ # * {Types::GetMLModelOutput#endpoint_info #endpoint_info} => Types::RealtimeEndpointInfo
1870
+ # * {Types::GetMLModelOutput#training_parameters #training_parameters} => Hash&lt;String,String&gt;
1871
+ # * {Types::GetMLModelOutput#input_data_location_s3 #input_data_location_s3} => String
1872
+ # * {Types::GetMLModelOutput#ml_model_type #ml_model_type} => String
1873
+ # * {Types::GetMLModelOutput#score_threshold #score_threshold} => Float
1874
+ # * {Types::GetMLModelOutput#score_threshold_last_updated_at #score_threshold_last_updated_at} => Time
1875
+ # * {Types::GetMLModelOutput#log_uri #log_uri} => String
1876
+ # * {Types::GetMLModelOutput#message #message} => String
1877
+ # * {Types::GetMLModelOutput#compute_time #compute_time} => Integer
1878
+ # * {Types::GetMLModelOutput#finished_at #finished_at} => Time
1879
+ # * {Types::GetMLModelOutput#started_at #started_at} => Time
1880
+ # * {Types::GetMLModelOutput#recipe #recipe} => String
1881
+ # * {Types::GetMLModelOutput#schema #schema} => String
1882
+ #
1883
+ # @example Request syntax with placeholder values
1884
+ #
1885
+ # resp = client.get_ml_model({
1886
+ # ml_model_id: "EntityId", # required
1887
+ # verbose: false,
1888
+ # })
1889
+ #
1890
+ # @example Response structure
1891
+ #
1892
+ # resp.ml_model_id #=> String
1893
+ # resp.training_data_source_id #=> String
1894
+ # resp.created_by_iam_user #=> String
1895
+ # resp.created_at #=> Time
1896
+ # resp.last_updated_at #=> Time
1897
+ # resp.name #=> String
1898
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1899
+ # resp.size_in_bytes #=> Integer
1900
+ # resp.endpoint_info.peak_requests_per_second #=> Integer
1901
+ # resp.endpoint_info.created_at #=> Time
1902
+ # resp.endpoint_info.endpoint_url #=> String
1903
+ # resp.endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1904
+ # resp.training_parameters #=> Hash
1905
+ # resp.training_parameters["StringType"] #=> String
1906
+ # resp.input_data_location_s3 #=> String
1907
+ # resp.ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1908
+ # resp.score_threshold #=> Float
1909
+ # resp.score_threshold_last_updated_at #=> Time
1910
+ # resp.log_uri #=> String
1911
+ # resp.message #=> String
1912
+ # resp.compute_time #=> Integer
1913
+ # resp.finished_at #=> Time
1914
+ # resp.started_at #=> Time
1915
+ # resp.recipe #=> String
1916
+ # resp.schema #=> String
1917
+ #
1918
+ # @overload get_ml_model(params = {})
1919
+ # @param [Hash] params ({})
1920
+ def get_ml_model(params = {}, options = {})
1921
+ req = build_request(:get_ml_model, params)
1922
+ req.send_request(options)
1923
+ end
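A sketch of `get_ml_model` with `verbose: true`, following the response structure shown above; the model ID is hypothetical.

    require 'aws-sdk-machinelearning'

    client = Aws::MachineLearning::Client.new
    resp = client.get_ml_model(ml_model_id: 'ml-example', verbose: true) # hypothetical ID

    resp.ml_model_type                  #=> "REGRESSION", "BINARY", or "MULTICLASS"
    resp.endpoint_info.endpoint_status  #=> "NONE", "READY", "UPDATING", or "FAILED"
    resp.recipe                         # returned only because verbose: true was passed
    resp.schema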
1761
1924
 
1762
- # Updates the `BatchPredictionName` of a `BatchPrediction`.
1763
- #
1764
- # You can use the `GetBatchPrediction` operation to view the contents of
1765
- # the updated data element.
1766
- # @option params [required, String] :batch_prediction_id
1767
- # The ID assigned to the `BatchPrediction` during creation.
1768
- # @option params [required, String] :batch_prediction_name
1769
- # A new user-supplied name or description of the `BatchPrediction`.
1770
- # @return [Types::UpdateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1771
- #
1772
- # * {Types::UpdateBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
1773
- #
1774
- # @example Request syntax with placeholder values
1775
- # resp = client.update_batch_prediction({
1776
- # batch_prediction_id: "EntityId", # required
1777
- # batch_prediction_name: "EntityName", # required
1778
- # })
1779
- #
1780
- # @example Response structure
1781
- # resp.batch_prediction_id #=> String
1782
- # @overload update_batch_prediction(params = {})
1783
- # @param [Hash] params ({})
1784
- def update_batch_prediction(params = {}, options = {})
1785
- req = build_request(:update_batch_prediction, params)
1786
- req.send_request(options)
1787
- end
1925
+ # Generates a prediction for the observation using the specified `ML
1926
+ # Model`.
1927
+ #
1928
+ # <note markdown="1"><title>Note</title> Not all response parameters will be populated. Whether a response
1929
+ # parameter is populated depends on the type of model requested.
1930
+ #
1931
+ # </note>
1932
+ #
1933
+ # @option params [required, String] :ml_model_id
1934
+ # A unique identifier of the `MLModel`.
1935
+ #
1936
+ # @option params [required, Hash<String,String>] :record
1937
+ # A map of variable name-value pairs that represent an observation.
1938
+ #
1939
+ # @option params [required, String] :predict_endpoint
1940
+ #
1941
+ # @return [Types::PredictOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1942
+ #
1943
+ # * {Types::PredictOutput#prediction #prediction} => Types::Prediction
1944
+ #
1945
+ # @example Request syntax with placeholder values
1946
+ #
1947
+ # resp = client.predict({
1948
+ # ml_model_id: "EntityId", # required
1949
+ # record: { # required
1950
+ # "VariableName" => "VariableValue",
1951
+ # },
1952
+ # predict_endpoint: "VipURL", # required
1953
+ # })
1954
+ #
1955
+ # @example Response structure
1956
+ #
1957
+ # resp.prediction.predicted_label #=> String
1958
+ # resp.prediction.predicted_value #=> Float
1959
+ # resp.prediction.predicted_scores #=> Hash
1960
+ # resp.prediction.predicted_scores["Label"] #=> Float
1961
+ # resp.prediction.details #=> Hash
1962
+ # resp.prediction.details["DetailsAttributes"] #=> String
1963
+ #
1964
+ # @overload predict(params = {})
1965
+ # @param [Hash] params ({})
1966
+ def predict(params = {}, options = {})
1967
+ req = build_request(:predict, params)
1968
+ req.send_request(options)
1969
+ end
1788
1970
 
1789
- # Updates the `DataSourceName` of a `DataSource`.
1790
- #
1791
- # You can use the `GetDataSource` operation to view the contents of the
1792
- # updated data element.
1793
- # @option params [required, String] :data_source_id
1794
- # The ID assigned to the `DataSource` during creation.
1795
- # @option params [required, String] :data_source_name
1796
- # A new user-supplied name or description of the `DataSource` that will
1797
- # replace the current description.
1798
- # @return [Types::UpdateDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1799
- #
1800
- # * {Types::UpdateDataSourceOutput#data_source_id #DataSourceId} => String
1801
- #
1802
- # @example Request syntax with placeholder values
1803
- # resp = client.update_data_source({
1804
- # data_source_id: "EntityId", # required
1805
- # data_source_name: "EntityName", # required
1806
- # })
1807
- #
1808
- # @example Response structure
1809
- # resp.data_source_id #=> String
1810
- # @overload update_data_source(params = {})
1811
- # @param [Hash] params ({})
1812
- def update_data_source(params = {}, options = {})
1813
- req = build_request(:update_data_source, params)
1814
- req.send_request(options)
1815
- end
1971
+ # Updates the `BatchPredictionName` of a `BatchPrediction`.
1972
+ #
1973
+ # You can use the `GetBatchPrediction` operation to view the contents of
1974
+ # the updated data element.
1975
+ #
1976
+ # @option params [required, String] :batch_prediction_id
1977
+ # The ID assigned to the `BatchPrediction` during creation.
1978
+ #
1979
+ # @option params [required, String] :batch_prediction_name
1980
+ # A new user-supplied name or description of the `BatchPrediction`.
1981
+ #
1982
+ # @return [Types::UpdateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1983
+ #
1984
+ # * {Types::UpdateBatchPredictionOutput#batch_prediction_id #batch_prediction_id} => String
1985
+ #
1986
+ # @example Request syntax with placeholder values
1987
+ #
1988
+ # resp = client.update_batch_prediction({
1989
+ # batch_prediction_id: "EntityId", # required
1990
+ # batch_prediction_name: "EntityName", # required
1991
+ # })
1992
+ #
1993
+ # @example Response structure
1994
+ #
1995
+ # resp.batch_prediction_id #=> String
1996
+ #
1997
+ # @overload update_batch_prediction(params = {})
1998
+ # @param [Hash] params ({})
1999
+ def update_batch_prediction(params = {}, options = {})
2000
+ req = build_request(:update_batch_prediction, params)
2001
+ req.send_request(options)
2002
+ end
1816
2003
 
1817
- # Updates the `EvaluationName` of an `Evaluation`.
1818
- #
1819
- # You can use the `GetEvaluation` operation to view the contents of the
1820
- # updated data element.
1821
- # @option params [required, String] :evaluation_id
1822
- # The ID assigned to the `Evaluation` during creation.
1823
- # @option params [required, String] :evaluation_name
1824
- # A new user-supplied name or description of the `Evaluation` that will
1825
- # replace the current content.
1826
- # @return [Types::UpdateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1827
- #
1828
- # * {Types::UpdateEvaluationOutput#evaluation_id #EvaluationId} => String
1829
- #
1830
- # @example Request syntax with placeholder values
1831
- # resp = client.update_evaluation({
1832
- # evaluation_id: "EntityId", # required
1833
- # evaluation_name: "EntityName", # required
1834
- # })
1835
- #
1836
- # @example Response structure
1837
- # resp.evaluation_id #=> String
1838
- # @overload update_evaluation(params = {})
1839
- # @param [Hash] params ({})
1840
- def update_evaluation(params = {}, options = {})
1841
- req = build_request(:update_evaluation, params)
1842
- req.send_request(options)
1843
- end
2004
+ # Updates the `DataSourceName` of a `DataSource`.
2005
+ #
2006
+ # You can use the `GetDataSource` operation to view the contents of the
2007
+ # updated data element.
2008
+ #
2009
+ # @option params [required, String] :data_source_id
2010
+ # The ID assigned to the `DataSource` during creation.
2011
+ #
2012
+ # @option params [required, String] :data_source_name
2013
+ # A new user-supplied name or description of the `DataSource` that will
2014
+ # replace the current description.
2015
+ #
2016
+ # @return [Types::UpdateDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2017
+ #
2018
+ # * {Types::UpdateDataSourceOutput#data_source_id #data_source_id} => String
2019
+ #
2020
+ # @example Request syntax with placeholder values
2021
+ #
2022
+ # resp = client.update_data_source({
2023
+ # data_source_id: "EntityId", # required
2024
+ # data_source_name: "EntityName", # required
2025
+ # })
2026
+ #
2027
+ # @example Response structure
2028
+ #
2029
+ # resp.data_source_id #=> String
2030
+ #
2031
+ # @overload update_data_source(params = {})
2032
+ # @param [Hash] params ({})
2033
+ def update_data_source(params = {}, options = {})
2034
+ req = build_request(:update_data_source, params)
2035
+ req.send_request(options)
2036
+ end
1844
2037
 
1845
- # Updates the `MLModelName` and the `ScoreThreshold` of an `MLModel`.
1846
- #
1847
- # You can use the `GetMLModel` operation to view the contents of the
1848
- # updated data element.
1849
- # @option params [required, String] :ml_model_id
1850
- # The ID assigned to the `MLModel` during creation.
1851
- # @option params [String] :ml_model_name
1852
- # A user-supplied name or description of the `MLModel`.
1853
- # @option params [Float] :score_threshold
1854
- # The `ScoreThreshold` used in binary classification `MLModel` that
1855
- # marks the boundary between a positive prediction and a negative
1856
- # prediction.
1857
- #
1858
- # Output values greater than or equal to the `ScoreThreshold` receive a
1859
- # positive result from the `MLModel`, such as `true`. Output values less
1860
- # than the `ScoreThreshold` receive a negative response from the
1861
- # `MLModel`, such as `false`.
1862
- # @return [Types::UpdateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1863
- #
1864
- # * {Types::UpdateMLModelOutput#ml_model_id #MLModelId} => String
1865
- #
1866
- # @example Request syntax with placeholder values
1867
- # resp = client.update_ml_model({
1868
- # ml_model_id: "EntityId", # required
1869
- # ml_model_name: "EntityName",
1870
- # score_threshold: 1.0,
1871
- # })
1872
- #
1873
- # @example Response structure
1874
- # resp.ml_model_id #=> String
1875
- # @overload update_ml_model(params = {})
1876
- # @param [Hash] params ({})
1877
- def update_ml_model(params = {}, options = {})
1878
- req = build_request(:update_ml_model, params)
1879
- req.send_request(options)
1880
- end
2038
+ # Updates the `EvaluationName` of an `Evaluation`.
2039
+ #
2040
+ # You can use the `GetEvaluation` operation to view the contents of the
2041
+ # updated data element.
2042
+ #
2043
+ # @option params [required, String] :evaluation_id
2044
+ # The ID assigned to the `Evaluation` during creation.
2045
+ #
2046
+ # @option params [required, String] :evaluation_name
2047
+ # A new user-supplied name or description of the `Evaluation` that will
2048
+ # replace the current content.
2049
+ #
2050
+ # @return [Types::UpdateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2051
+ #
2052
+ # * {Types::UpdateEvaluationOutput#evaluation_id #evaluation_id} => String
2053
+ #
2054
+ # @example Request syntax with placeholder values
2055
+ #
2056
+ # resp = client.update_evaluation({
2057
+ # evaluation_id: "EntityId", # required
2058
+ # evaluation_name: "EntityName", # required
2059
+ # })
2060
+ #
2061
+ # @example Response structure
2062
+ #
2063
+ # resp.evaluation_id #=> String
2064
+ #
2065
+ # @overload update_evaluation(params = {})
2066
+ # @param [Hash] params ({})
2067
+ def update_evaluation(params = {}, options = {})
2068
+ req = build_request(:update_evaluation, params)
2069
+ req.send_request(options)
2070
+ end
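The three rename operations above (`update_batch_prediction`, `update_data_source`, `update_evaluation`) share the same update-then-verify pattern; a sketch using `update_evaluation`, with hypothetical IDs and names.

    require 'aws-sdk-machinelearning'

    client = Aws::MachineLearning::Client.new
    client.update_evaluation(
      evaluation_id: 'ev-example',             # hypothetical ID
      evaluation_name: 'holdout-eval-2017-05'  # hypothetical new name
    )

    # Verify the change with the corresponding Get* operation
    client.get_evaluation(evaluation_id: 'ev-example').name
    #=> "holdout-eval-2017-05"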
1881
2071
 
1882
- # @!endgroup
2072
+ # Updates the `MLModelName` and the `ScoreThreshold` of an `MLModel`.
2073
+ #
2074
+ # You can use the `GetMLModel` operation to view the contents of the
2075
+ # updated data element.
2076
+ #
2077
+ # @option params [required, String] :ml_model_id
2078
+ # The ID assigned to the `MLModel` during creation.
2079
+ #
2080
+ # @option params [String] :ml_model_name
2081
+ # A user-supplied name or description of the `MLModel`.
2082
+ #
2083
+ # @option params [Float] :score_threshold
2084
+ # The `ScoreThreshold` used in a binary classification `MLModel` that
2085
+ # marks the boundary between a positive prediction and a negative
2086
+ # prediction.
2087
+ #
2088
+ # Output values greater than or equal to the `ScoreThreshold` receive a
2089
+ # positive result from the `MLModel`, such as `true`. Output values less
2090
+ # than the `ScoreThreshold` receive a negative response from the
2091
+ # `MLModel`, such as `false`.
2092
+ #
2093
+ # @return [Types::UpdateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2094
+ #
2095
+ # * {Types::UpdateMLModelOutput#ml_model_id #ml_model_id} => String
2096
+ #
2097
+ # @example Request syntax with placeholder values
2098
+ #
2099
+ # resp = client.update_ml_model({
2100
+ # ml_model_id: "EntityId", # required
2101
+ # ml_model_name: "EntityName",
2102
+ # score_threshold: 1.0,
2103
+ # })
2104
+ #
2105
+ # @example Response structure
2106
+ #
2107
+ # resp.ml_model_id #=> String
2108
+ #
2109
+ # @overload update_ml_model(params = {})
2110
+ # @param [Hash] params ({})
2111
+ def update_ml_model(params = {}, options = {})
2112
+ req = build_request(:update_ml_model, params)
2113
+ req.send_request(options)
2114
+ end
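A sketch of adjusting the score threshold on a binary model, per the semantics described above (scores at or above the threshold map to the positive label); the ID, name, and threshold value are assumptions.

    require 'aws-sdk-machinelearning'

    client = Aws::MachineLearning::Client.new
    client.update_ml_model(
      ml_model_id: 'ml-example',       # hypothetical binary-classification model
      ml_model_name: 'churn-model-v2', # hypothetical new name
      score_threshold: 0.75            # scores >= 0.75 now predict the positive class
    )

    client.get_ml_model(ml_model_id: 'ml-example').score_threshold #=> 0.75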
1883
2115
 
1884
- # @param params ({})
1885
- # @api private
1886
- def build_request(operation_name, params = {})
1887
- handlers = @handlers.for(operation_name)
1888
- context = Seahorse::Client::RequestContext.new(
1889
- operation_name: operation_name,
1890
- operation: config.api.operation(operation_name),
1891
- client: self,
1892
- params: params,
1893
- config: config)
1894
- context[:gem_name] = 'aws-sdk-machinelearning'
1895
- context[:gem_version] = '1.0.0.rc1'
1896
- Seahorse::Client::Request.new(handlers, context)
1897
- end
2116
+ # @!endgroup
1898
2117
 
1899
- # Polls an API operation until a resource enters a desired state.
1900
- #
1901
- # ## Basic Usage
1902
- #
1903
- # A waiter will call an API operation until:
1904
- #
1905
- # * It is successful
1906
- # * It enters a terminal state
1907
- # * It makes the maximum number of attempts
1908
- #
1909
- # In between attempts, the waiter will sleep.
1910
- #
1911
- # # polls in a loop, sleeping between attempts
1912
- # client.wait_until(waiter_name, params)
1913
- #
1914
- # ## Configuration
1915
- #
1916
- # You can configure the maximum number of polling attempts, and the
1917
- # delay (in seconds) between each polling attempt. You can pass
1918
- # configuration as the final arguments hash.
1919
- #
1920
- # # poll for ~25 seconds
1921
- # client.wait_until(waiter_name, params, {
1922
- # max_attempts: 5,
1923
- # delay: 5,
1924
- # })
1925
- #
1926
- # ## Callbacks
1927
- #
1928
- # You can be notified before each polling attempt and before each
1929
- # delay. If you throw `:success` or `:failure` from these callbacks,
1930
- # it will terminate the waiter.
1931
- #
1932
- # started_at = Time.now
1933
- # client.wait_until(waiter_name, params, {
1934
- #
1935
- # # disable max attempts
1936
- # max_attempts: nil,
1937
- #
1938
- # # poll for 1 hour, instead of a number of attempts
1939
- # before_wait: -> (attempts, response) do
1940
- # throw :failure if Time.now - started_at > 3600
1941
- # end
1942
- # })
1943
- #
1944
- # ## Handling Errors
1945
- #
1946
- # When a waiter is unsuccessful, it will raise an error.
1947
- # All of the failure errors extend from
1948
- # {Aws::Waiters::Errors::WaiterFailed}.
1949
- #
1950
- # begin
1951
- # client.wait_until(...)
1952
- # rescue Aws::Waiters::Errors::WaiterFailed
1953
- # # resource did not enter the desired state in time
1954
- # end
1955
- #
1956
- # ## Valid Waiters
1957
- #
1958
- # The following table lists the valid waiter names, the operations they call,
1959
- # and the default `:delay` and `:max_attempts` values.
1960
- #
1961
- # | waiter_name | params | :delay | :max_attempts |
1962
- # | -------------------------- | ----------------------------- | -------- | ------------- |
1963
- # | batch_prediction_available | {#describe_batch_predictions} | 30 | 60 |
1964
- # | data_source_available | {#describe_data_sources} | 30 | 60 |
1965
- # | evaluation_available | {#describe_evaluations} | 30 | 60 |
1966
- # | ml_model_available | {#describe_ml_models} | 30 | 60 |
1967
- #
1968
- # @raise [Errors::FailureStateError] Raised when the waiter terminates
1969
- # because the waiter has entered a state that it will not transition
1970
- # out of, preventing success.
1971
- #
1972
- # @raise [Errors::TooManyAttemptsError] Raised when the configured
1973
- # maximum number of attempts have been made, and the waiter is not
1974
- # yet successful.
1975
- #
1976
- # @raise [Errors::UnexpectedError] Raised when an error is encounted
1977
- # while polling for a resource that is not expected.
1978
- #
1979
- # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
1980
- # for an unknown state.
1981
- #
1982
- # @return [Boolean] Returns `true` if the waiter was successful.
1983
- # @param [Symbol] waiter_name
1984
- # @param [Hash] params ({})
1985
- # @param [Hash] options ({})
1986
- # @option options [Integer] :max_attempts
1987
- # @option options [Integer] :delay
1988
- # @option options [Proc] :before_attempt
1989
- # @option options [Proc] :before_wait
1990
- def wait_until(waiter_name, params = {}, options = {})
1991
- w = waiter(waiter_name, options)
1992
- yield(w.waiter) if block_given? # deprecated
1993
- w.wait(params)
1994
- end
2118
+ # @param params ({})
2119
+ # @api private
2120
+ def build_request(operation_name, params = {})
2121
+ handlers = @handlers.for(operation_name)
2122
+ context = Seahorse::Client::RequestContext.new(
2123
+ operation_name: operation_name,
2124
+ operation: config.api.operation(operation_name),
2125
+ client: self,
2126
+ params: params,
2127
+ config: config)
2128
+ context[:gem_name] = 'aws-sdk-machinelearning'
2129
+ context[:gem_version] = '1.0.0.rc2'
2130
+ Seahorse::Client::Request.new(handlers, context)
2131
+ end
1995
2132
 
1996
- # @api private
1997
- # @deprecated
1998
- def waiter_names
1999
- waiters.keys
2000
- end
2133
+ # Polls an API operation until a resource enters a desired state.
2134
+ #
2135
+ # ## Basic Usage
2136
+ #
2137
+ # A waiter will call an API operation until:
2138
+ #
2139
+ # * It is successful
2140
+ # * It enters a terminal state
2141
+ # * It makes the maximum number of attempts
2142
+ #
2143
+ # In between attempts, the waiter will sleep.
2144
+ #
2145
+ # # polls in a loop, sleeping between attempts
2146
+ # client.wait_until(waiter_name, params)
2147
+ #
2148
+ # ## Configuration
2149
+ #
2150
+ # You can configure the maximum number of polling attempts, and the
2151
+ # delay (in seconds) between each polling attempt. You can pass
2152
+ # configuration as the final arguments hash.
2153
+ #
2154
+ # # poll for ~25 seconds
2155
+ # client.wait_until(waiter_name, params, {
2156
+ # max_attempts: 5,
2157
+ # delay: 5,
2158
+ # })
2159
+ #
2160
+ # ## Callbacks
2161
+ #
2162
+ # You can be notified before each polling attempt and before each
2163
+ # delay. If you throw `:success` or `:failure` from these callbacks,
2164
+ # it will terminate the waiter.
2165
+ #
2166
+ # started_at = Time.now
2167
+ # client.wait_until(waiter_name, params, {
2168
+ #
2169
+ # # disable max attempts
2170
+ # max_attempts: nil,
2171
+ #
2172
+ # # poll for 1 hour, instead of a number of attempts
2173
+ # before_wait: -> (attempts, response) do
2174
+ # throw :failure if Time.now - started_at > 3600
2175
+ # end
2176
+ # })
2177
+ #
2178
+ # ## Handling Errors
2179
+ #
2180
+ # When a waiter is unsuccessful, it will raise an error.
2181
+ # All of the failure errors extend from
2182
+ # {Aws::Waiters::Errors::WaiterFailed}.
2183
+ #
2184
+ # begin
2185
+ # client.wait_until(...)
2186
+ # rescue Aws::Waiters::Errors::WaiterFailed
2187
+ # # resource did not enter the desired state in time
2188
+ # end
2189
+ #
2190
+ # ## Valid Waiters
2191
+ #
2192
+ # The following table lists the valid waiter names, the operations they call,
2193
+ # and the default `:delay` and `:max_attempts` values.
2194
+ #
2195
+ # | waiter_name | params | :delay | :max_attempts |
2196
+ # | -------------------------- | ----------------------------- | -------- | ------------- |
2197
+ # | batch_prediction_available | {#describe_batch_predictions} | 30 | 60 |
2198
+ # | data_source_available | {#describe_data_sources} | 30 | 60 |
2199
+ # | evaluation_available | {#describe_evaluations} | 30 | 60 |
2200
+ # | ml_model_available | {#describe_ml_models} | 30 | 60 |
2201
+ #
2202
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
2203
+ # because the waiter has entered a state that it will not transition
2204
+ # out of, preventing success.
2205
+ #
2206
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
2207
+ # maximum number of attempts have been made, and the waiter is not
2208
+ # yet successful.
2209
+ #
2210
+ # @raise [Errors::UnexpectedError] Raised when an unexpected error is
2211
+ # encountered while polling for a resource.
2212
+ #
2213
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
2214
+ # for an unknown state.
2215
+ #
2216
+ # @return [Boolean] Returns `true` if the waiter was successful.
2217
+ # @param [Symbol] waiter_name
2218
+ # @param [Hash] params ({})
2219
+ # @param [Hash] options ({})
2220
+ # @option options [Integer] :max_attempts
2221
+ # @option options [Integer] :delay
2222
+ # @option options [Proc] :before_attempt
2223
+ # @option options [Proc] :before_wait
2224
+ def wait_until(waiter_name, params = {}, options = {})
2225
+ w = waiter(waiter_name, options)
2226
+ yield(w.waiter) if block_given? # deprecated
2227
+ w.wait(params)
2228
+ end
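A sketch of polling with the `ml_model_available` waiter from the table above. The waiter forwards its params to `describe_ml_models`, so the `filter_variable`/`eq` filter shown here is an assumption about how to narrow the poll to a single model; IDs and names are hypothetical.

    require 'aws-sdk-machinelearning'

    client = Aws::MachineLearning::Client.new
    begin
      # Polls describe_ml_models (defaults: every 30s, up to 60 attempts)
      # and succeeds once the matched models reach COMPLETED.
      client.wait_until(:ml_model_available,
        { filter_variable: 'Name', eq: 'churn-model-v2' },
        { max_attempts: 20, delay: 30 })
      puts 'model is available'
    rescue Aws::Waiters::Errors::WaiterFailed => e
      puts "gave up waiting: #{e.message}"
    end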
2001
2229
 
2002
- private
2230
+ # @api private
2231
+ # @deprecated
2232
+ def waiter_names
2233
+ waiters.keys
2234
+ end
2003
2235
 
2004
- # @param [Symbol] waiter_name
2005
- # @param [Hash] options ({})
2006
- def waiter(waiter_name, options = {})
2007
- waiter_class = waiters[waiter_name]
2008
- if waiter_class
2009
- waiter_class.new(options.merge(client: self))
2010
- else
2011
- raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
2012
- end
2013
- end
2236
+ private
2014
2237
 
2015
- def waiters
2016
- {
2017
- data_source_available: Waiters::DataSourceAvailable,
2018
- ml_model_available: Waiters::MLModelAvailable,
2019
- evaluation_available: Waiters::EvaluationAvailable,
2020
- batch_prediction_available: Waiters::BatchPredictionAvailable
2021
- }
2238
+ # @param [Symbol] waiter_name
2239
+ # @param [Hash] options ({})
2240
+ def waiter(waiter_name, options = {})
2241
+ waiter_class = waiters[waiter_name]
2242
+ if waiter_class
2243
+ waiter_class.new(options.merge(client: self))
2244
+ else
2245
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
2022
2246
  end
2247
+ end
2023
2248
 
2024
- class << self
2249
+ def waiters
2250
+ {
2251
+ batch_prediction_available: Waiters::BatchPredictionAvailable,
2252
+ data_source_available: Waiters::DataSourceAvailable,
2253
+ evaluation_available: Waiters::EvaluationAvailable,
2254
+ ml_model_available: Waiters::MLModelAvailable
2255
+ }
2256
+ end
2025
2257
 
2026
- # @api private
2027
- attr_reader :identifier
2258
+ class << self
2028
2259
 
2029
- # @api private
2030
- def errors_module
2031
- Errors
2032
- end
2260
+ # @api private
2261
+ attr_reader :identifier
2033
2262
 
2263
+ # @api private
2264
+ def errors_module
2265
+ Errors
2034
2266
  end
2267
+
2035
2268
  end
2036
2269
  end
2037
2270
  end