aws-sdk-machinelearning 1.0.0.rc1

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 9ba71ed1db314c4ecc1e9971bc9ad657ea98a71b
+   data.tar.gz: 27b6212254cac811e498620a1602371f12a45755
+ SHA512:
+   metadata.gz: b47aeee82d78fd427d620fdb2d3ee27623893a4001512ef555f5e9825ee30bc0558ae986014b4593ba4a0e11f0ee79f4827c32d54e147166f836a3da03801bbc
+   data.tar.gz: 0bb0dae5ea3dbe0c7ab243b1c8790010ff3292c2f6446981b37bd8483e4c53a4b32ea15f7aaf6df677bcc4dbd8de5fc3571e4293f1e7e82e317cc8c1c1fba1dc
@@ -0,0 +1,48 @@
1
+ # WARNING ABOUT GENERATED CODE
2
+ #
3
+ # This file is generated. See the contributing guide for info on making contributions:
4
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
5
+ #
6
+ # WARNING ABOUT GENERATED CODE
7
+
8
+ require 'aws-sdk-core'
9
+ require 'aws-sigv4'
10
+
11
+ require_relative 'aws-sdk-machinelearning/types'
12
+ require_relative 'aws-sdk-machinelearning/client_api'
13
+ require_relative 'aws-sdk-machinelearning/client'
14
+ require_relative 'aws-sdk-machinelearning/errors'
15
+ require_relative 'aws-sdk-machinelearning/waiters'
16
+ require_relative 'aws-sdk-machinelearning/resource'
17
+ require_relative 'aws-sdk-machinelearning/customizations'
18
+
19
+ # This module provides support for Amazon Machine Learning. This module is available in the
20
+ # `aws-sdk-machinelearning` gem.
21
+ #
22
+ # # Client
23
+ #
24
+ # The {Client} class provides one method for each API operation. Operation
25
+ # methods each accept a hash of request parameters and return a response
26
+ # structure.
27
+ #
28
+ # See {Client} for more information.
29
+ #
30
+ # # Errors
31
+ #
32
+ # Errors returned from Amazon Machine Learning all
33
+ # extend {Errors::ServiceError}.
34
+ #
35
+ # begin
36
+ # # do stuff
37
+ # rescue Aws::MachineLearning::Errors::ServiceError
38
+ # # rescues all service API errors
39
+ # end
40
+ #
41
+ # See {Errors} for more information.
42
+ #
43
+ # @service
44
+ module Aws::MachineLearning
45
+
46
+ GEM_VERSION = '1.0.0.rc1'
47
+
48
+ end
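+
+ # A minimal usage sketch, assuming credentials and the region are resolved
+ # from the environment as described above; the operation and `:limit` value
+ # are illustrative.
+ #
+ #     require 'aws-sdk-machinelearning'
+ #
+ #     client = Aws::MachineLearning::Client.new
+ #     begin
+ #       resp = client.describe_batch_predictions(limit: 10)
+ #       resp.results.each { |bp| puts "#{bp.batch_prediction_id}: #{bp.status}" }
+ #     rescue Aws::MachineLearning::Errors::ServiceError => e
+ #       # rescues all service API errors
+ #       puts "request failed: #{e.message}"
+ #     end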
@@ -0,0 +1,2037 @@
1
+ # WARNING ABOUT GENERATED CODE
2
+ #
3
+ # This file is generated. See the contributing guide for info on making contributions:
4
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
5
+ #
6
+ # WARNING ABOUT GENERATED CODE
7
+
8
+ require 'seahorse/client/plugins/content_length.rb'
9
+ require 'aws-sdk-core/plugins/credentials_configuration.rb'
10
+ require 'aws-sdk-core/plugins/logging.rb'
11
+ require 'aws-sdk-core/plugins/param_converter.rb'
12
+ require 'aws-sdk-core/plugins/param_validator.rb'
13
+ require 'aws-sdk-core/plugins/user_agent.rb'
14
+ require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
15
+ require 'aws-sdk-core/plugins/retry_errors.rb'
16
+ require 'aws-sdk-core/plugins/global_configuration.rb'
17
+ require 'aws-sdk-core/plugins/regional_endpoint.rb'
18
+ require 'aws-sdk-core/plugins/response_paging.rb'
19
+ require 'aws-sdk-core/plugins/stub_responses.rb'
20
+ require 'aws-sdk-core/plugins/idempotency_token.rb'
21
+ require 'aws-sdk-core/plugins/signature_v4.rb'
22
+ require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
23
+ require 'aws-sdk-machinelearning/plugins/predict_endpoint.rb'
24
+
25
+ Aws::Plugins::GlobalConfiguration.add_identifier(:machinelearning)
26
+
27
+ module Aws
28
+ module MachineLearning
29
+ class Client < Seahorse::Client::Base
30
+
31
+ include Aws::ClientStubs
32
+
33
+ @identifier = :machinelearning
34
+
35
+ set_api(ClientApi::API)
36
+
37
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
38
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
39
+ add_plugin(Aws::Plugins::Logging)
40
+ add_plugin(Aws::Plugins::ParamConverter)
41
+ add_plugin(Aws::Plugins::ParamValidator)
42
+ add_plugin(Aws::Plugins::UserAgent)
43
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
44
+ add_plugin(Aws::Plugins::RetryErrors)
45
+ add_plugin(Aws::Plugins::GlobalConfiguration)
46
+ add_plugin(Aws::Plugins::RegionalEndpoint)
47
+ add_plugin(Aws::Plugins::ResponsePaging)
48
+ add_plugin(Aws::Plugins::StubResponses)
49
+ add_plugin(Aws::Plugins::IdempotencyToken)
50
+ add_plugin(Aws::Plugins::SignatureV4)
51
+ add_plugin(Aws::Plugins::Protocols::JsonRpc)
52
+ add_plugin(Aws::MachineLearning::Plugins::PredictEndpoint)
53
+
54
+ # @option options [required, Aws::CredentialProvider] :credentials
55
+ # Your AWS credentials. This can be an instance of any one of the
56
+ # following classes:
57
+ #
58
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
59
+ # credentials.
60
+ #
61
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
62
+ # from an EC2 IMDS on an EC2 instance.
63
+ #
64
+ # * `Aws::SharedCredentials` - Used for loading credentials from a
65
+ # shared file, such as `~/.aws/config`.
66
+ #
67
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
68
+ #
69
+ # When `:credentials` are not configured directly, the following
70
+ # locations will be searched for credentials:
71
+ #
72
+ # * `Aws.config[:credentials]`
73
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
74
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
75
+ # * `~/.aws/credentials`
76
+ # * `~/.aws/config`
77
+ # * EC2 IMDS instance profile - When used by default, the timeouts are
78
+ # very aggressive. Construct and pass an instance of
79
+ #   `Aws::InstanceProfileCredentials` to enable retries and extended
80
+ # timeouts.
81
+ # @option options [required, String] :region
82
+ # The AWS region to connect to. The configured `:region` is
83
+ # used to determine the service `:endpoint`. When not passed,
84
+ #   a default `:region` is searched for in the following locations:
85
+ #
86
+ # * `Aws.config[:region]`
87
+ # * `ENV['AWS_REGION']`
88
+ # * `ENV['AMAZON_REGION']`
89
+ # * `ENV['AWS_DEFAULT_REGION']`
90
+ # * `~/.aws/credentials`
91
+ # * `~/.aws/config`
92
+ # @option options [String] :access_key_id
93
+ # @option options [Boolean] :convert_params (true)
94
+ # When `true`, an attempt is made to coerce request parameters into
95
+ # the required types.
96
+ # @option options [String] :endpoint
97
+ # The client endpoint is normally constructed from the `:region`
98
+ # option. You should only configure an `:endpoint` when connecting
99
+ #   to test endpoints. This should be a valid HTTP(S) URI.
100
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
101
+ # The log formatter.
102
+ # @option options [Symbol] :log_level (:info)
103
+ # The log level to send messages to the `:logger` at.
104
+ # @option options [Logger] :logger
105
+ # The Logger instance to send log messages to. If this option
106
+ # is not set, logging will be disabled.
107
+ # @option options [String] :profile ("default")
108
+ # Used when loading credentials from the shared credentials file
109
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
110
+ # @option options [Integer] :retry_limit (3)
111
+ # The maximum number of times to retry failed requests. Only
112
+ # ~ 500 level server errors and certain ~ 400 level client errors
113
+ # are retried. Generally, these are throttling errors, data
114
+ # checksum errors, networking errors, timeout errors and auth
115
+ # errors from expired credentials.
116
+ # @option options [String] :secret_access_key
117
+ # @option options [String] :session_token
118
+ # @option options [Boolean] :simple_json (false)
119
+ # Disables request parameter conversion, validation, and formatting.
120
+ #   Also disables response data type conversions. This option is useful
121
+ # when you want to ensure the highest level of performance by
122
+ # avoiding overhead of walking request parameters and response data
123
+ # structures.
124
+ #
125
+ # When `:simple_json` is enabled, the request parameters hash must
126
+ #   be formatted exactly as the API expects.
127
+ # @option options [Boolean] :stub_responses (false)
128
+ # Causes the client to return stubbed responses. By default
129
+ # fake responses are generated and returned. You can specify
130
+ # the response data to return or errors to raise by calling
131
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
132
+ #
133
+ # ** Please note ** When response stubbing is enabled, no HTTP
134
+ # requests are made, and retries are disabled.
135
+ # @option options [Boolean] :validate_params (true)
136
+ # When `true`, request parameters are validated before
137
+ # sending the request.
138
+ def initialize(*args)
139
+ super
140
+ end
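+
+ # A construction sketch for the options documented above, assuming the
+ # credential values are placeholders and that stubbed responses are only
+ # wanted in tests.
+ #
+ #     require 'aws-sdk-machinelearning'
+ #
+ #     client = Aws::MachineLearning::Client.new(
+ #       region: 'us-east-1',
+ #       credentials: Aws::Credentials.new('ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'),
+ #       retry_limit: 5,
+ #       validate_params: true
+ #     )
+ #
+ #     # With :stub_responses, no HTTP requests are made and fake data is returned.
+ #     test_client = Aws::MachineLearning::Client.new(stub_responses: true)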
141
+
142
+ # @!group API Operations
143
+
144
+ # Adds one or more tags to an object, up to a limit of 10. Each tag
145
+ # consists of a key and an optional value. If you add a tag using a key
146
+ # that is already associated with the ML object, `AddTags` updates the
147
+ # tag's value.
148
+ # @option params [required, Array<Types::Tag>] :tags
149
+ # The key-value pairs to use to create tags. If you specify a key
150
+ # without specifying a value, Amazon ML creates a tag with the specified
151
+ # key and a value of null.
152
+ # @option params [required, String] :resource_id
153
+ # The ID of the ML object to tag. For example, `exampleModelId`.
154
+ # @option params [required, String] :resource_type
155
+ # The type of the ML object to tag.
156
+ # @return [Types::AddTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
157
+ #
158
+ # * {Types::AddTagsOutput#resource_id #ResourceId} => String
159
+ # * {Types::AddTagsOutput#resource_type #ResourceType} => String
160
+ #
161
+ # @example Request syntax with placeholder values
162
+ # resp = client.add_tags({
163
+ # tags: [ # required
164
+ # {
165
+ # key: "TagKey",
166
+ # value: "TagValue",
167
+ # },
168
+ # ],
169
+ # resource_id: "EntityId", # required
170
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
171
+ # })
172
+ #
173
+ # @example Response structure
174
+ # resp.resource_id #=> String
175
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
176
+ # @overload add_tags(params = {})
177
+ # @param [Hash] params ({})
178
+ def add_tags(params = {}, options = {})
179
+ req = build_request(:add_tags, params)
180
+ req.send_request(options)
181
+ end
182
+
183
+ # Generates predictions for a group of observations. The observations to
184
+ # process exist in one or more data files referenced by a `DataSource`.
185
+ # This operation creates a new `BatchPrediction`, and uses an `MLModel`
186
+ # and the data files referenced by the `DataSource` as information
187
+ # sources.
188
+ #
189
+ # `CreateBatchPrediction` is an asynchronous operation. In response to
190
+ # `CreateBatchPrediction`, Amazon Machine Learning (Amazon ML)
191
+ # immediately returns and sets the `BatchPrediction` status to
192
+ # `PENDING`. After the `BatchPrediction` completes, Amazon ML sets the
193
+ # status to `COMPLETED`.
194
+ #
195
+ # You can poll for status updates by using the GetBatchPrediction
196
+ # operation and checking the `Status` parameter of the result. After the
197
+ # `COMPLETED` status appears, the results are available in the location
198
+ # specified by the `OutputUri` parameter.
199
+ # @option params [required, String] :batch_prediction_id
200
+ # A user-supplied ID that uniquely identifies the `BatchPrediction`.
201
+ # @option params [String] :batch_prediction_name
202
+ # A user-supplied name or description of the `BatchPrediction`.
203
+ # `BatchPredictionName` can only use the UTF-8 character set.
204
+ # @option params [required, String] :ml_model_id
205
+ # The ID of the `MLModel` that will generate predictions for the group
206
+ # of observations.
207
+ # @option params [required, String] :batch_prediction_data_source_id
208
+ # The ID of the `DataSource` that points to the group of observations to
209
+ # predict.
210
+ # @option params [required, String] :output_uri
211
+ # The location of an Amazon Simple Storage Service (Amazon S3) bucket or
212
+ # directory to store the batch prediction results. The following
213
+ # substrings are not allowed in the `s3 key` portion of the `outputURI`
214
+ # field: ':', '//', '/./', '/../'.
215
+ #
216
+ # Amazon ML needs permissions to store and retrieve the logs on your
217
+ # behalf. For information about how to set permissions, see the [Amazon
218
+ # Machine Learning Developer Guide][1].
219
+ #
220
+ #
221
+ #
222
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
223
+ # @return [Types::CreateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
224
+ #
225
+ # * {Types::CreateBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
226
+ #
227
+ # @example Request syntax with placeholder values
228
+ # resp = client.create_batch_prediction({
229
+ # batch_prediction_id: "EntityId", # required
230
+ # batch_prediction_name: "EntityName",
231
+ # ml_model_id: "EntityId", # required
232
+ # batch_prediction_data_source_id: "EntityId", # required
233
+ # output_uri: "S3Url", # required
234
+ # })
235
+ #
236
+ # @example Response structure
237
+ # resp.batch_prediction_id #=> String
238
+ # @overload create_batch_prediction(params = {})
239
+ # @param [Hash] params ({})
240
+ def create_batch_prediction(params = {}, options = {})
241
+ req = build_request(:create_batch_prediction, params)
242
+ req.send_request(options)
243
+ end
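+
+ # A polling sketch for the asynchronous flow described above; the IDs, output
+ # location, and 30-second interval are illustrative, and the gem's waiters
+ # (required in aws-sdk-machinelearning.rb) may offer an alternative.
+ #
+ #     resp = client.create_batch_prediction(
+ #       batch_prediction_id: "exampleBatchPredictionId",
+ #       ml_model_id: "exampleModelId",
+ #       batch_prediction_data_source_id: "exampleDataSourceId",
+ #       output_uri: "s3://examplebucket/batch-output/"
+ #     )
+ #
+ #     # Poll GetBatchPrediction until the job leaves the PENDING/INPROGRESS states.
+ #     loop do
+ #       status = client.get_batch_prediction(
+ #         batch_prediction_id: resp.batch_prediction_id
+ #       ).status
+ #       break if %w[COMPLETED FAILED DELETED].include?(status)
+ #       sleep 30
+ #     end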
244
+
245
+ # Creates a `DataSource` object from an [ Amazon Relational Database
246
+ # Service][1] (Amazon RDS). A `DataSource` references data that can be
247
+ # used to perform `CreateMLModel`, `CreateEvaluation`, or
248
+ # `CreateBatchPrediction` operations.
249
+ #
250
+ # `CreateDataSourceFromRDS` is an asynchronous operation. In response to
251
+ # `CreateDataSourceFromRDS`, Amazon Machine Learning (Amazon ML)
252
+ # immediately returns and sets the `DataSource` status to `PENDING`.
253
+ # After the `DataSource` is created and ready for use, Amazon ML sets
254
+ # the `Status` parameter to `COMPLETED`. `DataSource` in the `COMPLETED`
255
+ #   or `PENDING` state can be used only to perform `CreateMLModel`,
256
+ # `CreateEvaluation`, or `CreateBatchPrediction` operations.
257
+ #
258
+ # If Amazon ML cannot accept the input source, it sets the `Status`
259
+ # parameter to `FAILED` and includes an error message in the `Message`
260
+ # attribute of the `GetDataSource` operation response.
261
+ #
262
+ #
263
+ #
264
+ # [1]: http://aws.amazon.com/rds/
265
+ # @option params [required, String] :data_source_id
266
+ # A user-supplied ID that uniquely identifies the `DataSource`.
267
+ #   Typically, an Amazon Resource Name (ARN) becomes the ID for a
268
+ # `DataSource`.
269
+ # @option params [String] :data_source_name
270
+ # A user-supplied name or description of the `DataSource`.
271
+ # @option params [required, Types::RDSDataSpec] :rds_data
272
+ # The data specification of an Amazon RDS `DataSource`\:
273
+ #
274
+ # * DatabaseInformation - * `DatabaseName` - The name of the Amazon RDS
275
+ # database.
276
+ # * `InstanceIdentifier ` - A unique identifier for the Amazon RDS
277
+ # database instance.
278
+ #
279
+ # * DatabaseCredentials - AWS Identity and Access Management (IAM)
280
+ # credentials that are used to connect to the Amazon RDS database.
281
+ #
282
+ # * ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by
283
+ # an EC2 instance to carry out the copy task from Amazon RDS to Amazon
284
+ # Simple Storage Service (Amazon S3). For more information, see [Role
285
+ # templates][1] for data pipelines.
286
+ #
287
+ # * ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS
288
+ # Data Pipeline service to monitor the progress of the copy task from
289
+ # Amazon RDS to Amazon S3. For more information, see [Role
290
+ # templates][1] for data pipelines.
291
+ #
292
+ # * SecurityInfo - The security information to use to access an RDS DB
293
+ # instance. You need to set up appropriate ingress rules for the
294
+ # security entity IDs provided to allow access to the Amazon RDS
295
+ # instance. Specify a \[`SubnetId`, `SecurityGroupIds`\] pair for a
296
+ # VPC-based RDS DB instance.
297
+ #
298
+ # * SelectSqlQuery - A query that is used to retrieve the observation
299
+ # data for the `Datasource`.
300
+ #
301
+ # * S3StagingLocation - The Amazon S3 location for staging Amazon RDS
302
+ # data. The data retrieved from Amazon RDS using `SelectSqlQuery` is
303
+ # stored in this location.
304
+ #
305
+ # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
306
+ #
307
+ # * DataSchema - A JSON string representing the schema. This is not
308
+ # required if `DataSchemaUri` is specified.
309
+ #
310
+ # * DataRearrangement - A JSON string that represents the splitting and
311
+ # rearrangement requirements for the `Datasource`.
312
+ #
313
+ #
314
+ # Sample - `
315
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
316
+ #
317
+ #
318
+ #
319
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
320
+ # @option params [required, String] :role_arn
321
+ # The role that Amazon ML assumes on behalf of the user to create and
322
+ # activate a data pipeline in the user's account and copy data using
323
+ # the `SelectSqlQuery` query from Amazon RDS to Amazon S3.
324
+ # @option params [Boolean] :compute_statistics
325
+ # The compute statistics for a `DataSource`. The statistics are
326
+ # generated from the observation data referenced by a `DataSource`.
327
+ # Amazon ML uses the statistics internally during `MLModel` training.
328
+ #   This parameter must be set to `true` if the `DataSource` needs to be
329
+ # used for `MLModel` training.
330
+ # @return [Types::CreateDataSourceFromRDSOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
331
+ #
332
+ # * {Types::CreateDataSourceFromRDSOutput#data_source_id #DataSourceId} => String
333
+ #
334
+ # @example Request syntax with placeholder values
335
+ # resp = client.create_data_source_from_rds({
336
+ # data_source_id: "EntityId", # required
337
+ # data_source_name: "EntityName",
338
+ # rds_data: { # required
339
+ # database_information: { # required
340
+ # instance_identifier: "RDSInstanceIdentifier", # required
341
+ # database_name: "RDSDatabaseName", # required
342
+ # },
343
+ # select_sql_query: "RDSSelectSqlQuery", # required
344
+ # database_credentials: { # required
345
+ # username: "RDSDatabaseUsername", # required
346
+ # password: "RDSDatabasePassword", # required
347
+ # },
348
+ # s3_staging_location: "S3Url", # required
349
+ # data_rearrangement: "DataRearrangement",
350
+ # data_schema: "DataSchema",
351
+ # data_schema_uri: "S3Url",
352
+ # resource_role: "EDPResourceRole", # required
353
+ # service_role: "EDPServiceRole", # required
354
+ # subnet_id: "EDPSubnetId", # required
355
+ # security_group_ids: ["EDPSecurityGroupId"], # required
356
+ # },
357
+ # role_arn: "RoleARN", # required
358
+ # compute_statistics: false,
359
+ # })
360
+ #
361
+ # @example Response structure
362
+ # resp.data_source_id #=> String
363
+ # @overload create_data_source_from_rds(params = {})
364
+ # @param [Hash] params ({})
365
+ def create_data_source_from_rds(params = {}, options = {})
366
+ req = build_request(:create_data_source_from_rds, params)
367
+ req.send_request(options)
368
+ end
369
+
370
+ # Creates a `DataSource` from a database hosted on an Amazon Redshift
371
+ # cluster. A `DataSource` references data that can be used to perform
372
+ # either `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
373
+ # operations.
374
+ #
375
+ # `CreateDataSourceFromRedshift` is an asynchronous operation. In
376
+ # response to `CreateDataSourceFromRedshift`, Amazon Machine Learning
377
+ # (Amazon ML) immediately returns and sets the `DataSource` status to
378
+ # `PENDING`. After the `DataSource` is created and ready for use, Amazon
379
+ # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in
380
+ # `COMPLETED` or `PENDING` states can be used to perform only
381
+ # `CreateMLModel`, `CreateEvaluation`, or `CreateBatchPrediction`
382
+ # operations.
383
+ #
384
+ # If Amazon ML can't accept the input source, it sets the `Status`
385
+ # parameter to `FAILED` and includes an error message in the `Message`
386
+ # attribute of the `GetDataSource` operation response.
387
+ #
388
+ # The observations should be contained in the database hosted on an
389
+ # Amazon Redshift cluster and should be specified by a `SelectSqlQuery`
390
+ # query. Amazon ML executes an `Unload` command in Amazon Redshift to
391
+ # transfer the result set of the `SelectSqlQuery` query to
392
+ # `S3StagingLocation`.
393
+ #
394
+ # After the `DataSource` has been created, it's ready for use in
395
+ # evaluations and batch predictions. If you plan to use the `DataSource`
396
+ # to train an `MLModel`, the `DataSource` also requires a recipe. A
397
+ # recipe describes how each input variable will be used in training an
398
+ # `MLModel`. Will the variable be included or excluded from training?
399
+ # Will the variable be manipulated; for example, will it be combined
400
+ # with another variable or will it be split apart into word
401
+ # combinations? The recipe provides answers to these questions.
402
+ #
403
+ #   You can't change an existing datasource, but you can copy and modify
404
+ # the settings from an existing Amazon Redshift datasource to create a
405
+ # new datasource. To do so, call `GetDataSource` for an existing
406
+ # datasource and copy the values to a `CreateDataSource` call. Change
407
+ # the settings that you want to change and make sure that all required
408
+ # fields have the appropriate values.
409
+ #
411
+ # @option params [required, String] :data_source_id
412
+ # A user-supplied ID that uniquely identifies the `DataSource`.
413
+ # @option params [String] :data_source_name
414
+ # A user-supplied name or description of the `DataSource`.
415
+ # @option params [required, Types::RedshiftDataSpec] :data_spec
416
+ # The data specification of an Amazon Redshift `DataSource`\:
417
+ #
418
+ # * DatabaseInformation - * `DatabaseName` - The name of the Amazon
419
+ # Redshift database.
420
+ # * ` ClusterIdentifier` - The unique ID for the Amazon Redshift
421
+ # cluster.
422
+ #
423
+ # * DatabaseCredentials - The AWS Identity and Access Management (IAM)
424
+ # credentials that are used to connect to the Amazon Redshift
425
+ # database.
426
+ #
427
+ # * SelectSqlQuery - The query that is used to retrieve the observation
428
+ # data for the `Datasource`.
429
+ #
430
+ # * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3)
431
+ # location for staging Amazon Redshift data. The data retrieved from
432
+ # Amazon Redshift using the `SelectSqlQuery` query is stored in this
433
+ # location.
434
+ #
435
+ # * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
436
+ #
437
+ # * DataSchema - A JSON string representing the schema. This is not
438
+ # required if `DataSchemaUri` is specified.
439
+ #
440
+ # * DataRearrangement - A JSON string that represents the splitting and
441
+ # rearrangement requirements for the `DataSource`.
442
+ #
443
+ # Sample - `
444
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
445
+ # @option params [required, String] :role_arn
446
+ # A fully specified role Amazon Resource Name (ARN). Amazon ML assumes
447
+ # the role on behalf of the user to create the following:
448
+ #
449
+ # * A security group to allow Amazon ML to execute the `SelectSqlQuery`
450
+ # query on an Amazon Redshift cluster
451
+ #
452
+ # * An Amazon S3 bucket policy to grant Amazon ML read/write permissions
453
+ # on the `S3StagingLocation`
454
+ # @option params [Boolean] :compute_statistics
455
+ # The compute statistics for a `DataSource`. The statistics are
456
+ # generated from the observation data referenced by a `DataSource`.
457
+ # Amazon ML uses the statistics internally during `MLModel` training.
458
+ # This parameter must be set to `true` if the `DataSource` needs to be
459
+ # used for `MLModel` training.
460
+ # @return [Types::CreateDataSourceFromRedshiftOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
461
+ #
462
+ # * {Types::CreateDataSourceFromRedshiftOutput#data_source_id #DataSourceId} => String
463
+ #
464
+ # @example Request syntax with placeholder values
465
+ # resp = client.create_data_source_from_redshift({
466
+ # data_source_id: "EntityId", # required
467
+ # data_source_name: "EntityName",
468
+ # data_spec: { # required
469
+ # database_information: { # required
470
+ # database_name: "RedshiftDatabaseName", # required
471
+ # cluster_identifier: "RedshiftClusterIdentifier", # required
472
+ # },
473
+ # select_sql_query: "RedshiftSelectSqlQuery", # required
474
+ # database_credentials: { # required
475
+ # username: "RedshiftDatabaseUsername", # required
476
+ # password: "RedshiftDatabasePassword", # required
477
+ # },
478
+ # s3_staging_location: "S3Url", # required
479
+ # data_rearrangement: "DataRearrangement",
480
+ # data_schema: "DataSchema",
481
+ # data_schema_uri: "S3Url",
482
+ # },
483
+ # role_arn: "RoleARN", # required
484
+ # compute_statistics: false,
485
+ # })
486
+ #
487
+ # @example Response structure
488
+ # resp.data_source_id #=> String
489
+ # @overload create_data_source_from_redshift(params = {})
490
+ # @param [Hash] params ({})
491
+ def create_data_source_from_redshift(params = {}, options = {})
492
+ req = build_request(:create_data_source_from_redshift, params)
493
+ req.send_request(options)
494
+ end
495
+
496
+ # Creates a `DataSource` object. A `DataSource` references data that can
497
+ # be used to perform `CreateMLModel`, `CreateEvaluation`, or
498
+ # `CreateBatchPrediction` operations.
499
+ #
500
+ # `CreateDataSourceFromS3` is an asynchronous operation. In response to
501
+ # `CreateDataSourceFromS3`, Amazon Machine Learning (Amazon ML)
502
+ # immediately returns and sets the `DataSource` status to `PENDING`.
503
+ # After the `DataSource` has been created and is ready for use, Amazon
504
+ # ML sets the `Status` parameter to `COMPLETED`. `DataSource` in the
505
+ # `COMPLETED` or `PENDING` state can be used to perform only
506
+ # `CreateMLModel`, `CreateEvaluation` or `CreateBatchPrediction`
507
+ # operations.
508
+ #
509
+ # If Amazon ML can't accept the input source, it sets the `Status`
510
+ # parameter to `FAILED` and includes an error message in the `Message`
511
+ # attribute of the `GetDataSource` operation response.
512
+ #
513
+ # The observation data used in a `DataSource` should be ready to use;
514
+ # that is, it should have a consistent structure, and missing data
515
+ # values should be kept to a minimum. The observation data must reside
516
+ # in one or more .csv files in an Amazon Simple Storage Service (Amazon
517
+ # S3) location, along with a schema that describes the data items by
518
+ # name and type. The same schema must be used for all of the data files
519
+ # referenced by the `DataSource`.
520
+ #
521
+ # After the `DataSource` has been created, it's ready to use in
522
+ # evaluations and batch predictions. If you plan to use the `DataSource`
523
+ # to train an `MLModel`, the `DataSource` also needs a recipe. A recipe
524
+ # describes how each input variable will be used in training an
525
+ # `MLModel`. Will the variable be included or excluded from training?
526
+ # Will the variable be manipulated; for example, will it be combined
527
+ # with another variable or will it be split apart into word
528
+ # combinations? The recipe provides answers to these questions.
529
+ # @option params [required, String] :data_source_id
530
+ # A user-supplied identifier that uniquely identifies the `DataSource`.
531
+ # @option params [String] :data_source_name
532
+ # A user-supplied name or description of the `DataSource`.
533
+ # @option params [required, Types::S3DataSpec] :data_spec
534
+ # The data specification of a `DataSource`\:
535
+ #
536
+ # * DataLocationS3 - The Amazon S3 location of the observation data.
537
+ #
538
+ # * DataSchemaLocationS3 - The Amazon S3 location of the `DataSchema`.
539
+ #
540
+ # * DataSchema - A JSON string representing the schema. This is not
541
+ # required if `DataSchemaUri` is specified.
542
+ #
543
+ # * DataRearrangement - A JSON string that represents the splitting and
544
+ # rearrangement requirements for the `Datasource`.
545
+ #
546
+ # Sample - `
547
+ # "\{"splitting":\{"percentBegin":10,"percentEnd":60\}\}"`
548
+ # @option params [Boolean] :compute_statistics
549
+ # The compute statistics for a `DataSource`. The statistics are
550
+ # generated from the observation data referenced by a `DataSource`.
551
+ # Amazon ML uses the statistics internally during `MLModel` training.
552
+ #   This parameter must be set to `true` if the `DataSource` needs to be
553
+ # used for `MLModel` training.
554
+ # @return [Types::CreateDataSourceFromS3Output] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
555
+ #
556
+ # * {Types::CreateDataSourceFromS3Output#data_source_id #DataSourceId} => String
557
+ #
558
+ # @example Request syntax with placeholder values
559
+ # resp = client.create_data_source_from_s3({
560
+ # data_source_id: "EntityId", # required
561
+ # data_source_name: "EntityName",
562
+ # data_spec: { # required
563
+ # data_location_s3: "S3Url", # required
564
+ # data_rearrangement: "DataRearrangement",
565
+ # data_schema: "DataSchema",
566
+ # data_schema_location_s3: "S3Url",
567
+ # },
568
+ # compute_statistics: false,
569
+ # })
570
+ #
571
+ # @example Response structure
572
+ # resp.data_source_id #=> String
573
+ # @overload create_data_source_from_s3(params = {})
574
+ # @param [Hash] params ({})
575
+ def create_data_source_from_s3(params = {}, options = {})
576
+ req = build_request(:create_data_source_from_s3, params)
577
+ req.send_request(options)
578
+ end
579
+
580
+ # Creates a new `Evaluation` of an `MLModel`. An `MLModel` is evaluated
581
+ # on a set of observations associated to a `DataSource`. Like a
582
+ # `DataSource` for an `MLModel`, the `DataSource` for an `Evaluation`
583
+ # contains values for the `Target Variable`. The `Evaluation` compares
584
+ # the predicted result for each observation to the actual outcome and
585
+ # provides a summary so that you know how effective the `MLModel`
586
+ # functions on the test data. Evaluation generates a relevant
587
+ # performance metric, such as BinaryAUC, RegressionRMSE or
588
+ # MulticlassAvgFScore based on the corresponding `MLModelType`\:
589
+ # `BINARY`, `REGRESSION` or `MULTICLASS`.
590
+ #
591
+ # `CreateEvaluation` is an asynchronous operation. In response to
592
+ # `CreateEvaluation`, Amazon Machine Learning (Amazon ML) immediately
593
+ # returns and sets the evaluation status to `PENDING`. After the
594
+ # `Evaluation` is created and ready for use, Amazon ML sets the status
595
+ # to `COMPLETED`.
596
+ #
597
+ # You can use the `GetEvaluation` operation to check progress of the
598
+ # evaluation during the creation operation.
599
+ # @option params [required, String] :evaluation_id
600
+ # A user-supplied ID that uniquely identifies the `Evaluation`.
601
+ # @option params [String] :evaluation_name
602
+ # A user-supplied name or description of the `Evaluation`.
603
+ # @option params [required, String] :ml_model_id
604
+ # The ID of the `MLModel` to evaluate.
605
+ #
606
+ # The schema used in creating the `MLModel` must match the schema of the
607
+ # `DataSource` used in the `Evaluation`.
608
+ # @option params [required, String] :evaluation_data_source_id
609
+ # The ID of the `DataSource` for the evaluation. The schema of the
610
+ # `DataSource` must match the schema used to create the `MLModel`.
611
+ # @return [Types::CreateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
612
+ #
613
+ # * {Types::CreateEvaluationOutput#evaluation_id #EvaluationId} => String
614
+ #
615
+ # @example Request syntax with placeholder values
616
+ # resp = client.create_evaluation({
617
+ # evaluation_id: "EntityId", # required
618
+ # evaluation_name: "EntityName",
619
+ # ml_model_id: "EntityId", # required
620
+ # evaluation_data_source_id: "EntityId", # required
621
+ # })
622
+ #
623
+ # @example Response structure
624
+ # resp.evaluation_id #=> String
625
+ # @overload create_evaluation(params = {})
626
+ # @param [Hash] params ({})
627
+ def create_evaluation(params = {}, options = {})
628
+ req = build_request(:create_evaluation, params)
629
+ req.send_request(options)
630
+ end
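+
+ # A sketch of the progress check described above, assuming illustrative IDs;
+ # `GetEvaluation` returns the evaluation's current status.
+ #
+ #     client.create_evaluation(
+ #       evaluation_id: "exampleEvaluationId",
+ #       ml_model_id: "exampleModelId",
+ #       evaluation_data_source_id: "exampleEvaluationDataSourceId",
+ #     )
+ #
+ #     status = client.get_evaluation(evaluation_id: "exampleEvaluationId").status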
631
+
632
+ # Creates a new `MLModel` using the `DataSource` and the recipe as
633
+ # information sources.
634
+ #
635
+ # An `MLModel` is nearly immutable. Users can update only the
636
+ # `MLModelName` and the `ScoreThreshold` in an `MLModel` without
637
+ # creating a new `MLModel`.
638
+ #
639
+ # `CreateMLModel` is an asynchronous operation. In response to
640
+ # `CreateMLModel`, Amazon Machine Learning (Amazon ML) immediately
641
+ # returns and sets the `MLModel` status to `PENDING`. After the
642
+ #   `MLModel` has been created and is ready for use, Amazon ML sets the
643
+ # status to `COMPLETED`.
644
+ #
645
+ # You can use the `GetMLModel` operation to check the progress of the
646
+ # `MLModel` during the creation operation.
647
+ #
648
+ # `CreateMLModel` requires a `DataSource` with computed statistics,
649
+ # which can be created by setting `ComputeStatistics` to `true` in
650
+ # `CreateDataSourceFromRDS`, `CreateDataSourceFromS3`, or
651
+ # `CreateDataSourceFromRedshift` operations.
652
+ # @option params [required, String] :ml_model_id
653
+ # A user-supplied ID that uniquely identifies the `MLModel`.
654
+ # @option params [String] :ml_model_name
655
+ # A user-supplied name or description of the `MLModel`.
656
+ # @option params [required, String] :ml_model_type
657
+ # The category of supervised learning that this `MLModel` will address.
658
+ # Choose from the following types:
659
+ #
660
+ # * Choose `REGRESSION` if the `MLModel` will be used to predict a
661
+ # numeric value.
662
+ # * Choose `BINARY` if the `MLModel` result has two possible values.
663
+ # * Choose `MULTICLASS` if the `MLModel` result has a limited number of
664
+ # values.
665
+ #
666
+ # For more information, see the [Amazon Machine Learning Developer
667
+ # Guide][1].
668
+ #
669
+ #
670
+ #
671
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
672
+ # @option params [Hash<String,String>] :parameters
673
+ # A list of the training parameters in the `MLModel`. The list is
674
+ # implemented as a map of key-value pairs.
675
+ #
676
+ # The following is the current set of training parameters:
677
+ #
678
+ # * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the model.
679
+ # Depending on the input data, the size of the model might affect its
680
+ # performance.
681
+ #
682
+ # The value is an integer that ranges from `100000` to `2147483648`.
683
+ # The default value is `33554432`.
684
+ #
685
+ # * `sgd.maxPasses` - The number of times that the training process
686
+ # traverses the observations to build the `MLModel`. The value is an
687
+ # integer that ranges from `1` to `10000`. The default value is `10`.
688
+ #
689
+ # * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
690
+ # Shuffling the data improves a model's ability to find the optimal
691
+ # solution for a variety of data types. The valid values are `auto`
692
+ #     and `none`. The default value is `none`. We strongly recommend that you
+ #     shuffle your data.
695
+ #
696
+ # * `sgd.l1RegularizationAmount` - The coefficient regularization L1
697
+ # norm. It controls overfitting the data by penalizing large
698
+ # coefficients. This tends to drive coefficients to zero, resulting in
699
+ # a sparse feature set. If you use this parameter, start by specifying
700
+ # a small value, such as `1.0E-08`.
701
+ #
702
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
703
+ # default is to not use L1 normalization. This parameter can't be
704
+ # used when `L2` is specified. Use this parameter sparingly.
705
+ #
706
+ # * `sgd.l2RegularizationAmount` - The coefficient regularization L2
707
+ # norm. It controls overfitting the data by penalizing large
708
+ # coefficients. This tends to drive coefficients to small, nonzero
709
+ # values. If you use this parameter, start by specifying a small
710
+ # value, such as `1.0E-08`.
711
+ #
712
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
713
+ # default is to not use L2 normalization. This parameter can't be
714
+ # used when `L1` is specified. Use this parameter sparingly.
715
+ # @option params [required, String] :training_data_source_id
716
+ # The `DataSource` that points to the training data.
717
+ # @option params [String] :recipe
718
+ # The data recipe for creating the `MLModel`. You must specify either
719
+ # the recipe or its URI. If you don't specify a recipe or its URI,
720
+ # Amazon ML creates a default.
721
+ # @option params [String] :recipe_uri
722
+ # The Amazon Simple Storage Service (Amazon S3) location and file name
723
+ # that contains the `MLModel` recipe. You must specify either the recipe
724
+ # or its URI. If you don't specify a recipe or its URI, Amazon ML
725
+ # creates a default.
726
+ # @return [Types::CreateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
727
+ #
728
+ # * {Types::CreateMLModelOutput#ml_model_id #MLModelId} => String
729
+ #
730
+ # @example Request syntax with placeholder values
731
+ # resp = client.create_ml_model({
732
+ # ml_model_id: "EntityId", # required
733
+ # ml_model_name: "EntityName",
734
+ # ml_model_type: "REGRESSION", # required, accepts REGRESSION, BINARY, MULTICLASS
735
+ # parameters: {
736
+ # "StringType" => "StringType",
737
+ # },
738
+ # training_data_source_id: "EntityId", # required
739
+ # recipe: "Recipe",
740
+ # recipe_uri: "S3Url",
741
+ # })
742
+ #
743
+ # @example Response structure
744
+ # resp.ml_model_id #=> String
745
+ # @overload create_ml_model(params = {})
746
+ # @param [Hash] params ({})
747
+ def create_ml_model(params = {}, options = {})
748
+ req = build_request(:create_ml_model, params)
749
+ req.send_request(options)
750
+ end
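+
+ # A sketch of the dependency noted above: the training `DataSource` must be
+ # created with `compute_statistics: true` before `CreateMLModel` can use it.
+ # The IDs, S3 locations, and model type are illustrative.
+ #
+ #     client.create_data_source_from_s3(
+ #       data_source_id: "exampleTrainingDataSourceId",
+ #       data_spec: {
+ #         data_location_s3: "s3://examplebucket/training.csv",
+ #         data_schema_location_s3: "s3://examplebucket/training.csv.schema",
+ #       },
+ #       compute_statistics: true, # required when the DataSource trains an MLModel
+ #     )
+ #
+ #     client.create_ml_model(
+ #       ml_model_id: "exampleModelId",
+ #       ml_model_type: "BINARY",
+ #       training_data_source_id: "exampleTrainingDataSourceId",
+ #     )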
751
+
752
+ # Creates a real-time endpoint for the `MLModel`. The endpoint contains
753
+ # the URI of the `MLModel`; that is, the location to send real-time
754
+ # prediction requests for the specified `MLModel`.
755
+ # @option params [required, String] :ml_model_id
756
+ # The ID assigned to the `MLModel` during creation.
757
+ # @return [Types::CreateRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
758
+ #
759
+ # * {Types::CreateRealtimeEndpointOutput#ml_model_id #MLModelId} => String
760
+ # * {Types::CreateRealtimeEndpointOutput#realtime_endpoint_info #RealtimeEndpointInfo} => Types::RealtimeEndpointInfo
761
+ #
762
+ # @example Request syntax with placeholder values
763
+ # resp = client.create_realtime_endpoint({
764
+ # ml_model_id: "EntityId", # required
765
+ # })
766
+ #
767
+ # @example Response structure
768
+ # resp.ml_model_id #=> String
769
+ # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
770
+ # resp.realtime_endpoint_info.created_at #=> Time
771
+ # resp.realtime_endpoint_info.endpoint_url #=> String
772
+ # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
773
+ # @overload create_realtime_endpoint(params = {})
774
+ # @param [Hash] params ({})
775
+ def create_realtime_endpoint(params = {}, options = {})
776
+ req = build_request(:create_realtime_endpoint, params)
777
+ req.send_request(options)
778
+ end
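+
+ # Once the endpoint status is `READY`, real-time predictions go through the
+ # service's Predict operation (defined later in this client, outside this
+ # excerpt); a hedged sketch assuming a trained model and a record whose keys
+ # match the model's schema.
+ #
+ #     endpoint = client.create_realtime_endpoint(ml_model_id: "exampleModelId")
+ #     puts endpoint.realtime_endpoint_info.endpoint_status
+ #
+ #     resp = client.predict(
+ #       ml_model_id: "exampleModelId",
+ #       record: { "featureName" => "featureValue" },
+ #       predict_endpoint: endpoint.realtime_endpoint_info.endpoint_url,
+ #     )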
779
+
780
+ # Assigns the DELETED status to a `BatchPrediction`, rendering it
781
+ # unusable.
782
+ #
783
+ # After using the `DeleteBatchPrediction` operation, you can use the
784
+ # GetBatchPrediction operation to verify that the status of the
785
+ # `BatchPrediction` changed to DELETED.
786
+ #
787
+ # **Caution:** The result of the `DeleteBatchPrediction` operation is
788
+ # irreversible.
789
+ # @option params [required, String] :batch_prediction_id
790
+ # A user-supplied ID that uniquely identifies the `BatchPrediction`.
791
+ # @return [Types::DeleteBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
792
+ #
793
+ # * {Types::DeleteBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
794
+ #
795
+ # @example Request syntax with placeholder values
796
+ # resp = client.delete_batch_prediction({
797
+ # batch_prediction_id: "EntityId", # required
798
+ # })
799
+ #
800
+ # @example Response structure
801
+ # resp.batch_prediction_id #=> String
802
+ # @overload delete_batch_prediction(params = {})
803
+ # @param [Hash] params ({})
804
+ def delete_batch_prediction(params = {}, options = {})
805
+ req = build_request(:delete_batch_prediction, params)
806
+ req.send_request(options)
807
+ end
808
+
809
+ # Assigns the DELETED status to a `DataSource`, rendering it unusable.
810
+ #
811
+ # After using the `DeleteDataSource` operation, you can use the
812
+ # GetDataSource operation to verify that the status of the `DataSource`
813
+ # changed to DELETED.
814
+ #
815
+ # **Caution:** The results of the `DeleteDataSource` operation are
816
+ # irreversible.
817
+ # @option params [required, String] :data_source_id
818
+ # A user-supplied ID that uniquely identifies the `DataSource`.
819
+ # @return [Types::DeleteDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
820
+ #
821
+ # * {Types::DeleteDataSourceOutput#data_source_id #DataSourceId} => String
822
+ #
823
+ # @example Request syntax with placeholder values
824
+ # resp = client.delete_data_source({
825
+ # data_source_id: "EntityId", # required
826
+ # })
827
+ #
828
+ # @example Response structure
829
+ # resp.data_source_id #=> String
830
+ # @overload delete_data_source(params = {})
831
+ # @param [Hash] params ({})
832
+ def delete_data_source(params = {}, options = {})
833
+ req = build_request(:delete_data_source, params)
834
+ req.send_request(options)
835
+ end
836
+
837
+ # Assigns the `DELETED` status to an `Evaluation`, rendering it
838
+ # unusable.
839
+ #
840
+ # After invoking the `DeleteEvaluation` operation, you can use the
841
+ # `GetEvaluation` operation to verify that the status of the
842
+ # `Evaluation` changed to `DELETED`.
843
+ #
844
+ #   **Caution:** The results of the `DeleteEvaluation` operation are
+ #   irreversible.
847
+ # @option params [required, String] :evaluation_id
848
+ # A user-supplied ID that uniquely identifies the `Evaluation` to
849
+ # delete.
850
+ # @return [Types::DeleteEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
851
+ #
852
+ # * {Types::DeleteEvaluationOutput#evaluation_id #EvaluationId} => String
853
+ #
854
+ # @example Request syntax with placeholder values
855
+ # resp = client.delete_evaluation({
856
+ # evaluation_id: "EntityId", # required
857
+ # })
858
+ #
859
+ # @example Response structure
860
+ # resp.evaluation_id #=> String
861
+ # @overload delete_evaluation(params = {})
862
+ # @param [Hash] params ({})
863
+ def delete_evaluation(params = {}, options = {})
864
+ req = build_request(:delete_evaluation, params)
865
+ req.send_request(options)
866
+ end
867
+
868
+ # Assigns the `DELETED` status to an `MLModel`, rendering it unusable.
869
+ #
870
+ # After using the `DeleteMLModel` operation, you can use the
871
+ # `GetMLModel` operation to verify that the status of the `MLModel`
872
+ # changed to DELETED.
873
+ #
874
+ # **Caution:** The result of the `DeleteMLModel` operation is
875
+ # irreversible.
876
+ # @option params [required, String] :ml_model_id
877
+ # A user-supplied ID that uniquely identifies the `MLModel`.
878
+ # @return [Types::DeleteMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
879
+ #
880
+ # * {Types::DeleteMLModelOutput#ml_model_id #MLModelId} => String
881
+ #
882
+ # @example Request syntax with placeholder values
883
+ # resp = client.delete_ml_model({
884
+ # ml_model_id: "EntityId", # required
885
+ # })
886
+ #
887
+ # @example Response structure
888
+ # resp.ml_model_id #=> String
889
+ # @overload delete_ml_model(params = {})
890
+ # @param [Hash] params ({})
891
+ def delete_ml_model(params = {}, options = {})
892
+ req = build_request(:delete_ml_model, params)
893
+ req.send_request(options)
894
+ end
895
+
896
+ #   Deletes a real-time endpoint of an `MLModel`.
897
+ # @option params [required, String] :ml_model_id
898
+ # The ID assigned to the `MLModel` during creation.
899
+ # @return [Types::DeleteRealtimeEndpointOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
900
+ #
901
+ # * {Types::DeleteRealtimeEndpointOutput#ml_model_id #MLModelId} => String
902
+ # * {Types::DeleteRealtimeEndpointOutput#realtime_endpoint_info #RealtimeEndpointInfo} => Types::RealtimeEndpointInfo
903
+ #
904
+ # @example Request syntax with placeholder values
905
+ # resp = client.delete_realtime_endpoint({
906
+ # ml_model_id: "EntityId", # required
907
+ # })
908
+ #
909
+ # @example Response structure
910
+ # resp.ml_model_id #=> String
911
+ # resp.realtime_endpoint_info.peak_requests_per_second #=> Integer
912
+ # resp.realtime_endpoint_info.created_at #=> Time
913
+ # resp.realtime_endpoint_info.endpoint_url #=> String
914
+ # resp.realtime_endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
915
+ # @overload delete_realtime_endpoint(params = {})
916
+ # @param [Hash] params ({})
917
+ def delete_realtime_endpoint(params = {}, options = {})
918
+ req = build_request(:delete_realtime_endpoint, params)
919
+ req.send_request(options)
920
+ end
921
+
922
+ # Deletes the specified tags associated with an ML object. After this
923
+ # operation is complete, you can't recover deleted tags.
924
+ #
925
+ # If you specify a tag that doesn't exist, Amazon ML ignores it.
926
+ # @option params [required, Array<String>] :tag_keys
927
+ # One or more tags to delete.
928
+ # @option params [required, String] :resource_id
929
+ # The ID of the tagged ML object. For example, `exampleModelId`.
930
+ # @option params [required, String] :resource_type
931
+ # The type of the tagged ML object.
932
+ # @return [Types::DeleteTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
933
+ #
934
+ # * {Types::DeleteTagsOutput#resource_id #ResourceId} => String
935
+ # * {Types::DeleteTagsOutput#resource_type #ResourceType} => String
936
+ #
937
+ # @example Request syntax with placeholder values
938
+ # resp = client.delete_tags({
939
+ # tag_keys: ["TagKey"], # required
940
+ # resource_id: "EntityId", # required
941
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
942
+ # })
943
+ #
944
+ # @example Response structure
945
+ # resp.resource_id #=> String
946
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
947
+ # @overload delete_tags(params = {})
948
+ # @param [Hash] params ({})
949
+ def delete_tags(params = {}, options = {})
950
+ req = build_request(:delete_tags, params)
951
+ req.send_request(options)
952
+ end
953
+
954
+ # Returns a list of `BatchPrediction` operations that match the search
955
+ # criteria in the request.
956
+ # @option params [String] :filter_variable
957
+ # Use one of the following variables to filter a list of
958
+ # `BatchPrediction`\:
959
+ #
960
+ # * `CreatedAt` - Sets the search criteria to the `BatchPrediction`
961
+ # creation date.
962
+ # * `Status` - Sets the search criteria to the `BatchPrediction` status.
963
+ # * `Name` - Sets the search criteria to the contents of the
964
+ #   `BatchPrediction` `Name`.
965
+ # * `IAMUser` - Sets the search criteria to the user account that
966
+ # invoked the `BatchPrediction` creation.
967
+ # * `MLModelId` - Sets the search criteria to the `MLModel` used in the
968
+ # `BatchPrediction`.
969
+ # * `DataSourceId` - Sets the search criteria to the `DataSource` used
970
+ # in the `BatchPrediction`.
971
+ # * `DataURI` - Sets the search criteria to the data file(s) used in the
972
+ # `BatchPrediction`. The URL can identify either a file or an Amazon
973
+ #   Simple Storage Service (Amazon S3) bucket or directory.
974
+ # @option params [String] :eq
975
+ # The equal to operator. The `BatchPrediction` results will have
976
+ # `FilterVariable` values that exactly match the value specified with
977
+ # `EQ`.
978
+ # @option params [String] :gt
979
+ # The greater than operator. The `BatchPrediction` results will have
980
+ # `FilterVariable` values that are greater than the value specified with
981
+ # `GT`.
982
+ # @option params [String] :lt
983
+ # The less than operator. The `BatchPrediction` results will have
984
+ # `FilterVariable` values that are less than the value specified with
985
+ # `LT`.
986
+ # @option params [String] :ge
987
+ # The greater than or equal to operator. The `BatchPrediction` results
988
+ # will have `FilterVariable` values that are greater than or equal to
989
+ # the value specified with `GE`.
990
+ # @option params [String] :le
991
+ # The less than or equal to operator. The `BatchPrediction` results will
992
+ # have `FilterVariable` values that are less than or equal to the value
993
+ # specified with `LE`.
994
+ # @option params [String] :ne
995
+ # The not equal to operator. The `BatchPrediction` results will have
996
+ # `FilterVariable` values not equal to the value specified with `NE`.
997
+ # @option params [String] :prefix
998
+ # A string that is found at the beginning of a variable, such as `Name`
999
+ # or `Id`.
1000
+ #
1001
+ # For example, a `Batch Prediction` operation could have the `Name`
1002
+ # `2014-09-09-HolidayGiftMailer`. To search for this `BatchPrediction`,
1003
+ # select `Name` for the `FilterVariable` and any of the following
1004
+ # strings for the `Prefix`\:
1005
+ #
1006
+ # * 2014-09
1007
+ #
1008
+ # * 2014-09-09
1009
+ #
1010
+ # * 2014-09-09-Holiday
1011
+ # @option params [String] :sort_order
1012
+ # A two-value parameter that determines the sequence of the resulting
1013
+ #   list of `BatchPrediction` objects.
1014
+ #
1015
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1016
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1017
+ #
1018
+ # Results are sorted by `FilterVariable`.
1019
+ # @option params [String] :next_token
1020
+ # An ID of the page in the paginated results.
1021
+ # @option params [Integer] :limit
1022
+ # The number of pages of information to include in the result. The range
1023
+ # of acceptable values is `1` through `100`. The default value is `100`.
1024
+ # @return [Types::DescribeBatchPredictionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1025
+ #
1026
+ # * {Types::DescribeBatchPredictionsOutput#results #Results} => Array&lt;Types::BatchPrediction&gt;
1027
+ # * {Types::DescribeBatchPredictionsOutput#next_token #NextToken} => String
1028
+ #
1029
+ # @example Request syntax with placeholder values
1030
+ # resp = client.describe_batch_predictions({
1031
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1032
+ # eq: "ComparatorValue",
1033
+ # gt: "ComparatorValue",
1034
+ # lt: "ComparatorValue",
1035
+ # ge: "ComparatorValue",
1036
+ # le: "ComparatorValue",
1037
+ # ne: "ComparatorValue",
1038
+ # prefix: "ComparatorValue",
1039
+ # sort_order: "asc", # accepts asc, dsc
1040
+ # next_token: "StringType",
1041
+ # limit: 1,
1042
+ # })
1043
+ #
1044
+ # @example Response structure
1045
+ # resp.results #=> Array
1046
+ # resp.results[0].batch_prediction_id #=> String
1047
+ # resp.results[0].ml_model_id #=> String
1048
+ # resp.results[0].batch_prediction_data_source_id #=> String
1049
+ # resp.results[0].input_data_location_s3 #=> String
1050
+ # resp.results[0].created_by_iam_user #=> String
1051
+ # resp.results[0].created_at #=> Time
1052
+ # resp.results[0].last_updated_at #=> Time
1053
+ # resp.results[0].name #=> String
1054
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1055
+ # resp.results[0].output_uri #=> String
1056
+ # resp.results[0].message #=> String
1057
+ # resp.results[0].compute_time #=> Integer
1058
+ # resp.results[0].finished_at #=> Time
1059
+ # resp.results[0].started_at #=> Time
1060
+ # resp.results[0].total_record_count #=> Integer
1061
+ # resp.results[0].invalid_record_count #=> Integer
1062
+ # resp.next_token #=> String
1063
+ # @overload describe_batch_predictions(params = {})
1064
+ # @param [Hash] params ({})
1065
+ def describe_batch_predictions(params = {}, options = {})
1066
+ req = build_request(:describe_batch_predictions, params)
1067
+ req.send_request(options)
1068
+ end
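+
+ # A pagination sketch using the `:next_token`/`:limit` parameters described
+ # above; the sort order and page size are illustrative. (The response paging
+ # plugin loaded above may also provide higher-level page iteration.)
+ #
+ #     ids = []
+ #     params = { sort_order: "asc", limit: 100 }
+ #     loop do
+ #       page = client.describe_batch_predictions(params)
+ #       ids.concat(page.results.map(&:batch_prediction_id))
+ #       break if page.next_token.nil? || page.next_token.empty?
+ #       params[:next_token] = page.next_token
+ #     end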
1069
+
1070
+ # Returns a list of `DataSource` objects that match the search criteria in the
1071
+ # request.
1072
+ # @option params [String] :filter_variable
1073
+ # Use one of the following variables to filter a list of `DataSource`\:
1074
+ #
1075
+ # * `CreatedAt` - Sets the search criteria to `DataSource` creation
1076
+ # dates.
1077
+ # * `Status` - Sets the search criteria to `DataSource` statuses.
1078
+ # * `Name` - Sets the search criteria to the contents of `DataSource`
1079
+ #   `Name`.
1080
+ # * `DataUri` - Sets the search criteria to the URI of data files used
1081
+ # to create the `DataSource`. The URI can identify either a file or an
1082
+ # Amazon Simple Storage Service (Amazon S3) bucket or directory.
1083
+ # * `IAMUser` - Sets the search criteria to the user account that
1084
+ # invoked the `DataSource` creation.
1085
+ # @option params [String] :eq
1086
+ # The equal to operator. The `DataSource` results will have
1087
+ # `FilterVariable` values that exactly match the value specified with
1088
+ # `EQ`.
1089
+ # @option params [String] :gt
1090
+ # The greater than operator. The `DataSource` results will have
1091
+ # `FilterVariable` values that are greater than the value specified with
1092
+ # `GT`.
1093
+ # @option params [String] :lt
1094
+ # The less than operator. The `DataSource` results will have
1095
+ # `FilterVariable` values that are less than the value specified with
1096
+ # `LT`.
1097
+ # @option params [String] :ge
1098
+ # The greater than or equal to operator. The `DataSource` results will
1099
+ # have `FilterVariable` values that are greater than or equal to the
1100
+ # value specified with `GE`.
1101
+ # @option params [String] :le
1102
+ # The less than or equal to operator. The `DataSource` results will have
1103
+ # `FilterVariable` values that are less than or equal to the value
1104
+ # specified with `LE`.
1105
+ # @option params [String] :ne
1106
+ # The not equal to operator. The `DataSource` results will have
1107
+ # `FilterVariable` values not equal to the value specified with `NE`.
1108
+ # @option params [String] :prefix
1109
+ # A string that is found at the beginning of a variable, such as `Name`
1110
+ # or `Id`.
1111
+ #
1112
+ # For example, a `DataSource` could have the `Name`
1113
+ # `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
1114
+ # select `Name` for the `FilterVariable` and any of the following
1115
+ # strings for the `Prefix`\:
1116
+ #
1117
+ # * 2014-09
1118
+ #
1119
+ # * 2014-09-09
1120
+ #
1121
+ # * 2014-09-09-Holiday
1122
+ # @option params [String] :sort_order
1123
+ # A two-value parameter that determines the sequence of the resulting
1124
+ # list of `DataSource`.
1125
+ #
1126
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1127
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1128
+ #
1129
+ # Results are sorted by `FilterVariable`.
1130
+ # @option params [String] :next_token
1131
+ # The ID of the page in the paginated results.
1132
+ # @option params [Integer] :limit
1133
+ # The maximum number of `DataSource` to include in the result.
1134
+ # @return [Types::DescribeDataSourcesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1135
+ #
1136
+ # * {Types::DescribeDataSourcesOutput#results #Results} => Array&lt;Types::DataSource&gt;
1137
+ # * {Types::DescribeDataSourcesOutput#next_token #NextToken} => String
1138
+ #
1139
+ # @example Request syntax with placeholder values
1140
+ # resp = client.describe_data_sources({
1141
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, DataLocationS3, IAMUser
1142
+ # eq: "ComparatorValue",
1143
+ # gt: "ComparatorValue",
1144
+ # lt: "ComparatorValue",
1145
+ # ge: "ComparatorValue",
1146
+ # le: "ComparatorValue",
1147
+ # ne: "ComparatorValue",
1148
+ # prefix: "ComparatorValue",
1149
+ # sort_order: "asc", # accepts asc, dsc
1150
+ # next_token: "StringType",
1151
+ # limit: 1,
1152
+ # })
1153
+ #
1154
+ # @example Response structure
1155
+ # resp.results #=> Array
1156
+ # resp.results[0].data_source_id #=> String
1157
+ # resp.results[0].data_location_s3 #=> String
1158
+ # resp.results[0].data_rearrangement #=> String
1159
+ # resp.results[0].created_by_iam_user #=> String
1160
+ # resp.results[0].created_at #=> Time
1161
+ # resp.results[0].last_updated_at #=> Time
1162
+ # resp.results[0].data_size_in_bytes #=> Integer
1163
+ # resp.results[0].number_of_files #=> Integer
1164
+ # resp.results[0].name #=> String
1165
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1166
+ # resp.results[0].message #=> String
1167
+ # resp.results[0].redshift_metadata.redshift_database.database_name #=> String
1168
+ # resp.results[0].redshift_metadata.redshift_database.cluster_identifier #=> String
1169
+ # resp.results[0].redshift_metadata.database_user_name #=> String
1170
+ # resp.results[0].redshift_metadata.select_sql_query #=> String
1171
+ # resp.results[0].rds_metadata.database.instance_identifier #=> String
1172
+ # resp.results[0].rds_metadata.database.database_name #=> String
1173
+ # resp.results[0].rds_metadata.database_user_name #=> String
1174
+ # resp.results[0].rds_metadata.select_sql_query #=> String
1175
+ # resp.results[0].rds_metadata.resource_role #=> String
1176
+ # resp.results[0].rds_metadata.service_role #=> String
1177
+ # resp.results[0].rds_metadata.data_pipeline_id #=> String
1178
+ # resp.results[0].role_arn #=> String
1179
+ # resp.results[0].compute_statistics #=> Boolean
1180
+ # resp.results[0].compute_time #=> Integer
1181
+ # resp.results[0].finished_at #=> Time
1182
+ # resp.results[0].started_at #=> Time
1183
+ # resp.next_token #=> String
1184
+ # @overload describe_data_sources(params = {})
1185
+ # @param [Hash] params ({})
1186
+ def describe_data_sources(params = {}, options = {})
1187
+ req = build_request(:describe_data_sources, params)
1188
+ req.send_request(options)
1189
+ end
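+ # Illustrative sketch (not generated documentation; values are
+ # placeholders): combining the `Name` filter variable with a `Prefix`,
+ # as described above, to list the holiday-mailer data sources in
+ # descending order.
+ #
+ #   resp = client.describe_data_sources(
+ #     filter_variable: "Name",
+ #     prefix: "2014-09-09-Holiday",
+ #     sort_order: "dsc",
+ #     limit: 25
+ #   )
+ #   resp.results.each { |ds| puts "#{ds.name} (#{ds.status})" }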
1190
+
1191
+ # Returns a list of `Evaluation` objects that match the search criteria
1192
+ # in the request.
1193
+ # @option params [String] :filter_variable
1194
+ # Use one of the following variables to filter a list of `Evaluation`
1195
+ # objects:
1196
+ #
1197
+ # * `CreatedAt` - Sets the search criteria to the `Evaluation` creation
1198
+ # date.
1199
+ # * `Status` - Sets the search criteria to the `Evaluation` status.
1200
+ # * `Name` - Sets the search criteria to the contents of `Evaluation`
1201
+ # `Name`.
1202
+ # * `IAMUser` - Sets the search criteria to the user account that
1203
+ # invoked an `Evaluation`.
1204
+ # * `MLModelId` - Sets the search criteria to the `MLModel` that was
1205
+ # evaluated.
1206
+ # * `DataSourceId` - Sets the search criteria to the `DataSource` used
1207
+ # in `Evaluation`.
1208
+ # * `DataUri` - Sets the search criteria to the data file(s) used in
1209
+ # `Evaluation`. The URL can identify either a file or an Amazon Simple
1210
+ # Storage Service (Amazon S3) bucket or directory.
1211
+ # @option params [String] :eq
1212
+ # The equal to operator. The `Evaluation` results will have
1213
+ # `FilterVariable` values that exactly match the value specified with
1214
+ # `EQ`.
1215
+ # @option params [String] :gt
1216
+ # The greater than operator. The `Evaluation` results will have
1217
+ # `FilterVariable` values that are greater than the value specified with
1218
+ # `GT`.
1219
+ # @option params [String] :lt
1220
+ # The less than operator. The `Evaluation` results will have
1221
+ # `FilterVariable` values that are less than the value specified with
1222
+ # `LT`.
1223
+ # @option params [String] :ge
1224
+ # The greater than or equal to operator. The `Evaluation` results will
1225
+ # have `FilterVariable` values that are greater than or equal to the
1226
+ # value specified with `GE`.
1227
+ # @option params [String] :le
1228
+ # The less than or equal to operator. The `Evaluation` results will have
1229
+ # `FilterVariable` values that are less than or equal to the value
1230
+ # specified with `LE`.
1231
+ # @option params [String] :ne
1232
+ # The not equal to operator. The `Evaluation` results will have
1233
+ # `FilterVariable` values not equal to the value specified with `NE`.
1234
+ # @option params [String] :prefix
1235
+ # A string that is found at the beginning of a variable, such as `Name`
1236
+ # or `Id`.
1237
+ #
1238
+ # For example, an `Evaluation` could have the `Name`
1239
+ # `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
1240
+ # select `Name` for the `FilterVariable` and any of the following
1241
+ # strings for the `Prefix`\:
1242
+ #
1243
+ # * 2014-09
1244
+ #
1245
+ # * 2014-09-09
1246
+ #
1247
+ # * 2014-09-09-Holiday
1248
+ # @option params [String] :sort_order
1249
+ # A two-value parameter that determines the sequence of the resulting
1250
+ # list of `Evaluation`.
1251
+ #
1252
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1253
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1254
+ #
1255
+ # Results are sorted by `FilterVariable`.
1256
+ # @option params [String] :next_token
1257
+ # The ID of the page in the paginated results.
1258
+ # @option params [Integer] :limit
1259
+ # The maximum number of `Evaluation` to include in the result.
1260
+ # @return [Types::DescribeEvaluationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1261
+ #
1262
+ # * {Types::DescribeEvaluationsOutput#results #Results} => Array&lt;Types::Evaluation&gt;
1263
+ # * {Types::DescribeEvaluationsOutput#next_token #NextToken} => String
1264
+ #
1265
+ # @example Request syntax with placeholder values
1266
+ # resp = client.describe_evaluations({
1267
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1268
+ # eq: "ComparatorValue",
1269
+ # gt: "ComparatorValue",
1270
+ # lt: "ComparatorValue",
1271
+ # ge: "ComparatorValue",
1272
+ # le: "ComparatorValue",
1273
+ # ne: "ComparatorValue",
1274
+ # prefix: "ComparatorValue",
1275
+ # sort_order: "asc", # accepts asc, dsc
1276
+ # next_token: "StringType",
1277
+ # limit: 1,
1278
+ # })
1279
+ #
1280
+ # @example Response structure
1281
+ # resp.results #=> Array
1282
+ # resp.results[0].evaluation_id #=> String
1283
+ # resp.results[0].ml_model_id #=> String
1284
+ # resp.results[0].evaluation_data_source_id #=> String
1285
+ # resp.results[0].input_data_location_s3 #=> String
1286
+ # resp.results[0].created_by_iam_user #=> String
1287
+ # resp.results[0].created_at #=> Time
1288
+ # resp.results[0].last_updated_at #=> Time
1289
+ # resp.results[0].name #=> String
1290
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1291
+ # resp.results[0].performance_metrics.properties #=> Hash
1292
+ # resp.results[0].performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1293
+ # resp.results[0].message #=> String
1294
+ # resp.results[0].compute_time #=> Integer
1295
+ # resp.results[0].finished_at #=> Time
1296
+ # resp.results[0].started_at #=> Time
1297
+ # resp.next_token #=> String
1298
+ # @overload describe_evaluations(params = {})
1299
+ # @param [Hash] params ({})
1300
+ def describe_evaluations(params = {}, options = {})
1301
+ req = build_request(:describe_evaluations, params)
1302
+ req.send_request(options)
1303
+ end
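+ # Illustrative sketch (the model ID is a placeholder): the `MLModelId`
+ # filter variable lists every evaluation recorded for a single model,
+ # which makes it easy to compare their performance metrics.
+ #
+ #   resp = client.describe_evaluations(
+ #     filter_variable: "MLModelId",
+ #     eq: "ml-exampleModelId"
+ #   )
+ #   resp.results.each do |ev|
+ #     metrics = ev.performance_metrics ? ev.performance_metrics.properties : {}
+ #     puts "#{ev.evaluation_id} (#{ev.status}): #{metrics}"
+ #   end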
1304
+
1305
+ # Returns a list of `MLModel` objects that match the search criteria in the
1306
+ # request.
1307
+ # @option params [String] :filter_variable
1308
+ # Use one of the following variables to filter a list of `MLModel`\:
1309
+ #
1310
+ # * `CreatedAt` - Sets the search criteria to `MLModel` creation date.
1311
+ # * `Status` - Sets the search criteria to `MLModel` status.
1312
+ # * `Name` - Sets the search criteria to the contents of `MLModel`
1313
+ # `Name`.
1314
+ # * `IAMUser` - Sets the search criteria to the user account that
1315
+ # invoked the `MLModel` creation.
1316
+ # * `TrainingDataSourceId` - Sets the search criteria to the
1317
+ # `DataSource` used to train one or more `MLModel`.
1318
+ # * `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel`
1319
+ # real-time endpoint status.
1320
+ # * `MLModelType` - Sets the search criteria to `MLModel` type: binary,
1321
+ # regression, or multi-class.
1322
+ # * `Algorithm` - Sets the search criteria to the algorithm that the
1323
+ # `MLModel` uses.
1324
+ # * `TrainingDataURI` - Sets the search criteria to the data file(s)
1325
+ # used in training an `MLModel`. The URL can identify either a file or
1326
+ # an Amazon Simple Storage Service (Amazon S3) bucket or directory.
1327
+ # @option params [String] :eq
1328
+ # The equal to operator. The `MLModel` results will have
1329
+ # `FilterVariable` values that exactly match the value specified with
1330
+ # `EQ`.
1331
+ # @option params [String] :gt
1332
+ # The greater than operator. The `MLModel` results will have
1333
+ # `FilterVariable` values that are greater than the value specified with
1334
+ # `GT`.
1335
+ # @option params [String] :lt
1336
+ # The less than operator. The `MLModel` results will have
1337
+ # `FilterVariable` values that are less than the value specified with
1338
+ # `LT`.
1339
+ # @option params [String] :ge
1340
+ # The greater than or equal to operator. The `MLModel` results will have
1341
+ # `FilterVariable` values that are greater than or equal to the value
1342
+ # specified with `GE`.
1343
+ # @option params [String] :le
1344
+ # The less than or equal to operator. The `MLModel` results will have
1345
+ # `FilterVariable` values that are less than or equal to the value
1346
+ # specified with `LE`.
1347
+ # @option params [String] :ne
1348
+ # The not equal to operator. The `MLModel` results will have
1349
+ # `FilterVariable` values not equal to the value specified with `NE`.
1350
+ # @option params [String] :prefix
1351
+ # A string that is found at the beginning of a variable, such as `Name`
1352
+ # or `Id`.
1353
+ #
1354
+ # For example, an `MLModel` could have the `Name`
1355
+ # `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, select
1356
+ # `Name` for the `FilterVariable` and any of the following strings for
1357
+ # the `Prefix`\:
1358
+ #
1359
+ # * 2014-09
1360
+ #
1361
+ # * 2014-09-09
1362
+ #
1363
+ # * 2014-09-09-Holiday
1364
+ # @option params [String] :sort_order
1365
+ # A two-value parameter that determines the sequence of the resulting
1366
+ # list of `MLModel`.
1367
+ #
1368
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1369
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1370
+ #
1371
+ # Results are sorted by `FilterVariable`.
1372
+ # @option params [String] :next_token
1373
+ # The ID of the page in the paginated results.
1374
+ # @option params [Integer] :limit
1375
+ # The number of pages of information to include in the result. The range
1376
+ # of acceptable values is `1` through `100`. The default value is `100`.
1377
+ # @return [Types::DescribeMLModelsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1378
+ #
1379
+ # * {Types::DescribeMLModelsOutput#results #Results} => Array&lt;Types::MLModel&gt;
1380
+ # * {Types::DescribeMLModelsOutput#next_token #NextToken} => String
1381
+ #
1382
+ # @example Request syntax with placeholder values
1383
+ # resp = client.describe_ml_models({
1384
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, TrainingDataSourceId, RealtimeEndpointStatus, MLModelType, Algorithm, TrainingDataURI
1385
+ # eq: "ComparatorValue",
1386
+ # gt: "ComparatorValue",
1387
+ # lt: "ComparatorValue",
1388
+ # ge: "ComparatorValue",
1389
+ # le: "ComparatorValue",
1390
+ # ne: "ComparatorValue",
1391
+ # prefix: "ComparatorValue",
1392
+ # sort_order: "asc", # accepts asc, dsc
1393
+ # next_token: "StringType",
1394
+ # limit: 1,
1395
+ # })
1396
+ #
1397
+ # @example Response structure
1398
+ # resp.results #=> Array
1399
+ # resp.results[0].ml_model_id #=> String
1400
+ # resp.results[0].training_data_source_id #=> String
1401
+ # resp.results[0].created_by_iam_user #=> String
1402
+ # resp.results[0].created_at #=> Time
1403
+ # resp.results[0].last_updated_at #=> Time
1404
+ # resp.results[0].name #=> String
1405
+ # resp.results[0].status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1406
+ # resp.results[0].size_in_bytes #=> Integer
1407
+ # resp.results[0].endpoint_info.peak_requests_per_second #=> Integer
1408
+ # resp.results[0].endpoint_info.created_at #=> Time
1409
+ # resp.results[0].endpoint_info.endpoint_url #=> String
1410
+ # resp.results[0].endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1411
+ # resp.results[0].training_parameters #=> Hash
1412
+ # resp.results[0].training_parameters["StringType"] #=> String
1413
+ # resp.results[0].input_data_location_s3 #=> String
1414
+ # resp.results[0].algorithm #=> String, one of "sgd"
1415
+ # resp.results[0].ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1416
+ # resp.results[0].score_threshold #=> Float
1417
+ # resp.results[0].score_threshold_last_updated_at #=> Time
1418
+ # resp.results[0].message #=> String
1419
+ # resp.results[0].compute_time #=> Integer
1420
+ # resp.results[0].finished_at #=> Time
1421
+ # resp.results[0].started_at #=> Time
1422
+ # resp.next_token #=> String
1423
+ # @overload describe_ml_models(params = {})
1424
+ # @param [Hash] params ({})
1425
+ def describe_ml_models(params = {}, options = {})
1426
+ req = build_request(:describe_ml_models, params)
1427
+ req.send_request(options)
1428
+ end
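+ # Illustrative pagination sketch: the `NextToken` from one page is
+ # passed back as `:next_token` to fetch the next page, until no
+ # further token is returned.
+ #
+ #   params = { limit: 100 }
+ #   loop do
+ #     resp = client.describe_ml_models(params)
+ #     resp.results.each { |m| puts "#{m.ml_model_id} #{m.status}" }
+ #     break unless resp.next_token
+ #     params[:next_token] = resp.next_token
+ #   end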
1429
+
1430
+ # Describes one or more of the tags for your Amazon ML object.
1431
+ # @option params [required, String] :resource_id
1432
+ # The ID of the ML object. For example, `exampleModelId`.
1433
+ # @option params [required, String] :resource_type
1434
+ # The type of the ML object.
1435
+ # @return [Types::DescribeTagsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1436
+ #
1437
+ # * {Types::DescribeTagsOutput#resource_id #ResourceId} => String
1438
+ # * {Types::DescribeTagsOutput#resource_type #ResourceType} => String
1439
+ # * {Types::DescribeTagsOutput#tags #Tags} => Array&lt;Types::Tag&gt;
1440
+ #
1441
+ # @example Request syntax with placeholder values
1442
+ # resp = client.describe_tags({
1443
+ # resource_id: "EntityId", # required
1444
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
1445
+ # })
1446
+ #
1447
+ # @example Response structure
1448
+ # resp.resource_id #=> String
1449
+ # resp.resource_type #=> String, one of "BatchPrediction", "DataSource", "Evaluation", "MLModel"
1450
+ # resp.tags #=> Array
1451
+ # resp.tags[0].key #=> String
1452
+ # resp.tags[0].value #=> String
1453
+ # @overload describe_tags(params = {})
1454
+ # @param [Hash] params ({})
1455
+ def describe_tags(params = {}, options = {})
1456
+ req = build_request(:describe_tags, params)
1457
+ req.send_request(options)
1458
+ end
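+ # Illustrative sketch (placeholder resource ID): reading the tags on
+ # an `MLModel` back into a plain Ruby hash.
+ #
+ #   resp = client.describe_tags(
+ #     resource_id: "ml-exampleModelId",
+ #     resource_type: "MLModel"
+ #   )
+ #   tags = resp.tags.map { |t| [t.key, t.value] }.to_h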
1459
+
1460
+ # Returns a `BatchPrediction` that includes detailed metadata, status,
1461
+ # and data file information for a `Batch Prediction` request.
1462
+ # @option params [required, String] :batch_prediction_id
1463
+ # An ID assigned to the `BatchPrediction` at creation.
1464
+ # @return [Types::GetBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1465
+ #
1466
+ # * {Types::GetBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
1467
+ # * {Types::GetBatchPredictionOutput#ml_model_id #MLModelId} => String
1468
+ # * {Types::GetBatchPredictionOutput#batch_prediction_data_source_id #BatchPredictionDataSourceId} => String
1469
+ # * {Types::GetBatchPredictionOutput#input_data_location_s3 #InputDataLocationS3} => String
1470
+ # * {Types::GetBatchPredictionOutput#created_by_iam_user #CreatedByIamUser} => String
1471
+ # * {Types::GetBatchPredictionOutput#created_at #CreatedAt} => Time
1472
+ # * {Types::GetBatchPredictionOutput#last_updated_at #LastUpdatedAt} => Time
1473
+ # * {Types::GetBatchPredictionOutput#name #Name} => String
1474
+ # * {Types::GetBatchPredictionOutput#status #Status} => String
1475
+ # * {Types::GetBatchPredictionOutput#output_uri #OutputUri} => String
1476
+ # * {Types::GetBatchPredictionOutput#log_uri #LogUri} => String
1477
+ # * {Types::GetBatchPredictionOutput#message #Message} => String
1478
+ # * {Types::GetBatchPredictionOutput#compute_time #ComputeTime} => Integer
1479
+ # * {Types::GetBatchPredictionOutput#finished_at #FinishedAt} => Time
1480
+ # * {Types::GetBatchPredictionOutput#started_at #StartedAt} => Time
1481
+ # * {Types::GetBatchPredictionOutput#total_record_count #TotalRecordCount} => Integer
1482
+ # * {Types::GetBatchPredictionOutput#invalid_record_count #InvalidRecordCount} => Integer
1483
+ #
1484
+ # @example Request syntax with placeholder values
1485
+ # resp = client.get_batch_prediction({
1486
+ # batch_prediction_id: "EntityId", # required
1487
+ # })
1488
+ #
1489
+ # @example Response structure
1490
+ # resp.batch_prediction_id #=> String
1491
+ # resp.ml_model_id #=> String
1492
+ # resp.batch_prediction_data_source_id #=> String
1493
+ # resp.input_data_location_s3 #=> String
1494
+ # resp.created_by_iam_user #=> String
1495
+ # resp.created_at #=> Time
1496
+ # resp.last_updated_at #=> Time
1497
+ # resp.name #=> String
1498
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1499
+ # resp.output_uri #=> String
1500
+ # resp.log_uri #=> String
1501
+ # resp.message #=> String
1502
+ # resp.compute_time #=> Integer
1503
+ # resp.finished_at #=> Time
1504
+ # resp.started_at #=> Time
1505
+ # resp.total_record_count #=> Integer
1506
+ # resp.invalid_record_count #=> Integer
1507
+ # @overload get_batch_prediction(params = {})
1508
+ # @param [Hash] params ({})
1509
+ def get_batch_prediction(params = {}, options = {})
1510
+ req = build_request(:get_batch_prediction, params)
1511
+ req.send_request(options)
1512
+ end
1513
+
1514
+ # Returns a `DataSource` that includes metadata and data file
1515
+ # information, as well as the current status of the `DataSource`.
1516
+ #
1517
+ # `GetDataSource` provides results in normal or verbose format. The
1518
+ # verbose format adds the schema description and the list of files
1519
+ # pointed to by the DataSource to the normal format.
1520
+ # @option params [required, String] :data_source_id
1521
+ # The ID assigned to the `DataSource` at creation.
1522
+ # @option params [Boolean] :verbose
1523
+ # Specifies whether the `GetDataSource` operation should return
1524
+ # `DataSourceSchema`.
1525
+ #
1526
+ # If true, `DataSourceSchema` is returned.
1527
+ #
1528
+ # If false, `DataSourceSchema` is not returned.
1529
+ # @return [Types::GetDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1530
+ #
1531
+ # * {Types::GetDataSourceOutput#data_source_id #DataSourceId} => String
1532
+ # * {Types::GetDataSourceOutput#data_location_s3 #DataLocationS3} => String
1533
+ # * {Types::GetDataSourceOutput#data_rearrangement #DataRearrangement} => String
1534
+ # * {Types::GetDataSourceOutput#created_by_iam_user #CreatedByIamUser} => String
1535
+ # * {Types::GetDataSourceOutput#created_at #CreatedAt} => Time
1536
+ # * {Types::GetDataSourceOutput#last_updated_at #LastUpdatedAt} => Time
1537
+ # * {Types::GetDataSourceOutput#data_size_in_bytes #DataSizeInBytes} => Integer
1538
+ # * {Types::GetDataSourceOutput#number_of_files #NumberOfFiles} => Integer
1539
+ # * {Types::GetDataSourceOutput#name #Name} => String
1540
+ # * {Types::GetDataSourceOutput#status #Status} => String
1541
+ # * {Types::GetDataSourceOutput#log_uri #LogUri} => String
1542
+ # * {Types::GetDataSourceOutput#message #Message} => String
1543
+ # * {Types::GetDataSourceOutput#redshift_metadata #RedshiftMetadata} => Types::RedshiftMetadata
1544
+ # * {Types::GetDataSourceOutput#rds_metadata #RDSMetadata} => Types::RDSMetadata
1545
+ # * {Types::GetDataSourceOutput#role_arn #RoleARN} => String
1546
+ # * {Types::GetDataSourceOutput#compute_statistics #ComputeStatistics} => Boolean
1547
+ # * {Types::GetDataSourceOutput#compute_time #ComputeTime} => Integer
1548
+ # * {Types::GetDataSourceOutput#finished_at #FinishedAt} => Time
1549
+ # * {Types::GetDataSourceOutput#started_at #StartedAt} => Time
1550
+ # * {Types::GetDataSourceOutput#data_source_schema #DataSourceSchema} => String
1551
+ #
1552
+ # @example Request syntax with placeholder values
1553
+ # resp = client.get_data_source({
1554
+ # data_source_id: "EntityId", # required
1555
+ # verbose: false,
1556
+ # })
1557
+ #
1558
+ # @example Response structure
1559
+ # resp.data_source_id #=> String
1560
+ # resp.data_location_s3 #=> String
1561
+ # resp.data_rearrangement #=> String
1562
+ # resp.created_by_iam_user #=> String
1563
+ # resp.created_at #=> Time
1564
+ # resp.last_updated_at #=> Time
1565
+ # resp.data_size_in_bytes #=> Integer
1566
+ # resp.number_of_files #=> Integer
1567
+ # resp.name #=> String
1568
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1569
+ # resp.log_uri #=> String
1570
+ # resp.message #=> String
1571
+ # resp.redshift_metadata.redshift_database.database_name #=> String
1572
+ # resp.redshift_metadata.redshift_database.cluster_identifier #=> String
1573
+ # resp.redshift_metadata.database_user_name #=> String
1574
+ # resp.redshift_metadata.select_sql_query #=> String
1575
+ # resp.rds_metadata.database.instance_identifier #=> String
1576
+ # resp.rds_metadata.database.database_name #=> String
1577
+ # resp.rds_metadata.database_user_name #=> String
1578
+ # resp.rds_metadata.select_sql_query #=> String
1579
+ # resp.rds_metadata.resource_role #=> String
1580
+ # resp.rds_metadata.service_role #=> String
1581
+ # resp.rds_metadata.data_pipeline_id #=> String
1582
+ # resp.role_arn #=> String
1583
+ # resp.compute_statistics #=> Boolean
1584
+ # resp.compute_time #=> Integer
1585
+ # resp.finished_at #=> Time
1586
+ # resp.started_at #=> Time
1587
+ # resp.data_source_schema #=> String
1588
+ # @overload get_data_source(params = {})
1589
+ # @param [Hash] params ({})
1590
+ def get_data_source(params = {}, options = {})
1591
+ req = build_request(:get_data_source, params)
1592
+ req.send_request(options)
1593
+ end
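+ # Illustrative sketch (placeholder ID): requesting the verbose format
+ # so that `DataSourceSchema` is included, then parsing it as JSON.
+ #
+ #   require 'json'
+ #
+ #   resp = client.get_data_source(
+ #     data_source_id: "ds-exampleDataSourceId",
+ #     verbose: true
+ #   )
+ #   schema = JSON.parse(resp.data_source_schema) if resp.data_source_schema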
1594
+
1595
+ # Returns an `Evaluation` that includes metadata as well as the current
1596
+ # status of the `Evaluation`.
1597
+ # @option params [required, String] :evaluation_id
1598
+ # The ID of the `Evaluation` to retrieve. The evaluation of each
1599
+ # `MLModel` is recorded and cataloged. The ID provides the means to
1600
+ # access the information.
1601
+ # @return [Types::GetEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1602
+ #
1603
+ # * {Types::GetEvaluationOutput#evaluation_id #EvaluationId} => String
1604
+ # * {Types::GetEvaluationOutput#ml_model_id #MLModelId} => String
1605
+ # * {Types::GetEvaluationOutput#evaluation_data_source_id #EvaluationDataSourceId} => String
1606
+ # * {Types::GetEvaluationOutput#input_data_location_s3 #InputDataLocationS3} => String
1607
+ # * {Types::GetEvaluationOutput#created_by_iam_user #CreatedByIamUser} => String
1608
+ # * {Types::GetEvaluationOutput#created_at #CreatedAt} => Time
1609
+ # * {Types::GetEvaluationOutput#last_updated_at #LastUpdatedAt} => Time
1610
+ # * {Types::GetEvaluationOutput#name #Name} => String
1611
+ # * {Types::GetEvaluationOutput#status #Status} => String
1612
+ # * {Types::GetEvaluationOutput#performance_metrics #PerformanceMetrics} => Types::PerformanceMetrics
1613
+ # * {Types::GetEvaluationOutput#log_uri #LogUri} => String
1614
+ # * {Types::GetEvaluationOutput#message #Message} => String
1615
+ # * {Types::GetEvaluationOutput#compute_time #ComputeTime} => Integer
1616
+ # * {Types::GetEvaluationOutput#finished_at #FinishedAt} => Time
1617
+ # * {Types::GetEvaluationOutput#started_at #StartedAt} => Time
1618
+ #
1619
+ # @example Request syntax with placeholder values
1620
+ # resp = client.get_evaluation({
1621
+ # evaluation_id: "EntityId", # required
1622
+ # })
1623
+ #
1624
+ # @example Response structure
1625
+ # resp.evaluation_id #=> String
1626
+ # resp.ml_model_id #=> String
1627
+ # resp.evaluation_data_source_id #=> String
1628
+ # resp.input_data_location_s3 #=> String
1629
+ # resp.created_by_iam_user #=> String
1630
+ # resp.created_at #=> Time
1631
+ # resp.last_updated_at #=> Time
1632
+ # resp.name #=> String
1633
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1634
+ # resp.performance_metrics.properties #=> Hash
1635
+ # resp.performance_metrics.properties["PerformanceMetricsPropertyKey"] #=> String
1636
+ # resp.log_uri #=> String
1637
+ # resp.message #=> String
1638
+ # resp.compute_time #=> Integer
1639
+ # resp.finished_at #=> Time
1640
+ # resp.started_at #=> Time
1641
+ # @overload get_evaluation(params = {})
1642
+ # @param [Hash] params ({})
1643
+ def get_evaluation(params = {}, options = {})
1644
+ req = build_request(:get_evaluation, params)
1645
+ req.send_request(options)
1646
+ end
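+ # Illustrative sketch (placeholder ID): for a binary model, the
+ # `PerformanceMetrics` properties hash typically carries a
+ # "BinaryAUC" entry; its value is returned as a String.
+ #
+ #   resp = client.get_evaluation(evaluation_id: "ev-exampleEvaluationId")
+ #   auc = resp.performance_metrics.properties["BinaryAUC"]
+ #   puts "AUC: #{auc.to_f}" if auc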
1647
+
1648
+ # Returns an `MLModel` that includes detailed metadata, data source
1649
+ # information, and the current status of the `MLModel`.
1650
+ #
1651
+ # `GetMLModel` provides results in normal or verbose format.
1652
+ # @option params [required, String] :ml_model_id
1653
+ # The ID assigned to the `MLModel` at creation.
1654
+ # @option params [Boolean] :verbose
1655
+ # Specifies whether the `GetMLModel` operation should return `Recipe`.
1656
+ #
1657
+ # If true, `Recipe` is returned.
1658
+ #
1659
+ # If false, `Recipe` is not returned.
1660
+ # @return [Types::GetMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1661
+ #
1662
+ # * {Types::GetMLModelOutput#ml_model_id #MLModelId} => String
1663
+ # * {Types::GetMLModelOutput#training_data_source_id #TrainingDataSourceId} => String
1664
+ # * {Types::GetMLModelOutput#created_by_iam_user #CreatedByIamUser} => String
1665
+ # * {Types::GetMLModelOutput#created_at #CreatedAt} => Time
1666
+ # * {Types::GetMLModelOutput#last_updated_at #LastUpdatedAt} => Time
1667
+ # * {Types::GetMLModelOutput#name #Name} => String
1668
+ # * {Types::GetMLModelOutput#status #Status} => String
1669
+ # * {Types::GetMLModelOutput#size_in_bytes #SizeInBytes} => Integer
1670
+ # * {Types::GetMLModelOutput#endpoint_info #EndpointInfo} => Types::RealtimeEndpointInfo
1671
+ # * {Types::GetMLModelOutput#training_parameters #TrainingParameters} => Hash&lt;String,String&gt;
1672
+ # * {Types::GetMLModelOutput#input_data_location_s3 #InputDataLocationS3} => String
1673
+ # * {Types::GetMLModelOutput#ml_model_type #MLModelType} => String
1674
+ # * {Types::GetMLModelOutput#score_threshold #ScoreThreshold} => Float
1675
+ # * {Types::GetMLModelOutput#score_threshold_last_updated_at #ScoreThresholdLastUpdatedAt} => Time
1676
+ # * {Types::GetMLModelOutput#log_uri #LogUri} => String
1677
+ # * {Types::GetMLModelOutput#message #Message} => String
1678
+ # * {Types::GetMLModelOutput#compute_time #ComputeTime} => Integer
1679
+ # * {Types::GetMLModelOutput#finished_at #FinishedAt} => Time
1680
+ # * {Types::GetMLModelOutput#started_at #StartedAt} => Time
1681
+ # * {Types::GetMLModelOutput#recipe #Recipe} => String
1682
+ # * {Types::GetMLModelOutput#schema #Schema} => String
1683
+ #
1684
+ # @example Request syntax with placeholder values
1685
+ # resp = client.get_ml_model({
1686
+ # ml_model_id: "EntityId", # required
1687
+ # verbose: false,
1688
+ # })
1689
+ #
1690
+ # @example Response structure
1691
+ # resp.ml_model_id #=> String
1692
+ # resp.training_data_source_id #=> String
1693
+ # resp.created_by_iam_user #=> String
1694
+ # resp.created_at #=> Time
1695
+ # resp.last_updated_at #=> Time
1696
+ # resp.name #=> String
1697
+ # resp.status #=> String, one of "PENDING", "INPROGRESS", "FAILED", "COMPLETED", "DELETED"
1698
+ # resp.size_in_bytes #=> Integer
1699
+ # resp.endpoint_info.peak_requests_per_second #=> Integer
1700
+ # resp.endpoint_info.created_at #=> Time
1701
+ # resp.endpoint_info.endpoint_url #=> String
1702
+ # resp.endpoint_info.endpoint_status #=> String, one of "NONE", "READY", "UPDATING", "FAILED"
1703
+ # resp.training_parameters #=> Hash
1704
+ # resp.training_parameters["StringType"] #=> String
1705
+ # resp.input_data_location_s3 #=> String
1706
+ # resp.ml_model_type #=> String, one of "REGRESSION", "BINARY", "MULTICLASS"
1707
+ # resp.score_threshold #=> Float
1708
+ # resp.score_threshold_last_updated_at #=> Time
1709
+ # resp.log_uri #=> String
1710
+ # resp.message #=> String
1711
+ # resp.compute_time #=> Integer
1712
+ # resp.finished_at #=> Time
1713
+ # resp.started_at #=> Time
1714
+ # resp.recipe #=> String
1715
+ # resp.schema #=> String
1716
+ # @overload get_ml_model(params = {})
1717
+ # @param [Hash] params ({})
1718
+ def get_ml_model(params = {}, options = {})
1719
+ req = build_request(:get_ml_model, params)
1720
+ req.send_request(options)
1721
+ end
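+ # Illustrative sketch (placeholder ID): `verbose: true` adds the
+ # training `Recipe` to the response, and `endpoint_info` reports
+ # whether a real-time endpoint is available for {#predict}.
+ #
+ #   model = client.get_ml_model(ml_model_id: "ml-exampleModelId", verbose: true)
+ #   puts model.endpoint_info.endpoint_status
+ #   puts model.recipe if model.recipe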
1722
+
1723
+ # Generates a prediction for the observation using the specified `ML
1724
+ # Model`.
1725
+ #
1726
+ # <note markdown="1"> Not all response parameters will be populated. Whether a response
1727
+ # parameter is populated depends on the type of model requested.
1728
+ #
1729
+ # </note>
1730
+ # @option params [required, String] :ml_model_id
1731
+ # A unique identifier of the `MLModel`.
1732
+ # @option params [required, Hash<String,String>] :record
1733
+ # A map of variable name-value pairs that represent an observation.
1734
+ # @option params [required, String] :predict_endpoint
1735
+ # @return [Types::PredictOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1736
+ #
1737
+ # * {Types::PredictOutput#prediction #Prediction} => Types::Prediction
1738
+ #
1739
+ # @example Request syntax with placeholder values
1740
+ # resp = client.predict({
1741
+ # ml_model_id: "EntityId", # required
1742
+ # record: { # required
1743
+ # "VariableName" => "VariableValue",
1744
+ # },
1745
+ # predict_endpoint: "VipURL", # required
1746
+ # })
1747
+ #
1748
+ # @example Response structure
1749
+ # resp.prediction.predicted_label #=> String
1750
+ # resp.prediction.predicted_value #=> Float
1751
+ # resp.prediction.predicted_scores #=> Hash
1752
+ # resp.prediction.predicted_scores["Label"] #=> Float
1753
+ # resp.prediction.details #=> Hash
1754
+ # resp.prediction.details["DetailsAttributes"] #=> String
1755
+ # @overload predict(params = {})
1756
+ # @param [Hash] params ({})
1757
+ def predict(params = {}, options = {})
1758
+ req = build_request(:predict, params)
1759
+ req.send_request(options)
1760
+ end
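+ # Illustrative sketch (placeholder IDs and record values): the
+ # `:predict_endpoint` is typically the model's
+ # `endpoint_info.endpoint_url`, which is only usable once the endpoint
+ # status is "READY".
+ #
+ #   model = client.get_ml_model(ml_model_id: "ml-exampleModelId")
+ #   resp = client.predict(
+ #     ml_model_id: "ml-exampleModelId",
+ #     record: { "feature1" => "value1" },
+ #     predict_endpoint: model.endpoint_info.endpoint_url
+ #   )
+ #   puts resp.prediction.predicted_label || resp.prediction.predicted_value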
1761
+
1762
+ # Updates the `BatchPredictionName` of a `BatchPrediction`.
1763
+ #
1764
+ # You can use the `GetBatchPrediction` operation to view the contents of
1765
+ # the updated data element.
1766
+ # @option params [required, String] :batch_prediction_id
1767
+ # The ID assigned to the `BatchPrediction` during creation.
1768
+ # @option params [required, String] :batch_prediction_name
1769
+ # A new user-supplied name or description of the `BatchPrediction`.
1770
+ # @return [Types::UpdateBatchPredictionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1771
+ #
1772
+ # * {Types::UpdateBatchPredictionOutput#batch_prediction_id #BatchPredictionId} => String
1773
+ #
1774
+ # @example Request syntax with placeholder values
1775
+ # resp = client.update_batch_prediction({
1776
+ # batch_prediction_id: "EntityId", # required
1777
+ # batch_prediction_name: "EntityName", # required
1778
+ # })
1779
+ #
1780
+ # @example Response structure
1781
+ # resp.batch_prediction_id #=> String
1782
+ # @overload update_batch_prediction(params = {})
1783
+ # @param [Hash] params ({})
1784
+ def update_batch_prediction(params = {}, options = {})
1785
+ req = build_request(:update_batch_prediction, params)
1786
+ req.send_request(options)
1787
+ end
1788
+
1789
+ # Updates the `DataSourceName` of a `DataSource`.
1790
+ #
1791
+ # You can use the `GetDataSource` operation to view the contents of the
1792
+ # updated data element.
1793
+ # @option params [required, String] :data_source_id
1794
+ # The ID assigned to the `DataSource` during creation.
1795
+ # @option params [required, String] :data_source_name
1796
+ # A new user-supplied name or description of the `DataSource` that will
1797
+ # replace the current description.
1798
+ # @return [Types::UpdateDataSourceOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1799
+ #
1800
+ # * {Types::UpdateDataSourceOutput#data_source_id #DataSourceId} => String
1801
+ #
1802
+ # @example Request syntax with placeholder values
1803
+ # resp = client.update_data_source({
1804
+ # data_source_id: "EntityId", # required
1805
+ # data_source_name: "EntityName", # required
1806
+ # })
1807
+ #
1808
+ # @example Response structure
1809
+ # resp.data_source_id #=> String
1810
+ # @overload update_data_source(params = {})
1811
+ # @param [Hash] params ({})
1812
+ def update_data_source(params = {}, options = {})
1813
+ req = build_request(:update_data_source, params)
1814
+ req.send_request(options)
1815
+ end
1816
+
1817
+ # Updates the `EvaluationName` of an `Evaluation`.
1818
+ #
1819
+ # You can use the `GetEvaluation` operation to view the contents of the
1820
+ # updated data element.
1821
+ # @option params [required, String] :evaluation_id
1822
+ # The ID assigned to the `Evaluation` during creation.
1823
+ # @option params [required, String] :evaluation_name
1824
+ # A new user-supplied name or description of the `Evaluation` that will
1825
+ # replace the current content.
1826
+ # @return [Types::UpdateEvaluationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1827
+ #
1828
+ # * {Types::UpdateEvaluationOutput#evaluation_id #EvaluationId} => String
1829
+ #
1830
+ # @example Request syntax with placeholder values
1831
+ # resp = client.update_evaluation({
1832
+ # evaluation_id: "EntityId", # required
1833
+ # evaluation_name: "EntityName", # required
1834
+ # })
1835
+ #
1836
+ # @example Response structure
1837
+ # resp.evaluation_id #=> String
1838
+ # @overload update_evaluation(params = {})
1839
+ # @param [Hash] params ({})
1840
+ def update_evaluation(params = {}, options = {})
1841
+ req = build_request(:update_evaluation, params)
1842
+ req.send_request(options)
1843
+ end
1844
+
1845
+ # Updates the `MLModelName` and the `ScoreThreshold` of an `MLModel`.
1846
+ #
1847
+ # You can use the `GetMLModel` operation to view the contents of the
1848
+ # updated data element.
1849
+ # @option params [required, String] :ml_model_id
1850
+ # The ID assigned to the `MLModel` during creation.
1851
+ # @option params [String] :ml_model_name
1852
+ # A user-supplied name or description of the `MLModel`.
1853
+ # @option params [Float] :score_threshold
1854
+ # The `ScoreThreshold` used in binary classification `MLModel` that
1855
+ # marks the boundary between a positive prediction and a negative
1856
+ # prediction.
1857
+ #
1858
+ # Output values greater than or equal to the `ScoreThreshold` receive a
1859
+ # positive result from the `MLModel`, such as `true`. Output values less
1860
+ # than the `ScoreThreshold` receive a negative response from the
1861
+ # `MLModel`, such as `false`.
1862
+ # @return [Types::UpdateMLModelOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1863
+ #
1864
+ # * {Types::UpdateMLModelOutput#ml_model_id #MLModelId} => String
1865
+ #
1866
+ # @example Request syntax with placeholder values
1867
+ # resp = client.update_ml_model({
1868
+ # ml_model_id: "EntityId", # required
1869
+ # ml_model_name: "EntityName",
1870
+ # score_threshold: 1.0,
1871
+ # })
1872
+ #
1873
+ # @example Response structure
1874
+ # resp.ml_model_id #=> String
1875
+ # @overload update_ml_model(params = {})
1876
+ # @param [Hash] params ({})
1877
+ def update_ml_model(params = {}, options = {})
1878
+ req = build_request(:update_ml_model, params)
1879
+ req.send_request(options)
1880
+ end
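+ # Illustrative sketch (placeholder ID): raising the score threshold so
+ # that only observations scoring at or above 0.75 are labeled
+ # positive, per the `:score_threshold` description above.
+ #
+ #   client.update_ml_model(
+ #     ml_model_id: "ml-exampleModelId",
+ #     score_threshold: 0.75
+ #   )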
1881
+
1882
+ # @!endgroup
1883
+
1884
+ # @param params ({})
1885
+ # @api private
1886
+ def build_request(operation_name, params = {})
1887
+ handlers = @handlers.for(operation_name)
1888
+ context = Seahorse::Client::RequestContext.new(
1889
+ operation_name: operation_name,
1890
+ operation: config.api.operation(operation_name),
1891
+ client: self,
1892
+ params: params,
1893
+ config: config)
1894
+ context[:gem_name] = 'aws-sdk-machinelearning'
1895
+ context[:gem_version] = '1.0.0.rc1'
1896
+ Seahorse::Client::Request.new(handlers, context)
1897
+ end
1898
+
1899
+ # Polls an API operation until a resource enters a desired state.
1900
+ #
1901
+ # ## Basic Usage
1902
+ #
1903
+ # A waiter will call an API operation until:
1904
+ #
1905
+ # * It is successful
1906
+ # * It enters a terminal state
1907
+ # * It makes the maximum number of attempts
1908
+ #
1909
+ # In between attempts, the waiter will sleep.
1910
+ #
1911
+ # # polls in a loop, sleeping between attempts
1912
+ # client.wait_until(waiter_name, params)
1913
+ #
1914
+ # ## Configuration
1915
+ #
1916
+ # You can configure the maximum number of polling attempts, and the
1917
+ # delay (in seconds) between each polling attempt. You can pass
1918
+ # configuration as the final arguments hash.
1919
+ #
1920
+ # # poll for ~25 seconds
1921
+ # client.wait_until(waiter_name, params, {
1922
+ # max_attempts: 5,
1923
+ # delay: 5,
1924
+ # })
1925
+ #
1926
+ # ## Callbacks
1927
+ #
1928
+ # You can be notified before each polling attempt and before each
1929
+ # delay. If you throw `:success` or `:failure` from these callbacks,
1930
+ # it will terminate the waiter.
1931
+ #
1932
+ # started_at = Time.now
1933
+ # client.wait_until(waiter_name, params, {
1934
+ #
1935
+ # # disable max attempts
1936
+ # max_attempts: nil,
1937
+ #
1938
+ # # poll for 1 hour, instead of a number of attempts
1939
+ # before_wait: -> (attempts, response) do
1940
+ # throw :failure if Time.now - started_at > 3600
1941
+ # end
1942
+ # })
1943
+ #
1944
+ # ## Handling Errors
1945
+ #
1946
+ # When a waiter is unsuccessful, it will raise an error.
1947
+ # All of the failure errors extend from
1948
+ # {Aws::Waiters::Errors::WaiterFailed}.
1949
+ #
1950
+ # begin
1951
+ # client.wait_until(...)
1952
+ # rescue Aws::Waiters::Errors::WaiterFailed
1953
+ # # resource did not enter the desired state in time
1954
+ # end
1955
+ #
1956
+ # ## Valid Waiters
1957
+ #
1958
+ # The following table lists the valid waiter names, the operations they call,
1959
+ # and the default `:delay` and `:max_attempts` values.
1960
+ #
1961
+ # | waiter_name | params | :delay | :max_attempts |
1962
+ # | -------------------------- | ----------------------------- | -------- | ------------- |
1963
+ # | batch_prediction_available | {#describe_batch_predictions} | 30 | 60 |
1964
+ # | data_source_available | {#describe_data_sources} | 30 | 60 |
1965
+ # | evaluation_available | {#describe_evaluations} | 30 | 60 |
1966
+ # | ml_model_available | {#describe_ml_models} | 30 | 60 |
1967
+ #
1968
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
1969
+ # because the waiter has entered a state that it will not transition
1970
+ # out of, preventing success.
1971
+ #
1972
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
1973
+ # maximum number of attempts have been made, and the waiter is not
1974
+ # yet successful.
1975
+ #
1976
+ # @raise [Errors::UnexpectedError] Raised when an error is encountered
1977
+ # while polling for a resource that is not expected.
1978
+ #
1979
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
1980
+ # for an unknown state.
1981
+ #
1982
+ # @return [Boolean] Returns `true` if the waiter was successful.
1983
+ # @param [Symbol] waiter_name
1984
+ # @param [Hash] params ({})
1985
+ # @param [Hash] options ({})
1986
+ # @option options [Integer] :max_attempts
1987
+ # @option options [Integer] :delay
1988
+ # @option options [Proc] :before_attempt
1989
+ # @option options [Proc] :before_wait
1990
+ def wait_until(waiter_name, params = {}, options = {})
1991
+ w = waiter(waiter_name, options)
1992
+ yield(w.waiter) if block_given? # deprecated
1993
+ w.wait(params)
1994
+ end
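+ # Illustrative sketch (placeholder model ID): the waiter params are
+ # forwarded to the describe call listed in the table above, so the
+ # same filter parameters can be used to scope what is polled, e.g.
+ # blocking until every evaluation of one model has finished.
+ #
+ #   client.wait_until(:evaluation_available,
+ #     filter_variable: "MLModelId",
+ #     eq: "ml-exampleModelId"
+ #   )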
1995
+
1996
+ # @api private
1997
+ # @deprecated
1998
+ def waiter_names
1999
+ waiters.keys
2000
+ end
2001
+
2002
+ private
2003
+
2004
+ # @param [Symbol] waiter_name
2005
+ # @param [Hash] options ({})
2006
+ def waiter(waiter_name, options = {})
2007
+ waiter_class = waiters[waiter_name]
2008
+ if waiter_class
2009
+ waiter_class.new(options.merge(client: self))
2010
+ else
2011
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
2012
+ end
2013
+ end
2014
+
2015
+ def waiters
2016
+ {
2017
+ data_source_available: Waiters::DataSourceAvailable,
2018
+ ml_model_available: Waiters::MLModelAvailable,
2019
+ evaluation_available: Waiters::EvaluationAvailable,
2020
+ batch_prediction_available: Waiters::BatchPredictionAvailable
2021
+ }
2022
+ end
2023
+
2024
+ class << self
2025
+
2026
+ # @api private
2027
+ attr_reader :identifier
2028
+
2029
+ # @api private
2030
+ def errors_module
2031
+ Errors
2032
+ end
2033
+
2034
+ end
2035
+ end
2036
+ end
2037
+ end