aws-sdk-machinelearning 1.0.0.rc1

@@ -0,0 +1,7 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
@@ -0,0 +1,23 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module MachineLearning
+     module Errors
+
+       extend Aws::Errors::DynamicErrors
+
+       # Raised when calling #load or #data on a resource class that can not be
+       # loaded. This can happen when:
+       #
+       # * A resource class has identifiers, but no data attributes.
+       # * Resource data is only available when making an API call that
+       #   enumerates all resources of that type.
+       class ResourceNotLoadable < RuntimeError; end
+     end
+   end
+ end
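As a hedged sketch (not part of the generated file): `DynamicErrors` builds the service's error classes on first reference, so faults can be rescued by name. The client call and the `ResourceNotFoundException` shape below are assumptions about this service's error set:

    client = Aws::MachineLearning::Client.new(region: "us-east-1")
    begin
      client.get_ml_model(ml_model_id: "exampleModelId") # placeholder ID
    rescue Aws::MachineLearning::Errors::ResourceNotFoundException => e
      # Dynamically generated error classes all inherit from
      # Aws::MachineLearning::Errors::ServiceError.
      warn "model not found: #{e.message}"
    end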
@@ -0,0 +1,22 @@
+ module Aws
+   module MachineLearning
+     module Plugins
+       # @api private
+       class PredictEndpoint < Seahorse::Client::Plugin
+
+         class Handler < Seahorse::Client::Handler
+
+           def call(context)
+             endpoint = context.params.delete(:predict_endpoint)
+             context.http_request.endpoint = URI.parse(endpoint.to_s)
+             @handler.call(context)
+           end
+
+         end
+
+         handle(Handler, operations: [:predict])
+
+       end
+     end
+   end
+ end
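The handler above removes `:predict_endpoint` from the request parameters and re-points the HTTP request at that URI, so real-time predictions go to the endpoint created for the model rather than the regional API endpoint. A hedged usage sketch (the model ID, feature name, and endpoint URL are placeholder assumptions):

    resp = client.predict(
      ml_model_id: "exampleModelId",
      record: { "feature1" => "0.5" }, # one observation's variables, as strings
      predict_endpoint: "https://realtime.machinelearning.us-east-1.amazonaws.com"
    )
    resp.prediction.predicted_value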
@@ -0,0 +1,25 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module MachineLearning
+     class Resource
+
+       # @param options ({})
+       # @option options [Client] :client
+       def initialize(options = {})
+         @client = options[:client] || Client.new(options)
+       end
+
+       # @return [Client]
+       def client
+         @client
+       end
+
+     end
+   end
+ end
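A brief usage sketch (not part of the generated file): the resource either wraps a client you pass in or constructs one from the remaining options, exactly as `initialize` above shows:

    # Reuse an existing client...
    ml = Aws::MachineLearning::Resource.new(client: Aws::MachineLearning::Client.new(region: "us-east-1"))
    # ...or let the resource build one from the same options Client.new accepts.
    ml = Aws::MachineLearning::Resource.new(region: "us-east-1")
    ml.client # => #<Aws::MachineLearning::Client>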
@@ -0,0 +1,3711 @@
+ # WARNING ABOUT GENERATED CODE
+ #
+ # This file is generated. See the contributing guide for info on making contributions:
+ # https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
+ #
+ # WARNING ABOUT GENERATED CODE
+
+ module Aws
+   module MachineLearning
+     module Types
+
+       # @note When making an API call, pass AddTagsInput
+       #   data as a hash:
+       #
+       #       {
+       #         tags: [ # required
+       #           {
+       #             key: "TagKey",
+       #             value: "TagValue",
+       #           },
+       #         ],
+       #         resource_id: "EntityId", # required
+       #         resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
+       #       }
+       # @!attribute [rw] tags
+       #   The key-value pairs to use to create tags. If you specify a key
+       #   without specifying a value, Amazon ML creates a tag with the
+       #   specified key and a value of null.
+       #   @return [Array<Types::Tag>]
+       #
+       # @!attribute [rw] resource_id
+       #   The ID of the ML object to tag. For example, `exampleModelId`.
+       #   @return [String]
+       #
+       # @!attribute [rw] resource_type
+       #   The type of the ML object to tag.
+       #   @return [String]
+       class AddTagsInput < Struct.new(
+         :tags,
+         :resource_id,
+         :resource_type)
+         include Aws::Structure
+       end
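As a usage illustration (not part of the generated file), the AddTagsInput hash shape above maps directly onto a client call; the client, IDs, and tag values here are placeholder assumptions:

    client = Aws::MachineLearning::Client.new(region: "us-east-1")
    resp = client.add_tags(
      tags: [{ key: "environment", value: "production" }],
      resource_id: "exampleModelId", # placeholder ML object ID
      resource_type: "MLModel"       # one of the accepted resource types
    )
    resp.resource_type # => "MLModel"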
+
+       # Amazon ML returns the following elements.
+       # @!attribute [rw] resource_id
+       #   The ID of the ML object that was tagged.
+       #   @return [String]
+       #
+       # @!attribute [rw] resource_type
+       #   The type of the ML object that was tagged.
+       #   @return [String]
+       class AddTagsOutput < Struct.new(
+         :resource_id,
+         :resource_type)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `GetBatchPrediction` operation.
+       #
+       # The content consists of the detailed metadata, the status, and the
+       # data file information of a `BatchPrediction`.
+       # @!attribute [rw] batch_prediction_id
+       #   The ID assigned to the `BatchPrediction` at creation. This value
+       #   should be identical to the value of the `BatchPredictionID` in the
+       #   request.
+       #   @return [String]
+       #
+       # @!attribute [rw] ml_model_id
+       #   The ID of the `MLModel` that generated predictions for the
+       #   `BatchPrediction` request.
+       #   @return [String]
+       #
+       # @!attribute [rw] batch_prediction_data_source_id
+       #   The ID of the `DataSource` that points to the group of observations
+       #   to predict.
+       #   @return [String]
+       #
+       # @!attribute [rw] input_data_location_s3
+       #   The location of the data file or directory in Amazon Simple Storage
+       #   Service (Amazon S3).
+       #   @return [String]
+       #
+       # @!attribute [rw] created_by_iam_user
+       #   The AWS user account that invoked the `BatchPrediction`. The account
+       #   type can be either an AWS root account or an AWS Identity and Access
+       #   Management (IAM) user account.
+       #   @return [String]
+       #
+       # @!attribute [rw] created_at
+       #   The time that the `BatchPrediction` was created. The time is
+       #   expressed in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] last_updated_at
+       #   The time of the most recent edit to the `BatchPrediction`. The time
+       #   is expressed in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] name
+       #   A user-supplied name or description of the `BatchPrediction`.
+       #   @return [String]
+       #
+       # @!attribute [rw] status
+       #   The status of the `BatchPrediction`. This element can have one of
+       #   the following values:
+       #
+       #   * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
+       #     request to generate predictions for a batch of observations.
+       #   * `INPROGRESS` - The process is underway.
+       #   * `FAILED` - The request to perform a batch prediction did not run
+       #     to completion. It is not usable.
+       #   * `COMPLETED` - The batch prediction process completed successfully.
+       #   * `DELETED` - The `BatchPrediction` is marked as deleted. It is not
+       #     usable.
+       #   @return [String]
+       #
+       # @!attribute [rw] output_uri
+       #   The location of an Amazon S3 bucket or directory to receive the
+       #   operation results. The following substrings are not allowed in the
+       #   `s3 key` portion of the `outputURI` field: ':', '//', '/./',
+       #   '/../'.
+       #   @return [String]
+       #
+       # @!attribute [rw] message
+       #   A description of the most recent details about processing the batch
+       #   prediction request.
+       #   @return [String]
+       #
+       # @!attribute [rw] compute_time
+       #   Long integer type that is a 64-bit signed number.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] finished_at
+       #   A timestamp represented in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] started_at
+       #   A timestamp represented in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] total_record_count
+       #   Long integer type that is a 64-bit signed number.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] invalid_record_count
+       #   Long integer type that is a 64-bit signed number.
+       #   @return [Integer]
+       class BatchPrediction < Struct.new(
+         :batch_prediction_id,
+         :ml_model_id,
+         :batch_prediction_data_source_id,
+         :input_data_location_s3,
+         :created_by_iam_user,
+         :created_at,
+         :last_updated_at,
+         :name,
+         :status,
+         :output_uri,
+         :message,
+         :compute_time,
+         :finished_at,
+         :started_at,
+         :total_record_count,
+         :invalid_record_count)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateBatchPredictionInput
+       #   data as a hash:
+       #
+       #       {
+       #         batch_prediction_id: "EntityId", # required
+       #         batch_prediction_name: "EntityName",
+       #         ml_model_id: "EntityId", # required
+       #         batch_prediction_data_source_id: "EntityId", # required
+       #         output_uri: "S3Url", # required
+       #       }
+       # @!attribute [rw] batch_prediction_id
+       #   A user-supplied ID that uniquely identifies the `BatchPrediction`.
+       #   @return [String]
+       #
+       # @!attribute [rw] batch_prediction_name
+       #   A user-supplied name or description of the `BatchPrediction`.
+       #   `BatchPredictionName` can only use the UTF-8 character set.
+       #   @return [String]
+       #
+       # @!attribute [rw] ml_model_id
+       #   The ID of the `MLModel` that will generate predictions for the group
+       #   of observations.
+       #   @return [String]
+       #
+       # @!attribute [rw] batch_prediction_data_source_id
+       #   The ID of the `DataSource` that points to the group of observations
+       #   to predict.
+       #   @return [String]
+       #
+       # @!attribute [rw] output_uri
+       #   The location of an Amazon Simple Storage Service (Amazon S3) bucket
+       #   or directory to store the batch prediction results. The following
+       #   substrings are not allowed in the `s3 key` portion of the
+       #   `outputURI` field: ':', '//', '/./', '/../'.
+       #
+       #   Amazon ML needs permissions to store and retrieve the logs on your
+       #   behalf. For information about how to set permissions, see the
+       #   [Amazon Machine Learning Developer Guide][1].
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
+       #   @return [String]
+       class CreateBatchPredictionInput < Struct.new(
+         :batch_prediction_id,
+         :batch_prediction_name,
+         :ml_model_id,
+         :batch_prediction_data_source_id,
+         :output_uri)
+         include Aws::Structure
+       end
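A hedged sketch of the call this input shape describes; all IDs and the S3 URI are placeholders, and the poll relies on the documented `GetBatchPrediction` status field:

    client.create_batch_prediction(
      batch_prediction_id: "exampleBatchPredictionId",
      ml_model_id: "exampleModelId",
      batch_prediction_data_source_id: "exampleDataSourceId",
      output_uri: "s3://examplebucket/batch-output/"
    )
    # The call only acknowledges the request; poll until the status settles.
    status = client.get_batch_prediction(batch_prediction_id: "exampleBatchPredictionId").status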
+
+       # Represents the output of a `CreateBatchPrediction` operation, and is
+       # an acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateBatchPrediction` operation is asynchronous. You can poll
+       # for status updates by using the `GetBatchPrediction` operation and
+       # checking the `Status` parameter of the result.
+       # @!attribute [rw] batch_prediction_id
+       #   A user-supplied ID that uniquely identifies the `BatchPrediction`.
+       #   This value is identical to the value of the `BatchPredictionId` in
+       #   the request.
+       #   @return [String]
+       class CreateBatchPredictionOutput < Struct.new(
+         :batch_prediction_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateDataSourceFromRDSInput
+       #   data as a hash:
+       #
+       #       {
+       #         data_source_id: "EntityId", # required
+       #         data_source_name: "EntityName",
+       #         rds_data: { # required
+       #           database_information: { # required
+       #             instance_identifier: "RDSInstanceIdentifier", # required
+       #             database_name: "RDSDatabaseName", # required
+       #           },
+       #           select_sql_query: "RDSSelectSqlQuery", # required
+       #           database_credentials: { # required
+       #             username: "RDSDatabaseUsername", # required
+       #             password: "RDSDatabasePassword", # required
+       #           },
+       #           s3_staging_location: "S3Url", # required
+       #           data_rearrangement: "DataRearrangement",
+       #           data_schema: "DataSchema",
+       #           data_schema_uri: "S3Url",
+       #           resource_role: "EDPResourceRole", # required
+       #           service_role: "EDPServiceRole", # required
+       #           subnet_id: "EDPSubnetId", # required
+       #           security_group_ids: ["EDPSecurityGroupId"], # required
+       #         },
+       #         role_arn: "RoleARN", # required
+       #         compute_statistics: false,
+       #       }
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the `DataSource`.
+       #   Typically, an Amazon Resource Number (ARN) becomes the ID for a
+       #   `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_source_name
+       #   A user-supplied name or description of the `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] rds_data
+       #   The data specification of an Amazon RDS `DataSource`:
+       #
+       #   * DatabaseInformation -
+       #     * `DatabaseName` - The name of the Amazon RDS database.
+       #     * `InstanceIdentifier` - A unique identifier for the Amazon RDS
+       #       database instance.
+       #
+       #   * DatabaseCredentials - AWS Identity and Access Management (IAM)
+       #     credentials that are used to connect to the Amazon RDS database.
+       #
+       #   * ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by
+       #     an EC2 instance to carry out the copy task from Amazon RDS to
+       #     Amazon Simple Storage Service (Amazon S3). For more information,
+       #     see [Role templates][1] for data pipelines.
+       #
+       #   * ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS
+       #     Data Pipeline service to monitor the progress of the copy task
+       #     from Amazon RDS to Amazon S3. For more information, see [Role
+       #     templates][1] for data pipelines.
+       #
+       #   * SecurityInfo - The security information to use to access an RDS DB
+       #     instance. You need to set up appropriate ingress rules for the
+       #     security entity IDs provided to allow access to the Amazon RDS
+       #     instance. Specify a [`SubnetId`, `SecurityGroupIds`] pair for a
+       #     VPC-based RDS DB instance.
+       #
+       #   * SelectSqlQuery - A query that is used to retrieve the observation
+       #     data for the `Datasource`.
+       #
+       #   * S3StagingLocation - The Amazon S3 location for staging Amazon RDS
+       #     data. The data retrieved from Amazon RDS using `SelectSqlQuery` is
+       #     stored in this location.
+       #
+       #   * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
+       #
+       #   * DataSchema - A JSON string representing the schema. This is not
+       #     required if `DataSchemaUri` is specified.
+       #
+       #   * DataRearrangement - A JSON string that represents the splitting
+       #     and rearrangement requirements for the `Datasource`.
+       #
+       #
+       #     Sample - `"{"splitting":{"percentBegin":10,"percentEnd":60}}"`
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
+       #   @return [Types::RDSDataSpec]
+       #
+       # @!attribute [rw] role_arn
+       #   The role that Amazon ML assumes on behalf of the user to create and
+       #   activate a data pipeline in the user's account and copy data using
+       #   the `SelectSqlQuery` query from Amazon RDS to Amazon S3.
+       #   @return [String]
+       #
+       # @!attribute [rw] compute_statistics
+       #   The compute statistics for a `DataSource`. The statistics are
+       #   generated from the observation data referenced by a `DataSource`.
+       #   Amazon ML uses the statistics internally during `MLModel` training.
+       #   This parameter must be set to `true` if the `DataSource` needs to
+       #   be used for `MLModel` training.
+       #   @return [Boolean]
+       class CreateDataSourceFromRDSInput < Struct.new(
+         :data_source_id,
+         :data_source_name,
+         :rds_data,
+         :role_arn,
+         :compute_statistics)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `CreateDataSourceFromRDS` operation, and is
+       # an acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateDataSourceFromRDS` operation is asynchronous. You can
+       # poll for updates by using the `GetBatchPrediction` operation and
+       # checking the `Status` parameter. You can inspect the `Message` when
+       # `Status` shows up as `FAILED`. You can also check the progress of the
+       # copy operation by going to the `DataPipeline` console and looking up
+       # the pipeline using the `pipelineId` from the describe call.
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the datasource. This
+       #   value should be identical to the value of the `DataSourceID` in the
+       #   request.
+       #   @return [String]
+       class CreateDataSourceFromRDSOutput < Struct.new(
+         :data_source_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateDataSourceFromRedshiftInput
+       #   data as a hash:
+       #
+       #       {
+       #         data_source_id: "EntityId", # required
+       #         data_source_name: "EntityName",
+       #         data_spec: { # required
+       #           database_information: { # required
+       #             database_name: "RedshiftDatabaseName", # required
+       #             cluster_identifier: "RedshiftClusterIdentifier", # required
+       #           },
+       #           select_sql_query: "RedshiftSelectSqlQuery", # required
+       #           database_credentials: { # required
+       #             username: "RedshiftDatabaseUsername", # required
+       #             password: "RedshiftDatabasePassword", # required
+       #           },
+       #           s3_staging_location: "S3Url", # required
+       #           data_rearrangement: "DataRearrangement",
+       #           data_schema: "DataSchema",
+       #           data_schema_uri: "S3Url",
+       #         },
+       #         role_arn: "RoleARN", # required
+       #         compute_statistics: false,
+       #       }
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_source_name
+       #   A user-supplied name or description of the `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_spec
+       #   The data specification of an Amazon Redshift `DataSource`:
+       #
+       #   * DatabaseInformation -
+       #     * `DatabaseName` - The name of the Amazon Redshift database.
+       #     * `ClusterIdentifier` - The unique ID for the Amazon Redshift
+       #       cluster.
+       #
+       #   * DatabaseCredentials - The AWS Identity and Access Management (IAM)
+       #     credentials that are used to connect to the Amazon Redshift
+       #     database.
+       #
+       #   * SelectSqlQuery - The query that is used to retrieve the
+       #     observation data for the `Datasource`.
+       #
+       #   * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3)
+       #     location for staging Amazon Redshift data. The data retrieved from
+       #     Amazon Redshift using the `SelectSqlQuery` query is stored in this
+       #     location.
+       #
+       #   * DataSchemaUri - The Amazon S3 location of the `DataSchema`.
+       #
+       #   * DataSchema - A JSON string representing the schema. This is not
+       #     required if `DataSchemaUri` is specified.
+       #
+       #   * DataRearrangement - A JSON string that represents the splitting
+       #     and rearrangement requirements for the `DataSource`.
+       #
+       #     Sample - `"{"splitting":{"percentBegin":10,"percentEnd":60}}"`
+       #   @return [Types::RedshiftDataSpec]
+       #
+       # @!attribute [rw] role_arn
+       #   A fully specified role Amazon Resource Name (ARN). Amazon ML assumes
+       #   the role on behalf of the user to create the following:
+       #
+       #   * A security group to allow Amazon ML to execute the
+       #     `SelectSqlQuery` query on an Amazon Redshift cluster
+       #
+       #   * An Amazon S3 bucket policy to grant Amazon ML read/write
+       #     permissions on the `S3StagingLocation`
+       #   @return [String]
+       #
+       # @!attribute [rw] compute_statistics
+       #   The compute statistics for a `DataSource`. The statistics are
+       #   generated from the observation data referenced by a `DataSource`.
+       #   Amazon ML uses the statistics internally during `MLModel` training.
+       #   This parameter must be set to `true` if the `DataSource` needs to be
+       #   used for `MLModel` training.
+       #   @return [Boolean]
+       class CreateDataSourceFromRedshiftInput < Struct.new(
+         :data_source_id,
+         :data_source_name,
+         :data_spec,
+         :role_arn,
+         :compute_statistics)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `CreateDataSourceFromRedshift` operation,
+       # and is an acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateDataSourceFromRedshift` operation is asynchronous. You can
+       # poll for updates by using the `GetBatchPrediction` operation and
+       # checking the `Status` parameter.
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the datasource. This
+       #   value should be identical to the value of the `DataSourceID` in the
+       #   request.
+       #   @return [String]
+       class CreateDataSourceFromRedshiftOutput < Struct.new(
+         :data_source_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateDataSourceFromS3Input
+       #   data as a hash:
+       #
+       #       {
+       #         data_source_id: "EntityId", # required
+       #         data_source_name: "EntityName",
+       #         data_spec: { # required
+       #           data_location_s3: "S3Url", # required
+       #           data_rearrangement: "DataRearrangement",
+       #           data_schema: "DataSchema",
+       #           data_schema_location_s3: "S3Url",
+       #         },
+       #         compute_statistics: false,
+       #       }
+       # @!attribute [rw] data_source_id
+       #   A user-supplied identifier that uniquely identifies the
+       #   `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_source_name
+       #   A user-supplied name or description of the `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_spec
+       #   The data specification of a `DataSource`:
+       #
+       #   * DataLocationS3 - The Amazon S3 location of the observation data.
+       #
+       #   * DataSchemaLocationS3 - The Amazon S3 location of the `DataSchema`.
+       #
+       #   * DataSchema - A JSON string representing the schema. This is not
+       #     required if `DataSchemaUri` is specified.
+       #
+       #   * DataRearrangement - A JSON string that represents the splitting
+       #     and rearrangement requirements for the `Datasource`.
+       #
+       #     Sample - `"{"splitting":{"percentBegin":10,"percentEnd":60}}"`
+       #   @return [Types::S3DataSpec]
+       #
+       # @!attribute [rw] compute_statistics
+       #   The compute statistics for a `DataSource`. The statistics are
+       #   generated from the observation data referenced by a `DataSource`.
+       #   Amazon ML uses the statistics internally during `MLModel` training.
+       #   This parameter must be set to `true` if the `DataSource` needs to
+       #   be used for `MLModel` training.
+       #   @return [Boolean]
+       class CreateDataSourceFromS3Input < Struct.new(
+         :data_source_id,
+         :data_source_name,
+         :data_spec,
+         :compute_statistics)
+         include Aws::Structure
+       end
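A hedged sketch of creating an S3-backed `DataSource` from the input shape above; the bucket, keys, and split percentages are placeholder assumptions:

    client.create_data_source_from_s3(
      data_source_id: "exampleDataSourceId",
      data_spec: {
        data_location_s3: "s3://examplebucket/input.csv",
        data_schema_location_s3: "s3://examplebucket/input.csv.schema",
        # Same shape as the documented DataRearrangement sample.
        data_rearrangement: '{"splitting":{"percentBegin":0,"percentEnd":70}}'
      },
      compute_statistics: true # must be true if this DataSource will train an MLModel
    )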
+
+       # Represents the output of a `CreateDataSourceFromS3` operation, and is
+       # an acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateDataSourceFromS3` operation is asynchronous. You can poll
+       # for updates by using the `GetBatchPrediction` operation and checking
+       # the `Status` parameter.
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the `DataSource`. This
+       #   value should be identical to the value of the `DataSourceID` in the
+       #   request.
+       #   @return [String]
+       class CreateDataSourceFromS3Output < Struct.new(
+         :data_source_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateEvaluationInput
+       #   data as a hash:
+       #
+       #       {
+       #         evaluation_id: "EntityId", # required
+       #         evaluation_name: "EntityName",
+       #         ml_model_id: "EntityId", # required
+       #         evaluation_data_source_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] evaluation_id
+       #   A user-supplied ID that uniquely identifies the `Evaluation`.
+       #   @return [String]
+       #
+       # @!attribute [rw] evaluation_name
+       #   A user-supplied name or description of the `Evaluation`.
+       #   @return [String]
+       #
+       # @!attribute [rw] ml_model_id
+       #   The ID of the `MLModel` to evaluate.
+       #
+       #   The schema used in creating the `MLModel` must match the schema of
+       #   the `DataSource` used in the `Evaluation`.
+       #   @return [String]
+       #
+       # @!attribute [rw] evaluation_data_source_id
+       #   The ID of the `DataSource` for the evaluation. The schema of the
+       #   `DataSource` must match the schema used to create the `MLModel`.
+       #   @return [String]
+       class CreateEvaluationInput < Struct.new(
+         :evaluation_id,
+         :evaluation_name,
+         :ml_model_id,
+         :evaluation_data_source_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `CreateEvaluation` operation, and is an
+       # acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateEvaluation` operation is asynchronous. You can poll for
+       # status updates by using the `GetEvaluation` operation and checking the
+       # `Status` parameter.
+       # @!attribute [rw] evaluation_id
+       #   The user-supplied ID that uniquely identifies the `Evaluation`. This
+       #   value should be identical to the value of the `EvaluationId` in the
+       #   request.
+       #   @return [String]
+       class CreateEvaluationOutput < Struct.new(
+         :evaluation_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateMLModelInput
+       #   data as a hash:
+       #
+       #       {
+       #         ml_model_id: "EntityId", # required
+       #         ml_model_name: "EntityName",
+       #         ml_model_type: "REGRESSION", # required, accepts REGRESSION, BINARY, MULTICLASS
+       #         parameters: {
+       #           "StringType" => "StringType",
+       #         },
+       #         training_data_source_id: "EntityId", # required
+       #         recipe: "Recipe",
+       #         recipe_uri: "S3Url",
+       #       }
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`.
+       #   @return [String]
+       #
+       # @!attribute [rw] ml_model_name
+       #   A user-supplied name or description of the `MLModel`.
+       #   @return [String]
+       #
+       # @!attribute [rw] ml_model_type
+       #   The category of supervised learning that this `MLModel` will
+       #   address. Choose from the following types:
+       #
+       #   * Choose `REGRESSION` if the `MLModel` will be used to predict a
+       #     numeric value.
+       #   * Choose `BINARY` if the `MLModel` result has two possible values.
+       #   * Choose `MULTICLASS` if the `MLModel` result has a limited number
+       #     of values.
+       #
+       #   For more information, see the [Amazon Machine Learning Developer
+       #   Guide][1].
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
+       #   @return [String]
+       #
+       # @!attribute [rw] parameters
+       #   A list of the training parameters in the `MLModel`. The list is
+       #   implemented as a map of key-value pairs.
+       #
+       #   The following is the current set of training parameters:
+       #
+       #   * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the
+       #     model. Depending on the input data, the size of the model might
+       #     affect its performance.
+       #
+       #     The value is an integer that ranges from `100000` to `2147483648`.
+       #     The default value is `33554432`.
+       #
+       #   * `sgd.maxPasses` - The number of times that the training process
+       #     traverses the observations to build the `MLModel`. The value is an
+       #     integer that ranges from `1` to `10000`. The default value is
+       #     `10`.
+       #
+       #   * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
+       #     Shuffling the data improves a model's ability to find the optimal
+       #     solution for a variety of data types. The valid values are `auto`
+       #     and `none`. The default value is `none`. We strongly recommend
+       #     that you shuffle your data.
+       #
+       #   * `sgd.l1RegularizationAmount` - The coefficient regularization L1
+       #     norm. It controls overfitting the data by penalizing large
+       #     coefficients. This tends to drive coefficients to zero, resulting
+       #     in a sparse feature set. If you use this parameter, start by
+       #     specifying a small value, such as `1.0E-08`.
+       #
+       #     The value is a double that ranges from `0` to `MAX_DOUBLE`. The
+       #     default is to not use L1 normalization. This parameter can't be
+       #     used when `L2` is specified. Use this parameter sparingly.
+       #
+       #   * `sgd.l2RegularizationAmount` - The coefficient regularization L2
+       #     norm. It controls overfitting the data by penalizing large
+       #     coefficients. This tends to drive coefficients to small, nonzero
+       #     values. If you use this parameter, start by specifying a small
+       #     value, such as `1.0E-08`.
+       #
+       #     The value is a double that ranges from `0` to `MAX_DOUBLE`. The
+       #     default is to not use L2 normalization. This parameter can't be
+       #     used when `L1` is specified. Use this parameter sparingly.
+       #   @return [Hash<String,String>]
+       #
+       # @!attribute [rw] training_data_source_id
+       #   The `DataSource` that points to the training data.
+       #   @return [String]
+       #
+       # @!attribute [rw] recipe
+       #   The data recipe for creating the `MLModel`. You must specify either
+       #   the recipe or its URI. If you don't specify a recipe or its URI,
+       #   Amazon ML creates a default.
+       #   @return [String]
+       #
+       # @!attribute [rw] recipe_uri
+       #   The Amazon Simple Storage Service (Amazon S3) location and file name
+       #   that contains the `MLModel` recipe. You must specify either the
+       #   recipe or its URI. If you don't specify a recipe or its URI, Amazon
+       #   ML creates a default.
+       #   @return [String]
+       class CreateMLModelInput < Struct.new(
+         :ml_model_id,
+         :ml_model_name,
+         :ml_model_type,
+         :parameters,
+         :training_data_source_id,
+         :recipe,
+         :recipe_uri)
+         include Aws::Structure
+       end
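A hedged sketch of a `CreateMLModel` call using the training parameters documented above; the IDs are placeholders and the parameter values are illustrative (note that every value in the map is a string):

    client.create_ml_model(
      ml_model_id: "exampleModelId",
      ml_model_type: "BINARY", # two possible outcome values
      training_data_source_id: "exampleDataSourceId",
      parameters: {
        "sgd.maxPasses"              => "10",
        "sgd.shuffleType"            => "auto",
        "sgd.l2RegularizationAmount" => "1.0E-08" # can't be combined with L1
      }
    )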
+
+       # Represents the output of a `CreateMLModel` operation, and is an
+       # acknowledgement that Amazon ML received the request.
+       #
+       # The `CreateMLModel` operation is asynchronous. You can poll for status
+       # updates by using the `GetMLModel` operation and checking the `Status`
+       # parameter.
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`. This
+       #   value should be identical to the value of the `MLModelId` in the
+       #   request.
+       #   @return [String]
+       class CreateMLModelOutput < Struct.new(
+         :ml_model_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass CreateRealtimeEndpointInput
+       #   data as a hash:
+       #
+       #       {
+       #         ml_model_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] ml_model_id
+       #   The ID assigned to the `MLModel` during creation.
+       #   @return [String]
+       class CreateRealtimeEndpointInput < Struct.new(
+         :ml_model_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `CreateRealtimeEndpoint` operation.
+       #
+       # The result contains the `MLModelId` and the endpoint information for
+       # the `MLModel`.
+       #
+       # <note markdown="1"> The endpoint information includes the URI of the `MLModel`; that is,
+       # the location to send online prediction requests for the specified
+       # `MLModel`.
+       #
+       # </note>
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`. This
+       #   value should be identical to the value of the `MLModelId` in the
+       #   request.
+       #   @return [String]
+       #
+       # @!attribute [rw] realtime_endpoint_info
+       #   The endpoint information of the `MLModel`.
+       #   @return [Types::RealtimeEndpointInfo]
+       class CreateRealtimeEndpointOutput < Struct.new(
+         :ml_model_id,
+         :realtime_endpoint_info)
+         include Aws::Structure
+       end
+
+       # Represents the output of the `GetDataSource` operation.
+       #
+       # The content consists of the detailed metadata and data file
+       # information and the current status of the `DataSource`.
+       # @!attribute [rw] data_source_id
+       #   The ID that is assigned to the `DataSource` during creation.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_location_s3
+       #   The location and name of the data in Amazon Simple Storage Service
+       #   (Amazon S3) that is used by a `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] data_rearrangement
+       #   A JSON string that represents the splitting and rearrangement
+       #   requirement used when this `DataSource` was created.
+       #   @return [String]
+       #
+       # @!attribute [rw] created_by_iam_user
+       #   The AWS user account from which the `DataSource` was created. The
+       #   account type can be either an AWS root account or an AWS Identity
+       #   and Access Management (IAM) user account.
+       #   @return [String]
+       #
+       # @!attribute [rw] created_at
+       #   The time that the `DataSource` was created. The time is expressed in
+       #   epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] last_updated_at
+       #   The time of the most recent edit to the `BatchPrediction`. The time
+       #   is expressed in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] data_size_in_bytes
+       #   The total number of observations contained in the data files that
+       #   the `DataSource` references.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] number_of_files
+       #   The number of data files referenced by the `DataSource`.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] name
+       #   A user-supplied name or description of the `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] status
+       #   The current status of the `DataSource`. This element can have one of
+       #   the following values:
+       #
+       #   * PENDING - Amazon Machine Learning (Amazon ML) submitted a request
+       #     to create a `DataSource`.
+       #   * INPROGRESS - The creation process is underway.
+       #   * FAILED - The request to create a `DataSource` did not run to
+       #     completion. It is not usable.
+       #   * COMPLETED - The creation process completed successfully.
+       #   * DELETED - The `DataSource` is marked as deleted. It is not usable.
+       #   @return [String]
+       #
+       # @!attribute [rw] message
+       #   A description of the most recent details about creating the
+       #   `DataSource`.
+       #   @return [String]
+       #
+       # @!attribute [rw] redshift_metadata
+       #   Describes the `DataSource` details specific to Amazon Redshift.
+       #   @return [Types::RedshiftMetadata]
+       #
+       # @!attribute [rw] rds_metadata
+       #   The datasource details that are specific to Amazon RDS.
+       #   @return [Types::RDSMetadata]
+       #
+       # @!attribute [rw] role_arn
+       #   The Amazon Resource Name (ARN) of an [AWS IAM Role][1], such as the
+       #   following: arn:aws:iam::account:role/rolename.
+       #
+       #
+       #
+       #   [1]: http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts
+       #   @return [String]
+       #
+       # @!attribute [rw] compute_statistics
+       #   The parameter is `true` if statistics need to be generated from the
+       #   observation data.
+       #   @return [Boolean]
+       #
+       # @!attribute [rw] compute_time
+       #   Long integer type that is a 64-bit signed number.
+       #   @return [Integer]
+       #
+       # @!attribute [rw] finished_at
+       #   A timestamp represented in epoch time.
+       #   @return [Time]
+       #
+       # @!attribute [rw] started_at
+       #   A timestamp represented in epoch time.
+       #   @return [Time]
+       class DataSource < Struct.new(
+         :data_source_id,
+         :data_location_s3,
+         :data_rearrangement,
+         :created_by_iam_user,
+         :created_at,
+         :last_updated_at,
+         :data_size_in_bytes,
+         :number_of_files,
+         :name,
+         :status,
+         :message,
+         :redshift_metadata,
+         :rds_metadata,
+         :role_arn,
+         :compute_statistics,
+         :compute_time,
+         :finished_at,
+         :started_at)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteBatchPredictionInput
+       #   data as a hash:
+       #
+       #       {
+       #         batch_prediction_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] batch_prediction_id
+       #   A user-supplied ID that uniquely identifies the `BatchPrediction`.
+       #   @return [String]
+       class DeleteBatchPredictionInput < Struct.new(
+         :batch_prediction_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `DeleteBatchPrediction` operation.
+       #
+       # You can use the `GetBatchPrediction` operation and check the value of
+       # the `Status` parameter to see whether a `BatchPrediction` is marked as
+       # `DELETED`.
+       # @!attribute [rw] batch_prediction_id
+       #   A user-supplied ID that uniquely identifies the `BatchPrediction`.
+       #   This value should be identical to the value of the
+       #   `BatchPredictionID` in the request.
+       #   @return [String]
+       class DeleteBatchPredictionOutput < Struct.new(
+         :batch_prediction_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteDataSourceInput
+       #   data as a hash:
+       #
+       #       {
+       #         data_source_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the `DataSource`.
+       #   @return [String]
+       class DeleteDataSourceInput < Struct.new(
+         :data_source_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `DeleteDataSource` operation.
+       # @!attribute [rw] data_source_id
+       #   A user-supplied ID that uniquely identifies the `DataSource`. This
+       #   value should be identical to the value of the `DataSourceID` in the
+       #   request.
+       #   @return [String]
+       class DeleteDataSourceOutput < Struct.new(
+         :data_source_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteEvaluationInput
+       #   data as a hash:
+       #
+       #       {
+       #         evaluation_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] evaluation_id
+       #   A user-supplied ID that uniquely identifies the `Evaluation` to
+       #   delete.
+       #   @return [String]
+       class DeleteEvaluationInput < Struct.new(
+         :evaluation_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `DeleteEvaluation` operation. The output
+       # indicates that Amazon Machine Learning (Amazon ML) received the
+       # request.
+       #
+       # You can use the `GetEvaluation` operation and check the value of the
+       # `Status` parameter to see whether an `Evaluation` is marked as
+       # `DELETED`.
+       # @!attribute [rw] evaluation_id
+       #   A user-supplied ID that uniquely identifies the `Evaluation`. This
+       #   value should be identical to the value of the `EvaluationId` in the
+       #   request.
+       #   @return [String]
+       class DeleteEvaluationOutput < Struct.new(
+         :evaluation_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteMLModelInput
+       #   data as a hash:
+       #
+       #       {
+       #         ml_model_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`.
+       #   @return [String]
+       class DeleteMLModelInput < Struct.new(
+         :ml_model_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `DeleteMLModel` operation.
+       #
+       # You can use the `GetMLModel` operation and check the value of the
+       # `Status` parameter to see whether an `MLModel` is marked as `DELETED`.
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`. This
+       #   value should be identical to the value of the `MLModelID` in the
+       #   request.
+       #   @return [String]
+       class DeleteMLModelOutput < Struct.new(
+         :ml_model_id)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteRealtimeEndpointInput
+       #   data as a hash:
+       #
+       #       {
+       #         ml_model_id: "EntityId", # required
+       #       }
+       # @!attribute [rw] ml_model_id
+       #   The ID assigned to the `MLModel` during creation.
+       #   @return [String]
+       class DeleteRealtimeEndpointInput < Struct.new(
+         :ml_model_id)
+         include Aws::Structure
+       end
+
+       # Represents the output of a `DeleteRealtimeEndpoint` operation.
+       #
+       # The result contains the `MLModelId` and the endpoint information for
+       # the `MLModel`.
+       # @!attribute [rw] ml_model_id
+       #   A user-supplied ID that uniquely identifies the `MLModel`. This
+       #   value should be identical to the value of the `MLModelId` in the
+       #   request.
+       #   @return [String]
+       #
+       # @!attribute [rw] realtime_endpoint_info
+       #   The endpoint information of the `MLModel`.
+       #   @return [Types::RealtimeEndpointInfo]
+       class DeleteRealtimeEndpointOutput < Struct.new(
+         :ml_model_id,
+         :realtime_endpoint_info)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DeleteTagsInput
+       #   data as a hash:
+       #
+       #       {
+       #         tag_keys: ["TagKey"], # required
+       #         resource_id: "EntityId", # required
+       #         resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
+       #       }
+       # @!attribute [rw] tag_keys
+       #   One or more tags to delete.
+       #   @return [Array<String>]
+       #
+       # @!attribute [rw] resource_id
+       #   The ID of the tagged ML object. For example, `exampleModelId`.
+       #   @return [String]
+       #
+       # @!attribute [rw] resource_type
+       #   The type of the tagged ML object.
+       #   @return [String]
+       class DeleteTagsInput < Struct.new(
+         :tag_keys,
+         :resource_id,
+         :resource_type)
+         include Aws::Structure
+       end
+
+       # Amazon ML returns the following elements.
+       # @!attribute [rw] resource_id
+       #   The ID of the ML object from which tags were deleted.
+       #   @return [String]
+       #
+       # @!attribute [rw] resource_type
+       #   The type of the ML object from which tags were deleted.
+       #   @return [String]
+       class DeleteTagsOutput < Struct.new(
+         :resource_id,
+         :resource_type)
+         include Aws::Structure
+       end
+
+       # @note When making an API call, pass DescribeBatchPredictionsInput
+       #   data as a hash:
+       #
+       #       {
+       #         filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
+       #         eq: "ComparatorValue",
+       #         gt: "ComparatorValue",
+       #         lt: "ComparatorValue",
+       #         ge: "ComparatorValue",
+       #         le: "ComparatorValue",
+       #         ne: "ComparatorValue",
+       #         prefix: "ComparatorValue",
+       #         sort_order: "asc", # accepts asc, dsc
+       #         next_token: "StringType",
+       #         limit: 1,
+       #       }
+       # @!attribute [rw] filter_variable
+       #   Use one of the following variables to filter a list of
+       #   `BatchPrediction`:
+       #
+       #   * `CreatedAt` - Sets the search criteria to the `BatchPrediction`
+       #     creation date.
+       #   * `Status` - Sets the search criteria to the `BatchPrediction`
+       #     status.
+       #   * `Name` - Sets the search criteria to the contents of the
+       #     `BatchPrediction` `Name`.
+       #   * `IAMUser` - Sets the search criteria to the user account that
+       #     invoked the `BatchPrediction` creation.
+       #   * `MLModelId` - Sets the search criteria to the `MLModel` used in
+       #     the `BatchPrediction`.
+       #   * `DataSourceId` - Sets the search criteria to the `DataSource` used
+       #     in the `BatchPrediction`.
+       #   * `DataURI` - Sets the search criteria to the data file(s) used in
+       #     the `BatchPrediction`. The URL can identify either a file or an
+       #     Amazon Simple Storage Service (Amazon S3) bucket or directory.
+       #   @return [String]
+       #
+       # @!attribute [rw] eq
+       #   The equal to operator. The `BatchPrediction` results will have
+       #   `FilterVariable` values that exactly match the value specified with
+       #   `EQ`.
+       #   @return [String]
+       #
+       # @!attribute [rw] gt
+       #   The greater than operator. The `BatchPrediction` results will have
+       #   `FilterVariable` values that are greater than the value specified
+       #   with `GT`.
+       #   @return [String]
+       #
+       # @!attribute [rw] lt
+       #   The less than operator. The `BatchPrediction` results will have
+       #   `FilterVariable` values that are less than the value specified with
+       #   `LT`.
+       #   @return [String]
+       #
+       # @!attribute [rw] ge
+       #   The greater than or equal to operator. The `BatchPrediction` results
+       #   will have `FilterVariable` values that are greater than or equal to
+       #   the value specified with `GE`.
+       #   @return [String]
+       #
+       # @!attribute [rw] le
+       #   The less than or equal to operator. The `BatchPrediction` results
+       #   will have `FilterVariable` values that are less than or equal to the
+       #   value specified with `LE`.
+       #   @return [String]
+       #
+       # @!attribute [rw] ne
+       #   The not equal to operator. The `BatchPrediction` results will have
+       #   `FilterVariable` values not equal to the value specified with `NE`.
+       #   @return [String]
+       #
+       # @!attribute [rw] prefix
+       #   A string that is found at the beginning of a variable, such as
+       #   `Name` or `Id`.
+       #
+       #   For example, a `BatchPrediction` operation could have the `Name`
+       #   `2014-09-09-HolidayGiftMailer`. To search for this
+       #   `BatchPrediction`, select `Name` for the `FilterVariable` and any of
+       #   the following strings for the `Prefix`:
+       #
+       #   * 2014-09
+       #
+       #   * 2014-09-09
+       #
+       #   * 2014-09-09-Holiday
+       #   @return [String]
+       #
+       # @!attribute [rw] sort_order
+       #   A two-value parameter that determines the sequence of the resulting
+       #   list of `BatchPrediction`s.
+       #
+       #   * `asc` - Arranges the list in ascending order (A-Z, 0-9).
+       #   * `dsc` - Arranges the list in descending order (Z-A, 9-0).
+       #
+       #   Results are sorted by `FilterVariable`.
+       #   @return [String]
+       #
+       # @!attribute [rw] next_token
+       #   An ID of the page in the paginated results.
+       #   @return [String]
+       #
+       # @!attribute [rw] limit
+       #   The number of pages of information to include in the result. The
+       #   range of acceptable values is `1` through `100`. The default value
+       #   is `100`.
+       #   @return [Integer]
+       class DescribeBatchPredictionsInput < Struct.new(
+         :filter_variable,
+         :eq,
+         :gt,
+         :lt,
+         :ge,
+         :le,
+         :ne,
+         :prefix,
+         :sort_order,
+         :next_token,
+         :limit)
+         include Aws::Structure
+       end
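A hedged sketch of filtering and paginating with the input shape above; the filter values are placeholder assumptions:

    resp = client.describe_batch_predictions(
      filter_variable: "Status",
      eq: "COMPLETED",
      sort_order: "dsc", # descending order (Z-A, 9-0)
      limit: 100
    )
    resp.results.each { |bp| puts bp.batch_prediction_id }
    # A non-nil next_token indicates at least one more page follows.
    resp = client.describe_batch_predictions(next_token: resp.next_token) if resp.next_token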
1193
+
1194
+ # Represents the output of a `DescribeBatchPredictions` operation. The
1195
+ # content is essentially a list of `BatchPrediction`s.
1196
+ # @!attribute [rw] results
1197
+ # A list of `BatchPrediction` objects that meet the search criteria.
1198
+ # @return [Array<Types::BatchPrediction>]
1199
+ #
1200
+ # @!attribute [rw] next_token
1201
+ # The ID of the next page in the paginated results that indicates at
1202
+ # least one more page follows.
1203
+ # @return [String]
1204
+ class DescribeBatchPredictionsOutput < Struct.new(
1205
+ :results,
1206
+ :next_token)
1207
+ include Aws::Structure
1208
+ end
1209
+
1210
+ # @note When making an API call, pass DescribeDataSourcesInput
1211
+ # data as a hash:
1212
+ #
1213
+ # {
1214
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, DataLocationS3, IAMUser
1215
+ # eq: "ComparatorValue",
1216
+ # gt: "ComparatorValue",
1217
+ # lt: "ComparatorValue",
1218
+ # ge: "ComparatorValue",
1219
+ # le: "ComparatorValue",
1220
+ # ne: "ComparatorValue",
1221
+ # prefix: "ComparatorValue",
1222
+ # sort_order: "asc", # accepts asc, dsc
1223
+ # next_token: "StringType",
1224
+ # limit: 1,
1225
+ # }
1226
+ # @!attribute [rw] filter_variable
1227
+ # Use one of the following variables to filter a list of
1228
+ # `DataSource`\:
1229
+ #
1230
+ # * `CreatedAt` - Sets the search criteria to `DataSource` creation
1231
+ # dates.
1232
+ # * `Status` - Sets the search criteria to `DataSource` statuses.
1233
+ # * `Name` - Sets the search criteria to the contents of `DataSource`
1234
+ # <b> </b> `Name`.
1235
+ # * `DataUri` - Sets the search criteria to the URI of data files used
1236
+ # to create the `DataSource`. The URI can identify either a file or
1237
+ # an Amazon Simple Storage Service (Amazon S3) bucket or directory.
1238
+ # * `IAMUser` - Sets the search criteria to the user account that
1239
+ # invoked the `DataSource` creation.
1240
+ # @return [String]
1241
+ #
1242
+ # @!attribute [rw] eq
1243
+ # The equal to operator. The `DataSource` results will have
1244
+ # `FilterVariable` values that exactly match the value specified with
1245
+ # `EQ`.
1246
+ # @return [String]
1247
+ #
1248
+ # @!attribute [rw] gt
1249
+ # The greater than operator. The `DataSource` results will have
1250
+ # `FilterVariable` values that are greater than the value specified
1251
+ # with `GT`.
1252
+ # @return [String]
1253
+ #
1254
+ # @!attribute [rw] lt
1255
+ # The less than operator. The `DataSource` results will have
1256
+ # `FilterVariable` values that are less than the value specified with
1257
+ # `LT`.
1258
+ # @return [String]
1259
+ #
1260
+ # @!attribute [rw] ge
1261
+ # The greater than or equal to operator. The `DataSource` results will
1262
+ # have `FilterVariable` values that are greater than or equal to the
1263
+ # value specified with `GE`.
1264
+ # @return [String]
1265
+ #
1266
+ # @!attribute [rw] le
1267
+ # The less than or equal to operator. The `DataSource` results will
1268
+ # have `FilterVariable` values that are less than or equal to the
1269
+ # value specified with `LE`.
1270
+ # @return [String]
1271
+ #
1272
+ # @!attribute [rw] ne
1273
+ # The not equal to operator. The `DataSource` results will have
1274
+ # `FilterVariable` values not equal to the value specified with `NE`.
1275
+ # @return [String]
1276
+ #
1277
+ # @!attribute [rw] prefix
1278
+ # A string that is found at the beginning of a variable, such as
1279
+ # `Name` or `Id`.
1280
+ #
1281
+ # For example, a `DataSource` could have the `Name`
1282
+ # `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
1283
+ # select `Name` for the `FilterVariable` and any of the following
1284
+ # strings for the `Prefix`\:
1285
+ #
1286
+ # * 2014-09
1287
+ #
1288
+ # * 2014-09-09
1289
+ #
1290
+ # * 2014-09-09-Holiday
1291
+ # @return [String]
1292
+ #
1293
+ # @!attribute [rw] sort_order
1294
+ # A two-value parameter that determines the sequence of the resulting
1295
+ # list of `DataSource`.
1296
+ #
1297
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1298
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1299
+ #
1300
+ # Results are sorted by `FilterVariable`.
1301
+ # @return [String]
1302
+ #
1303
+ # @!attribute [rw] next_token
1304
+ # The ID of the page in the paginated results.
1305
+ # @return [String]
1306
+ #
1307
+ # @!attribute [rw] limit
1308
+ # The maximum number of `DataSource` to include in the result.
1309
+ # @return [Integer]
1310
+ class DescribeDataSourcesInput < Struct.new(
1311
+ :filter_variable,
1312
+ :eq,
1313
+ :gt,
1314
+ :lt,
1315
+ :ge,
1316
+ :le,
1317
+ :ne,
1318
+ :prefix,
1319
+ :sort_order,
1320
+ :next_token,
1321
+ :limit)
1322
+ include Aws::Structure
1323
+ end
1324
+
1325
+ # Represents the query results from a DescribeDataSources operation. The
1326
+ # content is essentially a list of `DataSource`.
1327
+ # @!attribute [rw] results
1328
+ # A list of `DataSource` that meet the search criteria.
1329
+ # @return [Array<Types::DataSource>]
1330
+ #
1331
+ # @!attribute [rw] next_token
1332
+ # An ID of the next page in the paginated results that indicates at
1333
+ # least one more page follows.
1334
+ # @return [String]
1335
+ class DescribeDataSourcesOutput < Struct.new(
1336
+ :results,
1337
+ :next_token)
1338
+ include Aws::Structure
1339
+ end
1340
+
1341
+ # @note When making an API call, pass DescribeEvaluationsInput
1342
+ # data as a hash:
1343
+ #
1344
+ # {
1345
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, MLModelId, DataSourceId, DataURI
1346
+ # eq: "ComparatorValue",
1347
+ # gt: "ComparatorValue",
1348
+ # lt: "ComparatorValue",
1349
+ # ge: "ComparatorValue",
1350
+ # le: "ComparatorValue",
1351
+ # ne: "ComparatorValue",
1352
+ # prefix: "ComparatorValue",
1353
+ # sort_order: "asc", # accepts asc, dsc
1354
+ # next_token: "StringType",
1355
+ # limit: 1,
1356
+ # }
1357
+ # @!attribute [rw] filter_variable
1358
+ # Use one of the following variable to filter a list of `Evaluation`
1359
+ # objects:
1360
+ #
1361
+ # * `CreatedAt` - Sets the search criteria to the `Evaluation`
1362
+ # creation date.
1363
+ # * `Status` - Sets the search criteria to the `Evaluation` status.
1364
+ # * `Name` - Sets the search criteria to the contents of `Evaluation`
1365
+ # <b> </b> `Name`.
1366
+ # * `IAMUser` - Sets the search criteria to the user account that
1367
+ # invoked an `Evaluation`.
1368
+ # * `MLModelId` - Sets the search criteria to the `MLModel` that was
1369
+ # evaluated.
1370
+ # * `DataSourceId` - Sets the search criteria to the `DataSource` used
1371
+ # in `Evaluation`.
1372
+ # * `DataUri` - Sets the search criteria to the data file(s) used in
1373
+ # `Evaluation`. The URL can identify either a file or an Amazon
1374
+ # Simple Storage Solution (Amazon S3) bucket or directory.
1375
+ # @return [String]
1376
+ #
1377
+ # @!attribute [rw] eq
1378
+ # The equal to operator. The `Evaluation` results will have
1379
+ # `FilterVariable` values that exactly match the value specified with
1380
+ # `EQ`.
1381
+ # @return [String]
1382
+ #
1383
+ # @!attribute [rw] gt
1384
+ # The greater than operator. The `Evaluation` results will have
1385
+ # `FilterVariable` values that are greater than the value specified
1386
+ # with `GT`.
1387
+ # @return [String]
1388
+ #
1389
+ # @!attribute [rw] lt
1390
+ # The less than operator. The `Evaluation` results will have
1391
+ # `FilterVariable` values that are less than the value specified with
1392
+ # `LT`.
1393
+ # @return [String]
1394
+ #
1395
+ # @!attribute [rw] ge
1396
+ # The greater than or equal to operator. The `Evaluation` results will
1397
+ # have `FilterVariable` values that are greater than or equal to the
1398
+ # value specified with `GE`.
1399
+ # @return [String]
1400
+ #
1401
+ # @!attribute [rw] le
1402
+ # The less than or equal to operator. The `Evaluation` results will
1403
+ # have `FilterVariable` values that are less than or equal to the
1404
+ # value specified with `LE`.
1405
+ # @return [String]
1406
+ #
1407
+ # @!attribute [rw] ne
1408
+ # The not equal to operator. The `Evaluation` results will have
1409
+ # `FilterVariable` values not equal to the value specified with `NE`.
1410
+ # @return [String]
1411
+ #
1412
+ # @!attribute [rw] prefix
1413
+ # A string that is found at the beginning of a variable, such as
1414
+ # `Name` or `Id`.
1415
+ #
1416
+ # For example, an `Evaluation` could have the `Name`
1417
+ # `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
1418
+ # select `Name` for the `FilterVariable` and any of the following
1419
+ # strings for the `Prefix`\:
1420
+ #
1421
+ # * 2014-09
1422
+ #
1423
+ # * 2014-09-09
1424
+ #
1425
+ # * 2014-09-09-Holiday
1426
+ # @return [String]
1427
+ #
1428
+ # @!attribute [rw] sort_order
1429
+ # A two-value parameter that determines the sequence of the resulting
1430
+ # list of `Evaluation`.
1431
+ #
1432
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1433
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1434
+ #
1435
+ # Results are sorted by `FilterVariable`.
1436
+ # @return [String]
1437
+ #
1438
+ # @!attribute [rw] next_token
1439
+ # The ID of the page in the paginated results.
1440
+ # @return [String]
1441
+ #
1442
+ # @!attribute [rw] limit
1443
+ # The maximum number of `Evaluation` to include in the result.
1444
+ # @return [Integer]
1445
+ class DescribeEvaluationsInput < Struct.new(
1446
+ :filter_variable,
1447
+ :eq,
1448
+ :gt,
1449
+ :lt,
1450
+ :ge,
1451
+ :le,
1452
+ :ne,
1453
+ :prefix,
1454
+ :sort_order,
1455
+ :next_token,
1456
+ :limit)
1457
+ include Aws::Structure
1458
+ end
1459
+
1460
+ # Represents the query results from a `DescribeEvaluations` operation.
1461
+ # The content is essentially a list of `Evaluation`.
1462
+ # @!attribute [rw] results
1463
+ # A list of `Evaluation` that meet the search criteria.
1464
+ # @return [Array<Types::Evaluation>]
1465
+ #
1466
+ # @!attribute [rw] next_token
1467
+ # The ID of the next page in the paginated results that indicates at
1468
+ # least one more page follows.
1469
+ # @return [String]
1470
+ class DescribeEvaluationsOutput < Struct.new(
1471
+ :results,
1472
+ :next_token)
1473
+ include Aws::Structure
1474
+ end
1475
+
1476
+ # @note When making an API call, pass DescribeMLModelsInput
1477
+ # data as a hash:
1478
+ #
1479
+ # {
1480
+ # filter_variable: "CreatedAt", # accepts CreatedAt, LastUpdatedAt, Status, Name, IAMUser, TrainingDataSourceId, RealtimeEndpointStatus, MLModelType, Algorithm, TrainingDataURI
1481
+ # eq: "ComparatorValue",
1482
+ # gt: "ComparatorValue",
1483
+ # lt: "ComparatorValue",
1484
+ # ge: "ComparatorValue",
1485
+ # le: "ComparatorValue",
1486
+ # ne: "ComparatorValue",
1487
+ # prefix: "ComparatorValue",
1488
+ # sort_order: "asc", # accepts asc, dsc
1489
+ # next_token: "StringType",
1490
+ # limit: 1,
1491
+ # }
1492
+ # @!attribute [rw] filter_variable
1493
+ # Use one of the following variables to filter a list of `MLModel`\:
1494
+ #
1495
+ # * `CreatedAt` - Sets the search criteria to `MLModel` creation date.
1496
+ # * `Status` - Sets the search criteria to `MLModel` status.
1497
+ # * `Name` - Sets the search criteria to the contents of `MLModel`
1498
+ # `Name`.
1499
+ # * `IAMUser` - Sets the search criteria to the user account that
1500
+ # invoked the `MLModel` creation.
1501
+ # * `TrainingDataSourceId` - Sets the search criteria to the
1502
+ # `DataSource` used to train one or more `MLModel`.
1503
+ # * `RealtimeEndpointStatus` - Sets the search criteria to the
1504
+ # `MLModel` real-time endpoint status.
1505
+ # * `MLModelType` - Sets the search criteria to `MLModel` type:
1506
+ # binary, regression, or multi-class.
1507
+ # * `Algorithm` - Sets the search criteria to the algorithm that the
1508
+ # `MLModel` uses.
1509
+ # * `TrainingDataURI` - Sets the search criteria to the data file(s)
1510
+ # used in training an `MLModel`. The URL can identify either a file
1511
+ # or an Amazon Simple Storage Service (Amazon S3) bucket or
1512
+ # directory.
1513
+ # @return [String]
1514
+ #
1515
+ # @!attribute [rw] eq
1516
+ # The equal to operator. The `MLModel` results will have
1517
+ # `FilterVariable` values that exactly match the value specified with
1518
+ # `EQ`.
1519
+ # @return [String]
1520
+ #
1521
+ # @!attribute [rw] gt
1522
+ # The greater than operator. The `MLModel` results will have
1523
+ # `FilterVariable` values that are greater than the value specified
1524
+ # with `GT`.
1525
+ # @return [String]
1526
+ #
1527
+ # @!attribute [rw] lt
1528
+ # The less than operator. The `MLModel` results will have
1529
+ # `FilterVariable` values that are less than the value specified with
1530
+ # `LT`.
1531
+ # @return [String]
1532
+ #
1533
+ # @!attribute [rw] ge
1534
+ # The greater than or equal to operator. The `MLModel` results will
1535
+ # have `FilterVariable` values that are greater than or equal to the
1536
+ # value specified with `GE`.
1537
+ # @return [String]
1538
+ #
1539
+ # @!attribute [rw] le
1540
+ # The less than or equal to operator. The `MLModel` results will have
1541
+ # `FilterVariable` values that are less than or equal to the value
1542
+ # specified with `LE`.
1543
+ # @return [String]
1544
+ #
1545
+ # @!attribute [rw] ne
1546
+ # The not equal to operator. The `MLModel` results will have
1547
+ # `FilterVariable` values not equal to the value specified with `NE`.
1548
+ # @return [String]
1549
+ #
1550
+ # @!attribute [rw] prefix
1551
+ # A string that is found at the beginning of a variable, such as
1552
+ # `Name` or `Id`.
1553
+ #
1554
+ # For example, an `MLModel` could have the `Name`
1555
+ # `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, select
1556
+ # `Name` for the `FilterVariable` and any of the following strings for
1557
+ # the `Prefix`\:
1558
+ #
1559
+ # * 2014-09
1560
+ #
1561
+ # * 2014-09-09
1562
+ #
1563
+ # * 2014-09-09-Holiday
1564
+ # @return [String]
1565
+ #
1566
+ # @!attribute [rw] sort_order
1567
+ # A two-value parameter that determines the sequence of the resulting
1568
+ # list of `MLModel`.
1569
+ #
1570
+ # * `asc` - Arranges the list in ascending order (A-Z, 0-9).
1571
+ # * `dsc` - Arranges the list in descending order (Z-A, 9-0).
1572
+ #
1573
+ # Results are sorted by `FilterVariable`.
1574
+ # @return [String]
1575
+ #
1576
+ # @!attribute [rw] next_token
1577
+ # The ID of the page in the paginated results.
1578
+ # @return [String]
1579
+ #
1580
+ # @!attribute [rw] limit
1581
+ # The number of pages of information to include in the result. The
1582
+ # range of acceptable values is `1` through `100`. The default value
1583
+ # is `100`.
1584
+ # @return [Integer]
1585
+ class DescribeMLModelsInput < Struct.new(
1586
+ :filter_variable,
1587
+ :eq,
1588
+ :gt,
1589
+ :lt,
1590
+ :ge,
1591
+ :le,
1592
+ :ne,
1593
+ :prefix,
1594
+ :sort_order,
1595
+ :next_token,
1596
+ :limit)
1597
+ include Aws::Structure
1598
+ end
1599
+
1600
+ # Represents the output of a `DescribeMLModels` operation. The content
1601
+ # is essentially a list of `MLModel`.
1602
+ # @!attribute [rw] results
1603
+ # A list of `MLModel` that meet the search criteria.
1604
+ # @return [Array<Types::MLModel>]
1605
+ #
1606
+ # @!attribute [rw] next_token
1607
+ # The ID of the next page in the paginated results that indicates at
1608
+ # least one more page follows.
1609
+ # @return [String]
1610
+ class DescribeMLModelsOutput < Struct.new(
1611
+ :results,
1612
+ :next_token)
1613
+ include Aws::Structure
1614
+ end
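+
+ # A minimal pagination sketch (illustrative, assumes `client`): pass
+ # `next_token` back until the service stops returning one.
+ #
+ #   token = nil
+ #   loop do
+ #     params = { limit: 100 }
+ #     params[:next_token] = token if token
+ #     resp = client.describe_ml_models(params)
+ #     resp.results.each { |m| puts "#{m.ml_model_id} #{m.status}" }
+ #     token = resp.next_token
+ #     break unless token
+ #   end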
1615
+
1616
+ # @note When making an API call, pass DescribeTagsInput
1617
+ # data as a hash:
1618
+ #
1619
+ # {
1620
+ # resource_id: "EntityId", # required
1621
+ # resource_type: "BatchPrediction", # required, accepts BatchPrediction, DataSource, Evaluation, MLModel
1622
+ # }
1623
+ # @!attribute [rw] resource_id
1624
+ # The ID of the ML object. For example, `exampleModelId`.
1625
+ # @return [String]
1626
+ #
1627
+ # @!attribute [rw] resource_type
1628
+ # The type of the ML object.
1629
+ # @return [String]
1630
+ class DescribeTagsInput < Struct.new(
1631
+ :resource_id,
1632
+ :resource_type)
1633
+ include Aws::Structure
1634
+ end
1635
+
1636
+ # Amazon ML returns the following elements.
1637
+ # @!attribute [rw] resource_id
1638
+ # The ID of the tagged ML object.
1639
+ # @return [String]
1640
+ #
1641
+ # @!attribute [rw] resource_type
1642
+ # The type of the tagged ML object.
1643
+ # @return [String]
1644
+ #
1645
+ # @!attribute [rw] tags
1646
+ # A list of tags associated with the ML object.
1647
+ # @return [Array<Types::Tag>]
1648
+ class DescribeTagsOutput < Struct.new(
1649
+ :resource_id,
1650
+ :resource_type,
1651
+ :tags)
1652
+ include Aws::Structure
1653
+ end
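+
+ # Illustrative sketch (assumes `client`; `exampleModelId` is the
+ # placeholder ID used in the docs above, not a real resource):
+ #
+ #   resp = client.describe_tags(
+ #     resource_id: "exampleModelId",
+ #     resource_type: "MLModel"
+ #   )
+ #   resp.tags.each { |tag| puts "#{tag.key}=#{tag.value}" }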
1654
+
1655
+ # Represents the output of a `GetEvaluation` operation.
1656
+ #
1657
+ # The content consists of the detailed metadata and data file
1658
+ # information and the current status of the `Evaluation`.
1659
+ # @!attribute [rw] evaluation_id
1660
+ # The ID that is assigned to the `Evaluation` at creation.
1661
+ # @return [String]
1662
+ #
1663
+ # @!attribute [rw] ml_model_id
1664
+ # The ID of the `MLModel` that is the focus of the evaluation.
1665
+ # @return [String]
1666
+ #
1667
+ # @!attribute [rw] evaluation_data_source_id
1668
+ # The ID of the `DataSource` that is used to evaluate the `MLModel`.
1669
+ # @return [String]
1670
+ #
1671
+ # @!attribute [rw] input_data_location_s3
1672
+ # The location and name of the data in Amazon Simple Storage Service
1673
+ # (Amazon S3) that is used in the evaluation.
1674
+ # @return [String]
1675
+ #
1676
+ # @!attribute [rw] created_by_iam_user
1677
+ # The AWS user account that invoked the evaluation. The account type
1678
+ # can be either an AWS root account or an AWS Identity and Access
1679
+ # Management (IAM) user account.
1680
+ # @return [String]
1681
+ #
1682
+ # @!attribute [rw] created_at
1683
+ # The time that the `Evaluation` was created. The time is expressed in
1684
+ # epoch time.
1685
+ # @return [Time]
1686
+ #
1687
+ # @!attribute [rw] last_updated_at
1688
+ # The time of the most recent edit to the `Evaluation`. The time is
1689
+ # expressed in epoch time.
1690
+ # @return [Time]
1691
+ #
1692
+ # @!attribute [rw] name
1693
+ # A user-supplied name or description of the `Evaluation`.
1694
+ # @return [String]
1695
+ #
1696
+ # @!attribute [rw] status
1697
+ # The status of the evaluation. This element can have one of the
1698
+ # following values:
1699
+ #
1700
+ # * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
1701
+ # request to evaluate an `MLModel`.
1702
+ # * `INPROGRESS` - The evaluation is underway.
1703
+ # * `FAILED` - The request to evaluate an `MLModel` did not run to
1704
+ # completion. It is not usable.
1705
+ # * `COMPLETED` - The evaluation process completed successfully.
1706
+ # * `DELETED` - The `Evaluation` is marked as deleted. It is not
1707
+ # usable.
1708
+ # @return [String]
1709
+ #
1710
+ # @!attribute [rw] performance_metrics
1711
+ # Measurements of how well the `MLModel` performed, using observations
1712
+ # referenced by the `DataSource`. One of the following metrics is
1713
+ # returned, based on the type of the `MLModel`\:
1714
+ #
1715
+ # * BinaryAUC: A binary `MLModel` uses the Area Under the Curve (AUC)
1716
+ # technique to measure performance.
1717
+ #
1718
+ # * RegressionRMSE: A regression `MLModel` uses the Root Mean Square
1719
+ # Error (RMSE) technique to measure performance. RMSE measures the
1720
+ # difference between predicted and actual values for a single
1721
+ # variable.
1722
+ #
1723
+ # * MulticlassAvgFScore: A multiclass `MLModel` uses the F1 score
1724
+ # technique to measure performance.
1725
+ #
1726
+ # For more information about performance metrics, please see the
1727
+ # [Amazon Machine Learning Developer Guide][1].
1728
+ #
1729
+ #
1730
+ #
1731
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
1732
+ # @return [Types::PerformanceMetrics]
1733
+ #
1734
+ # @!attribute [rw] message
1735
+ # A description of the most recent details about evaluating the
1736
+ # `MLModel`.
1737
+ # @return [String]
1738
+ #
1739
+ # @!attribute [rw] compute_time
1740
+ # Long integer type that is a 64-bit signed number.
1741
+ # @return [Integer]
1742
+ #
1743
+ # @!attribute [rw] finished_at
1744
+ # A timestamp represented in epoch time.
1745
+ # @return [Time]
1746
+ #
1747
+ # @!attribute [rw] started_at
1748
+ # A timestamp represented in epoch time.
1749
+ # @return [Time]
1750
+ class Evaluation < Struct.new(
1751
+ :evaluation_id,
1752
+ :ml_model_id,
1753
+ :evaluation_data_source_id,
1754
+ :input_data_location_s3,
1755
+ :created_by_iam_user,
1756
+ :created_at,
1757
+ :last_updated_at,
1758
+ :name,
1759
+ :status,
1760
+ :performance_metrics,
1761
+ :message,
1762
+ :compute_time,
1763
+ :finished_at,
1764
+ :started_at)
1765
+ include Aws::Structure
1766
+ end
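+
+ # Sketch (illustrative): given a `resp` from the `DescribeEvaluations`
+ # example above, the status values determine which entries are usable:
+ #
+ #   done = resp.results.select { |ev| ev.status == "COMPLETED" }
+ #   done.each { |ev| puts "#{ev.evaluation_id} finished #{ev.finished_at}" }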
1767
+
1768
+ # @note When making an API call, pass GetBatchPredictionInput
1769
+ # data as a hash:
1770
+ #
1771
+ # {
1772
+ # batch_prediction_id: "EntityId", # required
1773
+ # }
1774
+ # @!attribute [rw] batch_prediction_id
1775
+ # An ID assigned to the `BatchPrediction` at creation.
1776
+ # @return [String]
1777
+ class GetBatchPredictionInput < Struct.new(
1778
+ :batch_prediction_id)
1779
+ include Aws::Structure
1780
+ end
1781
+
1782
+ # Represents the output of a `GetBatchPrediction` operation and
1783
+ # describes a `BatchPrediction`.
1784
+ # @!attribute [rw] batch_prediction_id
1785
+ # An ID assigned to the `BatchPrediction` at creation. This value
1786
+ # should be identical to the value of the `BatchPredictionID` in the
1787
+ # request.
1788
+ # @return [String]
1789
+ #
1790
+ # @!attribute [rw] ml_model_id
1791
+ # The ID of the `MLModel` that generated predictions for the
1792
+ # `BatchPrediction` request.
1793
+ # @return [String]
1794
+ #
1795
+ # @!attribute [rw] batch_prediction_data_source_id
1796
+ # The ID of the `DataSource` that was used to create the
1797
+ # `BatchPrediction`.
1798
+ # @return [String]
1799
+ #
1800
+ # @!attribute [rw] input_data_location_s3
1801
+ # The location of the data file or directory in Amazon Simple Storage
1802
+ # Service (Amazon S3).
1803
+ # @return [String]
1804
+ #
1805
+ # @!attribute [rw] created_by_iam_user
1806
+ # The AWS user account that invoked the `BatchPrediction`. The account
1807
+ # type can be either an AWS root account or an AWS Identity and Access
1808
+ # Management (IAM) user account.
1809
+ # @return [String]
1810
+ #
1811
+ # @!attribute [rw] created_at
1812
+ # The time when the `BatchPrediction` was created. The time is
1813
+ # expressed in epoch time.
1814
+ # @return [Time]
1815
+ #
1816
+ # @!attribute [rw] last_updated_at
1817
+ # The time of the most recent edit to `BatchPrediction`. The time is
1818
+ # expressed in epoch time.
1819
+ # @return [Time]
1820
+ #
1821
+ # @!attribute [rw] name
1822
+ # A user-supplied name or description of the `BatchPrediction`.
1823
+ # @return [String]
1824
+ #
1825
+ # @!attribute [rw] status
1826
+ # The status of the `BatchPrediction`, which can be one of the
1827
+ # following values:
1828
+ #
1829
+ # * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
1830
+ # request to generate batch predictions.
1831
+ # * `INPROGRESS` - The batch predictions are in progress.
1832
+ # * `FAILED` - The request to perform a batch prediction did not run
1833
+ # to completion. It is not usable.
1834
+ # * `COMPLETED` - The batch prediction process completed successfully.
1835
+ # * `DELETED` - The `BatchPrediction` is marked as deleted. It is not
1836
+ # usable.
1837
+ # @return [String]
1838
+ #
1839
+ # @!attribute [rw] output_uri
1840
+ # The location of an Amazon S3 bucket or directory to receive the
1841
+ # operation results.
1842
+ # @return [String]
1843
+ #
1844
+ # @!attribute [rw] log_uri
1845
+ # A link to the file that contains logs of the `CreateBatchPrediction`
1846
+ # operation.
1847
+ # @return [String]
1848
+ #
1849
+ # @!attribute [rw] message
1850
+ # A description of the most recent details about processing the batch
1851
+ # prediction request.
1852
+ # @return [String]
1853
+ #
1854
+ # @!attribute [rw] compute_time
1855
+ # The approximate CPU time in milliseconds that Amazon Machine
1856
+ # Learning spent processing the `BatchPrediction`, normalized and
1857
+ # scaled on computation resources. `ComputeTime` is only available if
1858
+ # the `BatchPrediction` is in the `COMPLETED` state.
1859
+ # @return [Integer]
1860
+ #
1861
+ # @!attribute [rw] finished_at
1862
+ # The epoch time when Amazon Machine Learning marked the
1863
+ # `BatchPrediction` as `COMPLETED` or `FAILED`. `FinishedAt` is only
1864
+ # available when the `BatchPrediction` is in the `COMPLETED` or
1865
+ # `FAILED` state.
1866
+ # @return [Time]
1867
+ #
1868
+ # @!attribute [rw] started_at
1869
+ # The epoch time when Amazon Machine Learning marked the
1870
+ # `BatchPrediction` as `INPROGRESS`. `StartedAt` isn't available if
1871
+ # the `BatchPrediction` is in the `PENDING` state.
1872
+ # @return [Time]
1873
+ #
1874
+ # @!attribute [rw] total_record_count
1875
+ # The total number of records that Amazon Machine Learning saw while
1876
+ # processing the `BatchPrediction`.
1877
+ # @return [Integer]
1878
+ #
1879
+ # @!attribute [rw] invalid_record_count
1880
+ # The number of invalid records that Amazon Machine Learning saw while
1881
+ # processing the `BatchPrediction`.
1882
+ # @return [Integer]
1883
+ class GetBatchPredictionOutput < Struct.new(
1884
+ :batch_prediction_id,
1885
+ :ml_model_id,
1886
+ :batch_prediction_data_source_id,
1887
+ :input_data_location_s3,
1888
+ :created_by_iam_user,
1889
+ :created_at,
1890
+ :last_updated_at,
1891
+ :name,
1892
+ :status,
1893
+ :output_uri,
1894
+ :log_uri,
1895
+ :message,
1896
+ :compute_time,
1897
+ :finished_at,
1898
+ :started_at,
1899
+ :total_record_count,
1900
+ :invalid_record_count)
1901
+ include Aws::Structure
1902
+ end
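+
+ # A hedged polling sketch (assumes `client`; the ID and the 30-second
+ # interval are placeholders). Polls until a terminal status is reached:
+ #
+ #   resp = client.get_batch_prediction(batch_prediction_id: "bp-example")
+ #   until %w[COMPLETED FAILED DELETED].include?(resp.status)
+ #     sleep 30
+ #     resp = client.get_batch_prediction(batch_prediction_id: "bp-example")
+ #   end
+ #   puts resp.output_uri if resp.status == "COMPLETED"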
1903
+
1904
+ # @note When making an API call, pass GetDataSourceInput
1905
+ # data as a hash:
1906
+ #
1907
+ # {
1908
+ # data_source_id: "EntityId", # required
1909
+ # verbose: false,
1910
+ # }
1911
+ # @!attribute [rw] data_source_id
1912
+ # The ID assigned to the `DataSource` at creation.
1913
+ # @return [String]
1914
+ #
1915
+ # @!attribute [rw] verbose
1916
+ # Specifies whether the `GetDataSource` operation should return
1917
+ # `DataSourceSchema`.
1918
+ #
1919
+ # If true, `DataSourceSchema` is returned.
1920
+ #
1921
+ # If false, `DataSourceSchema` is not returned.
1922
+ # @return [Boolean]
1923
+ class GetDataSourceInput < Struct.new(
1924
+ :data_source_id,
1925
+ :verbose)
1926
+ include Aws::Structure
1927
+ end
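+
+ # Sketch of the `verbose` switch described above (assumes `client` and
+ # a placeholder ID):
+ #
+ #   ds = client.get_data_source(data_source_id: "ds-example", verbose: true)
+ #   puts ds.data_source_schema  # populated only because verbose: true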
1928
+
1929
+ # Represents the output of a `GetDataSource` operation and describes a
1930
+ # `DataSource`.
1931
+ # @!attribute [rw] data_source_id
1932
+ # The ID assigned to the `DataSource` at creation. This value should
1933
+ # be identical to the value of the `DataSourceId` in the request.
1934
+ # @return [String]
1935
+ #
1936
+ # @!attribute [rw] data_location_s3
1937
+ # The location of the data file or directory in Amazon Simple Storage
1938
+ # Service (Amazon S3).
1939
+ # @return [String]
1940
+ #
1941
+ # @!attribute [rw] data_rearrangement
1942
+ # A JSON string that represents the splitting and rearrangement
1943
+ # requirement used when this `DataSource` was created.
1944
+ # @return [String]
1945
+ #
1946
+ # @!attribute [rw] created_by_iam_user
1947
+ # The AWS user account from which the `DataSource` was created. The
1948
+ # account type can be either an AWS root account or an AWS Identity
1949
+ # and Access Management (IAM) user account.
1950
+ # @return [String]
1951
+ #
1952
+ # @!attribute [rw] created_at
1953
+ # The time that the `DataSource` was created. The time is expressed in
1954
+ # epoch time.
1955
+ # @return [Time]
1956
+ #
1957
+ # @!attribute [rw] last_updated_at
1958
+ # The time of the most recent edit to the `DataSource`. The time is
1959
+ # expressed in epoch time.
1960
+ # @return [Time]
1961
+ #
1962
+ # @!attribute [rw] data_size_in_bytes
1963
+ # The total size of observations in the data files.
1964
+ # @return [Integer]
1965
+ #
1966
+ # @!attribute [rw] number_of_files
1967
+ # The number of data files referenced by the `DataSource`.
1968
+ # @return [Integer]
1969
+ #
1970
+ # @!attribute [rw] name
1971
+ # A user-supplied name or description of the `DataSource`.
1972
+ # @return [String]
1973
+ #
1974
+ # @!attribute [rw] status
1975
+ # The current status of the `DataSource`. This element can have one of
1976
+ # the following values:
1977
+ #
1978
+ # * `PENDING` - Amazon ML submitted a request to create a
1979
+ # `DataSource`.
1980
+ # * `INPROGRESS` - The creation process is underway.
1981
+ # * `FAILED` - The request to create a `DataSource` did not run to
1982
+ # completion. It is not usable.
1983
+ # * `COMPLETED` - The creation process completed successfully.
1984
+ # * `DELETED` - The `DataSource` is marked as deleted. It is not
1985
+ # usable.
1986
+ # @return [String]
1987
+ #
1988
+ # @!attribute [rw] log_uri
1989
+ # A link to the file containing logs of `CreateDataSourceFrom*`
1990
+ # operations.
1991
+ # @return [String]
1992
+ #
1993
+ # @!attribute [rw] message
1994
+ # The user-supplied description of the most recent details about
1995
+ # creating the `DataSource`.
1996
+ # @return [String]
1997
+ #
1998
+ # @!attribute [rw] redshift_metadata
1999
+ # Describes the `DataSource` details specific to Amazon Redshift.
2000
+ # @return [Types::RedshiftMetadata]
2001
+ #
2002
+ # @!attribute [rw] rds_metadata
2003
+ # The datasource details that are specific to Amazon RDS.
2004
+ # @return [Types::RDSMetadata]
2005
+ #
2006
+ # @!attribute [rw] role_arn
2007
+ # The Amazon Resource Name (ARN) of an [AWS IAM Role][1], such as the
2008
+ # following: arn:aws:iam::account:role/rolename.
2009
+ #
2010
+ #
2011
+ #
2012
+ # [1]: http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts
2013
+ # @return [String]
2014
+ #
2015
+ # @!attribute [rw] compute_statistics
2016
+ # The parameter is `true` if statistics need to be generated from the
2017
+ # observation data.
2018
+ # @return [Boolean]
2019
+ #
2020
+ # @!attribute [rw] compute_time
2021
+ # The approximate CPU time in milliseconds that Amazon Machine
2022
+ # Learning spent processing the `DataSource`, normalized and scaled on
2023
+ # computation resources. `ComputeTime` is only available if the
2024
+ # `DataSource` is in the `COMPLETED` state and the `ComputeStatistics`
2025
+ # is set to true.
2026
+ # @return [Integer]
2027
+ #
2028
+ # @!attribute [rw] finished_at
2029
+ # The epoch time when Amazon Machine Learning marked the `DataSource`
2030
+ # as `COMPLETED` or `FAILED`. `FinishedAt` is only available when the
2031
+ # `DataSource` is in the `COMPLETED` or `FAILED` state.
2032
+ # @return [Time]
2033
+ #
2034
+ # @!attribute [rw] started_at
2035
+ # The epoch time when Amazon Machine Learning marked the `DataSource`
2036
+ # as `INPROGRESS`. `StartedAt` isn't available if the `DataSource` is
2037
+ # in the `PENDING` state.
2038
+ # @return [Time]
2039
+ #
2040
+ # @!attribute [rw] data_source_schema
2041
+ # The schema used by all of the data files of this `DataSource`.
2042
+ #
2043
+ # <note markdown="1"><title>Note</title> This parameter is provided as part of the verbose format.
2044
+ #
2045
+ # </note>
2046
+ # @return [String]
2047
+ class GetDataSourceOutput < Struct.new(
2048
+ :data_source_id,
2049
+ :data_location_s3,
2050
+ :data_rearrangement,
2051
+ :created_by_iam_user,
2052
+ :created_at,
2053
+ :last_updated_at,
2054
+ :data_size_in_bytes,
2055
+ :number_of_files,
2056
+ :name,
2057
+ :status,
2058
+ :log_uri,
2059
+ :message,
2060
+ :redshift_metadata,
2061
+ :rds_metadata,
2062
+ :role_arn,
2063
+ :compute_statistics,
2064
+ :compute_time,
2065
+ :finished_at,
2066
+ :started_at,
2067
+ :data_source_schema)
2068
+ include Aws::Structure
2069
+ end
2070
+
2071
+ # @note When making an API call, pass GetEvaluationInput
2072
+ # data as a hash:
2073
+ #
2074
+ # {
2075
+ # evaluation_id: "EntityId", # required
2076
+ # }
2077
+ # @!attribute [rw] evaluation_id
2078
+ # The ID of the `Evaluation` to retrieve. The evaluation of each
2079
+ # `MLModel` is recorded and cataloged. The ID provides the means to
2080
+ # access the information.
2081
+ # @return [String]
2082
+ class GetEvaluationInput < Struct.new(
2083
+ :evaluation_id)
2084
+ include Aws::Structure
2085
+ end
2086
+
2087
+ # Represents the output of a `GetEvaluation` operation and describes an
2088
+ # `Evaluation`.
2089
+ # @!attribute [rw] evaluation_id
2090
+ # The evaluation ID, which is the same as the `EvaluationId` in the
2091
+ # request.
2092
+ # @return [String]
2093
+ #
2094
+ # @!attribute [rw] ml_model_id
2095
+ # The ID of the `MLModel` that was the focus of the evaluation.
2096
+ # @return [String]
2097
+ #
2098
+ # @!attribute [rw] evaluation_data_source_id
2099
+ # The `DataSource` used for this evaluation.
2100
+ # @return [String]
2101
+ #
2102
+ # @!attribute [rw] input_data_location_s3
2103
+ # The location of the data file or directory in Amazon Simple Storage
2104
+ # Service (Amazon S3).
2105
+ # @return [String]
2106
+ #
2107
+ # @!attribute [rw] created_by_iam_user
2108
+ # The AWS user account that invoked the evaluation. The account type
2109
+ # can be either an AWS root account or an AWS Identity and Access
2110
+ # Management (IAM) user account.
2111
+ # @return [String]
2112
+ #
2113
+ # @!attribute [rw] created_at
2114
+ # The time that the `Evaluation` was created. The time is expressed in
2115
+ # epoch time.
2116
+ # @return [Time]
2117
+ #
2118
+ # @!attribute [rw] last_updated_at
2119
+ # The time of the most recent edit to the `Evaluation`. The time is
2120
+ # expressed in epoch time.
2121
+ # @return [Time]
2122
+ #
2123
+ # @!attribute [rw] name
2124
+ # A user-supplied name or description of the `Evaluation`.
2125
+ # @return [String]
2126
+ #
2127
+ # @!attribute [rw] status
2128
+ # The status of the evaluation. This element can have one of the
2129
+ # following values:
2130
+ #
2131
+ # * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
2132
+ # request to evaluate an `MLModel`.
2133
+ # * `INPROGRESS` - The evaluation is underway.
2134
+ # * `FAILED` - The request to evaluate an `MLModel` did not run to
2135
+ # completion. It is not usable.
2136
+ # * `COMPLETED` - The evaluation process completed successfully.
2137
+ # * `DELETED` - The `Evaluation` is marked as deleted. It is not
2138
+ # usable.
2139
+ # @return [String]
2140
+ #
2141
+ # @!attribute [rw] performance_metrics
2142
+ # Measurements of how well the `MLModel` performed, using observations
2143
+ # referenced by the `DataSource`. One of the following metrics is
2144
+ # returned, based on the type of the `MLModel`\:
2145
+ #
2146
+ # * BinaryAUC: A binary `MLModel` uses the Area Under the Curve (AUC)
2147
+ # technique to measure performance.
2148
+ #
2149
+ # * RegressionRMSE: A regression `MLModel` uses the Root Mean Square
2150
+ # Error (RMSE) technique to measure performance. RMSE measures the
2151
+ # difference between predicted and actual values for a single
2152
+ # variable.
2153
+ #
2154
+ # * MulticlassAvgFScore: A multiclass `MLModel` uses the F1 score
2155
+ # technique to measure performance.
2156
+ #
2157
+ # For more information about performance metrics, please see the
2158
+ # [Amazon Machine Learning Developer Guide][1].
2159
+ #
2160
+ #
2161
+ #
2162
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
2163
+ # @return [Types::PerformanceMetrics]
2164
+ #
2165
+ # @!attribute [rw] log_uri
2166
+ # A link to the file that contains logs of the `CreateEvaluation`
2167
+ # operation.
2168
+ # @return [String]
2169
+ #
2170
+ # @!attribute [rw] message
2171
+ # A description of the most recent details about evaluating the
2172
+ # `MLModel`.
2173
+ # @return [String]
2174
+ #
2175
+ # @!attribute [rw] compute_time
2176
+ # The approximate CPU time in milliseconds that Amazon Machine
2177
+ # Learning spent processing the `Evaluation`, normalized and scaled on
2178
+ # computation resources. `ComputeTime` is only available if the
2179
+ # `Evaluation` is in the `COMPLETED` state.
2180
+ # @return [Integer]
2181
+ #
2182
+ # @!attribute [rw] finished_at
2183
+ # The epoch time when Amazon Machine Learning marked the `Evaluation`
2184
+ # as `COMPLETED` or `FAILED`. `FinishedAt` is only available when the
2185
+ # `Evaluation` is in the `COMPLETED` or `FAILED` state.
2186
+ # @return [Time]
2187
+ #
2188
+ # @!attribute [rw] started_at
2189
+ # The epoch time when Amazon Machine Learning marked the `Evaluation`
2190
+ # as `INPROGRESS`. `StartedAt` isn't available if the `Evaluation` is
2191
+ # in the `PENDING` state.
2192
+ # @return [Time]
2193
+ class GetEvaluationOutput < Struct.new(
2194
+ :evaluation_id,
2195
+ :ml_model_id,
2196
+ :evaluation_data_source_id,
2197
+ :input_data_location_s3,
2198
+ :created_by_iam_user,
2199
+ :created_at,
2200
+ :last_updated_at,
2201
+ :name,
2202
+ :status,
2203
+ :performance_metrics,
2204
+ :log_uri,
2205
+ :message,
2206
+ :compute_time,
2207
+ :finished_at,
2208
+ :started_at)
2209
+ include Aws::Structure
2210
+ end
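+
+ # Sketch of reading the returned metric (illustrative; it assumes the
+ # property keys match the metric names listed above, e.g. "BinaryAUC"):
+ #
+ #   resp = client.get_evaluation(evaluation_id: "ev-example")
+ #   props = resp.performance_metrics.properties
+ #   puts "AUC: #{props["BinaryAUC"]}" if props["BinaryAUC"]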
2211
+
2212
+ # @note When making an API call, pass GetMLModelInput
2213
+ # data as a hash:
2214
+ #
2215
+ # {
2216
+ # ml_model_id: "EntityId", # required
2217
+ # verbose: false,
2218
+ # }
2219
+ # @!attribute [rw] ml_model_id
2220
+ # The ID assigned to the `MLModel` at creation.
2221
+ # @return [String]
2222
+ #
2223
+ # @!attribute [rw] verbose
2224
+ # Specifies whether the `GetMLModel` operation should return `Recipe`.
2225
+ #
2226
+ # If true, `Recipe` is returned.
2227
+ #
2228
+ # If false, `Recipe` is not returned.
2229
+ # @return [Boolean]
2230
+ class GetMLModelInput < Struct.new(
2231
+ :ml_model_id,
2232
+ :verbose)
2233
+ include Aws::Structure
2234
+ end
2235
+
2236
+ # Represents the output of a `GetMLModel` operation, and provides
2237
+ # detailed information about an `MLModel`.
2238
+ # @!attribute [rw] ml_model_id
2239
+ # The MLModel ID, which is the same as the `MLModelId` in the
2240
+ # request.
2242
+ # @return [String]
2243
+ #
2244
+ # @!attribute [rw] training_data_source_id
2245
+ # The ID of the training `DataSource`.
2246
+ # @return [String]
2247
+ #
2248
+ # @!attribute [rw] created_by_iam_user
2249
+ # The AWS user account from which the `MLModel` was created. The
2250
+ # account type can be either an AWS root account or an AWS Identity
2251
+ # and Access Management (IAM) user account.
2252
+ # @return [String]
2253
+ #
2254
+ # @!attribute [rw] created_at
2255
+ # The time that the `MLModel` was created. The time is expressed in
2256
+ # epoch time.
2257
+ # @return [Time]
2258
+ #
2259
+ # @!attribute [rw] last_updated_at
2260
+ # The time of the most recent edit to the `MLModel`. The time is
2261
+ # expressed in epoch time.
2262
+ # @return [Time]
2263
+ #
2264
+ # @!attribute [rw] name
2265
+ # A user-supplied name or description of the `MLModel`.
2266
+ # @return [String]
2267
+ #
2268
+ # @!attribute [rw] status
2269
+ # The current status of the `MLModel`. This element can have one of
2270
+ # the following values:
2271
+ #
2272
+ # * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
2273
+ # request to describe an `MLModel`.
2274
+ # * `INPROGRESS` - The request is processing.
2275
+ # * `FAILED` - The request did not run to completion. The ML model
2276
+ # isn't usable.
2277
+ # * `COMPLETED` - The request completed successfully.
2278
+ # * `DELETED` - The `MLModel` is marked as deleted. It isn't usable.
2279
+ # @return [String]
2280
+ #
2281
+ # @!attribute [rw] size_in_bytes
2282
+ # Long integer type that is a 64-bit signed number.
2283
+ # @return [Integer]
2284
+ #
2285
+ # @!attribute [rw] endpoint_info
2286
+ # The current endpoint of the `MLModel`
2287
+ # @return [Types::RealtimeEndpointInfo]
2288
+ #
2289
+ # @!attribute [rw] training_parameters
2290
+ # A list of the training parameters in the `MLModel`. The list is
2291
+ # implemented as a map of key-value pairs.
2292
+ #
2293
+ # The following is the current set of training parameters:
2294
+ #
2295
+ # * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the
2296
+ # model. Depending on the input data, the size of the model might
2297
+ # affect its performance.
2298
+ #
2299
+ # The value is an integer that ranges from `100000` to `2147483648`.
2300
+ # The default value is `33554432`.
2301
+ #
2302
+ # * `sgd.maxPasses` - The number of times that the training process
2303
+ # traverses the observations to build the `MLModel`. The value is an
2304
+ # integer that ranges from `1` to `10000`. The default value is
2305
+ # `10`.
2306
+ #
2307
+ # * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
2308
+ # Shuffling data improves a model's ability to find the optimal
2309
+ # solution for a variety of data types. The valid values are `auto`
2310
+ # and `none`. The default value is `none`. We strongly recommend
2311
+ # that you shuffle your data.
2312
+ #
2313
+ # * `sgd.l1RegularizationAmount` - The coefficient regularization L1
2314
+ # norm. It controls overfitting the data by penalizing large
2315
+ # coefficients. This tends to drive coefficients to zero, resulting
2316
+ # in a sparse feature set. If you use this parameter, start by
2317
+ # specifying a small value, such as `1.0E-08`.
2318
+ #
2319
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
2320
+ # default is to not use L1 normalization. This parameter can't be
2321
+ # used when `L2` is specified. Use this parameter sparingly.
2322
+ #
2323
+ # * `sgd.l2RegularizationAmount` - The coefficient regularization L2
2324
+ # norm. It controls overfitting the data by penalizing large
2325
+ # coefficients. This tends to drive coefficients to small, nonzero
2326
+ # values. If you use this parameter, start by specifying a small
2327
+ # value, such as `1.0E-08`.
2328
+ #
2329
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
2330
+ # default is to not use L2 normalization. This parameter can't be
2331
+ # used when `L1` is specified. Use this parameter sparingly.
2332
+ # @return [Hash<String,String>]
2333
+ #
2334
+ # @!attribute [rw] input_data_location_s3
2335
+ # The location of the data file or directory in Amazon Simple Storage
2336
+ # Service (Amazon S3).
2337
+ # @return [String]
2338
+ #
2339
+ # @!attribute [rw] ml_model_type
2340
+ # Identifies the `MLModel` category. The following are the available
2341
+ # types:
2342
+ #
2343
+ # * REGRESSION -- Produces a numeric result. For example, "What price
2344
+ # should a house be listed at?"
2345
+ # * BINARY -- Produces one of two possible results. For example, "Is
2346
+ # this an e-commerce website?"
2347
+ # * MULTICLASS -- Produces one of several possible results. For
2348
+ # example, "Is this a HIGH, LOW or MEDIUM risk trade?"
2349
+ # @return [String]
2350
+ #
2351
+ # @!attribute [rw] score_threshold
2352
+ # The scoring threshold is used in binary classification `MLModel`
2353
+ # models. It marks the boundary between a positive prediction and a
2354
+ # negative prediction.
2357
+ #
2358
+ # Output values greater than or equal to the threshold receive a
2359
+ # positive result from the MLModel, such as `true`. Output values less
2360
+ # than the threshold receive a negative response from the MLModel,
2361
+ # such as `false`.
2362
+ # @return [Float]
2363
+ #
2364
+ # @!attribute [rw] score_threshold_last_updated_at
2365
+ # The time of the most recent edit to the `ScoreThreshold`. The time
2366
+ # is expressed in epoch time.
2367
+ # @return [Time]
2368
+ #
2369
+ # @!attribute [rw] log_uri
2370
+ # A link to the file that contains logs of the `CreateMLModel`
2371
+ # operation.
2372
+ # @return [String]
2373
+ #
2374
+ # @!attribute [rw] message
2375
+ # A description of the most recent details about accessing the
2376
+ # `MLModel`.
2377
+ # @return [String]
2378
+ #
2379
+ # @!attribute [rw] compute_time
2380
+ # The approximate CPU time in milliseconds that Amazon Machine
2381
+ # Learning spent processing the `MLModel`, normalized and scaled on
2382
+ # computation resources. `ComputeTime` is only available if the
2383
+ # `MLModel` is in the `COMPLETED` state.
2384
+ # @return [Integer]
2385
+ #
2386
+ # @!attribute [rw] finished_at
2387
+ # The epoch time when Amazon Machine Learning marked the `MLModel` as
2388
+ # `COMPLETED` or `FAILED`. `FinishedAt` is only available when the
2389
+ # `MLModel` is in the `COMPLETED` or `FAILED` state.
2390
+ # @return [Time]
2391
+ #
2392
+ # @!attribute [rw] started_at
2393
+ # The epoch time when Amazon Machine Learning marked the `MLModel` as
2394
+ # `INPROGRESS`. `StartedAt` isn't available if the `MLModel` is in
2395
+ # the `PENDING` state.
2396
+ # @return [Time]
2397
+ #
2398
+ # @!attribute [rw] recipe
2399
+ # The recipe to use when training the `MLModel`. The `Recipe` provides
2400
+ # detailed information about the observation data to use during
2401
+ # training, and manipulations to perform on the observation data
2402
+ # during training.
2403
+ #
2404
+ # <note markdown="1"><title>Note</title> This parameter is provided as part of the verbose format.
2405
+ #
2406
+ # </note>
2407
+ # @return [String]
2408
+ #
2409
+ # @!attribute [rw] schema
2410
+ # The schema used by all of the data files referenced by the
2411
+ # `DataSource`.
2412
+ #
2413
+ # <note markdown="1"><title>Note</title> This parameter is provided as part of the verbose format.
2414
+ #
2415
+ # </note>
2416
+ # @return [String]
2417
+ class GetMLModelOutput < Struct.new(
2418
+ :ml_model_id,
2419
+ :training_data_source_id,
2420
+ :created_by_iam_user,
2421
+ :created_at,
2422
+ :last_updated_at,
2423
+ :name,
2424
+ :status,
2425
+ :size_in_bytes,
2426
+ :endpoint_info,
2427
+ :training_parameters,
2428
+ :input_data_location_s3,
2429
+ :ml_model_type,
2430
+ :score_threshold,
2431
+ :score_threshold_last_updated_at,
2432
+ :log_uri,
2433
+ :message,
2434
+ :compute_time,
2435
+ :finished_at,
2436
+ :started_at,
2437
+ :recipe,
2438
+ :schema)
2439
+ include Aws::Structure
2440
+ end
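+
+ # Sketch of reading the training parameters and threshold back from a
+ # trained model (assumes `client`; keys are the `sgd.*` names above):
+ #
+ #   model = client.get_ml_model(ml_model_id: "ml-example", verbose: true)
+ #   puts model.training_parameters["sgd.maxPasses"]
+ #   puts model.score_threshold if model.ml_model_type == "BINARY"
+ #   puts model.recipe  # verbose-only, like the schema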
2441
+
2442
+ # Represents the output of a `GetMLModel` operation.
2443
+ #
2444
+ # The content consists of the detailed metadata and the current status
2445
+ # of the `MLModel`.
2446
+ # @!attribute [rw] ml_model_id
2447
+ # The ID assigned to the `MLModel` at creation.
2448
+ # @return [String]
2449
+ #
2450
+ # @!attribute [rw] training_data_source_id
2451
+ # The ID of the training `DataSource`. The `CreateMLModel` operation
2452
+ # uses the `TrainingDataSourceId`.
2453
+ # @return [String]
2454
+ #
2455
+ # @!attribute [rw] created_by_iam_user
2456
+ # The AWS user account from which the `MLModel` was created. The
2457
+ # account type can be either an AWS root account or an AWS Identity
2458
+ # and Access Management (IAM) user account.
2459
+ # @return [String]
2460
+ #
2461
+ # @!attribute [rw] created_at
2462
+ # The time that the `MLModel` was created. The time is expressed in
2463
+ # epoch time.
2464
+ # @return [Time]
2465
+ #
2466
+ # @!attribute [rw] last_updated_at
2467
+ # The time of the most recent edit to the `MLModel`. The time is
2468
+ # expressed in epoch time.
2469
+ # @return [Time]
2470
+ #
2471
+ # @!attribute [rw] name
2472
+ # A user-supplied name or description of the `MLModel`.
2473
+ # @return [String]
2474
+ #
2475
+ # @!attribute [rw] status
2476
+ # The current status of an `MLModel`. This element can have one of the
2477
+ # following values:
2478
+ #
2479
+ # * `PENDING` - Amazon Machine Learning (Amazon ML) submitted a
2480
+ # request to create an `MLModel`.
2481
+ # * `INPROGRESS` - The creation process is underway.
2482
+ # * `FAILED` - The request to create an `MLModel` didn't run to
2483
+ # completion. The model isn't usable.
2484
+ # * `COMPLETED` - The creation process completed successfully.
2485
+ # * `DELETED` - The `MLModel` is marked as deleted. It isn't usable.
2486
+ # @return [String]
2487
+ #
2488
+ # @!attribute [rw] size_in_bytes
2489
+ # Long integer type that is a 64-bit signed number.
2490
+ # @return [Integer]
2491
+ #
2492
+ # @!attribute [rw] endpoint_info
2493
+ # The current endpoint of the `MLModel`.
2494
+ # @return [Types::RealtimeEndpointInfo]
2495
+ #
2496
+ # @!attribute [rw] training_parameters
2497
+ # A list of the training parameters in the `MLModel`. The list is
2498
+ # implemented as a map of key-value pairs.
2499
+ #
2500
+ # The following is the current set of training parameters:
2501
+ #
2502
+ # * `sgd.maxMLModelSizeInBytes` - The maximum allowed size of the
2503
+ # model. Depending on the input data, the size of the model might
2504
+ # affect its performance.
2505
+ #
2506
+ # The value is an integer that ranges from `100000` to `2147483648`.
2507
+ # The default value is `33554432`.
2508
+ #
2509
+ # * `sgd.maxPasses` - The number of times that the training process
2510
+ # traverses the observations to build the `MLModel`. The value is an
2511
+ # integer that ranges from `1` to `10000`. The default value is
2512
+ # `10`.
2513
+ #
2514
+ # * `sgd.shuffleType` - Whether Amazon ML shuffles the training data.
2515
+ # Shuffling the data improves a model's ability to find the optimal
2516
+ # solution for a variety of data types. The valid values are `auto`
2517
+ # and `none`. The default value is `none`.
2518
+ #
2519
+ # * `sgd.l1RegularizationAmount` - The coefficient regularization L1
2520
+ # norm, which controls overfitting the data by penalizing large
2521
+ # coefficients. This parameter tends to drive coefficients to zero,
2522
+ # resulting in a sparse feature set. If you use this parameter, start
2523
+ # by specifying a small value, such as `1.0E-08`.
2524
+ #
2525
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
2526
+ # default is to not use L1 normalization. This parameter can't be
2527
+ # used when `L2` is specified. Use this parameter sparingly.
2528
+ #
2529
+ # * `sgd.l2RegularizationAmount` - The coefficient regularization L2
2530
+ # norm, which controls overfitting the data by penalizing large
2531
+ # coefficients. This tends to drive coefficients to small, nonzero
2532
+ # values. If you use this parameter, start by specifying a small
2533
+ # value, such as `1.0E-08`.
2534
+ #
2535
+ # The value is a double that ranges from `0` to `MAX_DOUBLE`. The
2536
+ # default is to not use L2 normalization. This parameter can't be
2537
+ # used when `L1` is specified. Use this parameter sparingly.
2538
+ # @return [Hash<String,String>]
2539
+ #
2540
+ # @!attribute [rw] input_data_location_s3
2541
+ # The location of the data file or directory in Amazon Simple Storage
2542
+ # Service (Amazon S3).
2543
+ # @return [String]
2544
+ #
2545
+ # @!attribute [rw] algorithm
2546
+ # The algorithm used to train the `MLModel`. The following algorithm
2547
+ # is supported:
2548
+ #
2549
+ # * `SGD` -- Stochastic gradient descent. The goal of `SGD` is to
2550
+ # minimize the loss function by following its gradient.
2551
+ # @return [String]
2552
+ #
2553
+ # @!attribute [rw] ml_model_type
2554
+ # Identifies the `MLModel` category. The following are the available
2555
+ # types:
2556
+ #
2557
+ # * `REGRESSION` - Produces a numeric result. For example, "What
2558
+ # price should a house be listed at?"
2559
+ # * `BINARY` - Produces one of two possible results. For example, "Is
2560
+ # this a child-friendly web site?".
2561
+ # * `MULTICLASS` - Produces one of several possible results. For
2562
+ # example, "Is this a HIGH-, LOW-, or MEDIUM<?oxy\_delete
2563
+ # author="annbech" timestamp="20160328T175050-0700" content="
2564
+ # "><?oxy\_insert\_start author="annbech"
2565
+ # timestamp="20160328T175050-0700">-<?oxy\_insert\_end>risk
2566
+ # trade?".
2567
+ # @return [String]
2568
+ #
2569
+ # @!attribute [rw] score_threshold
2570
+ # @return [Float]
2571
+ #
2572
+ # @!attribute [rw] score_threshold_last_updated_at
2573
+ # The time of the most recent edit to the `ScoreThreshold`. The time
2574
+ # is expressed in epoch time.
2575
+ # @return [Time]
2576
+ #
2577
+ # @!attribute [rw] message
2578
+ # A description of the most recent details about accessing the
2579
+ # `MLModel`.
2580
+ # @return [String]
2581
+ #
2582
+ # @!attribute [rw] compute_time
2583
+ # Long integer type that is a 64-bit signed number.
2584
+ # @return [Integer]
2585
+ #
2586
+ # @!attribute [rw] finished_at
2587
+ # A timestamp represented in epoch time.
2588
+ # @return [Time]
2589
+ #
2590
+ # @!attribute [rw] started_at
2591
+ # A timestamp represented in epoch time.
2592
+ # @return [Time]
2593
+ class MLModel < Struct.new(
2594
+ :ml_model_id,
2595
+ :training_data_source_id,
2596
+ :created_by_iam_user,
2597
+ :created_at,
2598
+ :last_updated_at,
2599
+ :name,
2600
+ :status,
2601
+ :size_in_bytes,
2602
+ :endpoint_info,
2603
+ :training_parameters,
2604
+ :input_data_location_s3,
2605
+ :algorithm,
2606
+ :ml_model_type,
2607
+ :score_threshold,
2608
+ :score_threshold_last_updated_at,
2609
+ :message,
2610
+ :compute_time,
2611
+ :finished_at,
2612
+ :started_at)
2613
+ include Aws::Structure
2614
+ end
2615
+
2616
+ # Measurements of how well the `MLModel` performed on known
2617
+ # observations. One of the following metrics is returned, based on the
2618
+ # type of the `MLModel`\:
2619
+ #
2620
+ # * BinaryAUC: The binary `MLModel` uses the Area Under the Curve (AUC)
2621
+ # technique to measure performance.
2622
+ #
2623
+ # * RegressionRMSE: The regression `MLModel` uses the Root Mean Square
2624
+ # Error (RMSE) technique to measure performance. RMSE measures the
2625
+ # difference between predicted and actual values for a single
2626
+ # variable.
2627
+ #
2628
+ # * MulticlassAvgFScore: The multiclass `MLModel` uses the F1 score
2629
+ # technique to measure performance.
2630
+ #
2631
+ # For more information about performance metrics, please see the [Amazon
2632
+ # Machine Learning Developer Guide][1].
2633
+ #
2634
+ #
2635
+ #
2636
+ # [1]: http://docs.aws.amazon.com/machine-learning/latest/dg
2637
+ # @!attribute [rw] properties
2638
+ # @return [Hash<String,String>]
2639
+ class PerformanceMetrics < Struct.new(
2640
+ :properties)
2641
+ include Aws::Structure
2642
+ end
2643
+
2644
+ # @note When making an API call, pass PredictInput
2645
+ # data as a hash:
2646
+ #
2647
+ # {
2648
+ # ml_model_id: "EntityId", # required
2649
+ # record: { # required
2650
+ # "VariableName" => "VariableValue",
2651
+ # },
2652
+ # predict_endpoint: "VipURL", # required
2653
+ # }
2654
+ # @!attribute [rw] ml_model_id
2655
+ # A unique identifier of the `MLModel`.
2656
+ # @return [String]
2657
+ #
2658
+ # @!attribute [rw] record
2659
+ # A map of variable name-value pairs that represent an observation.
2660
+ # @return [Hash<String,String>]
2661
+ #
2662
+ # @!attribute [rw] predict_endpoint
2663
+ # @return [String]
2664
+ class PredictInput < Struct.new(
2665
+ :ml_model_id,
2666
+ :record,
2667
+ :predict_endpoint)
2668
+ include Aws::Structure
2669
+ end
2670
+
2671
+ # @!attribute [rw] prediction
2672
+ # The output from a `Predict` operation:
2673
+ #
2674
+ # * `Details` - Contains the following attributes:
2675
+ # `DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY |
2676
+ # MULTICLASS` `DetailsAttributes.ALGORITHM - SGD`
2677
+ #
2678
+ # * `PredictedLabel` - Present for either a `BINARY` or `MULTICLASS`
2679
+ # `MLModel` request.
2680
+ #
2681
+ # * `PredictedScores` - Contains the raw classification score
2682
+ # corresponding to each label.
2683
+ #
2684
+ # * `PredictedValue` - Present for a `REGRESSION` `MLModel` request.
2685
+ # @return [Types::Prediction]
2686
+ class PredictOutput < Struct.new(
2687
+ :prediction)
2688
+ include Aws::Structure
2689
+ end
2690
+
2691
+ # The output from a `Predict` operation:
2692
+ #
2693
+ # * `Details` - Contains the following attributes:
2694
+ # `DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY |
2695
+ # MULTICLASS` `DetailsAttributes.ALGORITHM - SGD`
2696
+ #
2697
+ # * `PredictedLabel` - Present for either a `BINARY` or `MULTICLASS`
2698
+ # `MLModel` request.
2699
+ #
2700
+ # * `PredictedScores` - Contains the raw classification score
2701
+ # corresponding to each label.
2702
+ #
2703
+ # * `PredictedValue` - Present for a `REGRESSION` `MLModel` request.
2704
+ # @!attribute [rw] predicted_label
2705
+ # The prediction label for either a `BINARY` or `MULTICLASS`
2706
+ # `MLModel`.
2707
+ # @return [String]
2708
+ #
2709
+ # @!attribute [rw] predicted_value
2710
+ # The prediction value for `REGRESSION` `MLModel`.
2711
+ # @return [Float]
2712
+ #
2713
+ # @!attribute [rw] predicted_scores
2714
+ # Provides the raw classification score corresponding to each label.
2715
+ # @return [Hash<String,Float>]
2716
+ #
2717
+ # @!attribute [rw] details
2718
+ # Provides any additional details regarding the prediction.
2719
+ # @return [Hash<String,String>]
2720
+ class Prediction < Struct.new(
2721
+ :predicted_label,
2722
+ :predicted_value,
2723
+ :predicted_scores,
2724
+ :details)
2725
+ include Aws::Structure
2726
+ end
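+
+ # An illustrative real-time prediction sketch. The endpoint must come
+ # from the model's `endpoint_info`; all values here are placeholders:
+ #
+ #   resp = client.predict(
+ #     ml_model_id: "ml-example",
+ #     record: { "F1" => "some text", "F2" => "42" },
+ #     predict_endpoint: endpoint_url
+ #   )
+ #   pred = resp.prediction
+ #   puts pred.predicted_label || pred.predicted_value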
2727
+
2728
+ # The data specification of an Amazon Relational Database Service
2729
+ # (Amazon RDS) `DataSource`.
2730
+ # @note When making an API call, pass RDSDataSpec
2731
+ # data as a hash:
2732
+ #
2733
+ # {
2734
+ # database_information: { # required
2735
+ # instance_identifier: "RDSInstanceIdentifier", # required
2736
+ # database_name: "RDSDatabaseName", # required
2737
+ # },
2738
+ # select_sql_query: "RDSSelectSqlQuery", # required
2739
+ # database_credentials: { # required
2740
+ # username: "RDSDatabaseUsername", # required
2741
+ # password: "RDSDatabasePassword", # required
2742
+ # },
2743
+ # s3_staging_location: "S3Url", # required
2744
+ # data_rearrangement: "DataRearrangement",
2745
+ # data_schema: "DataSchema",
2746
+ # data_schema_uri: "S3Url",
2747
+ # resource_role: "EDPResourceRole", # required
2748
+ # service_role: "EDPServiceRole", # required
2749
+ # subnet_id: "EDPSubnetId", # required
2750
+ # security_group_ids: ["EDPSecurityGroupId"], # required
2751
+ # }
2752
+ # @!attribute [rw] database_information
2753
+ # Describes the `DatabaseName` and `InstanceIdentifier` of an Amazon
2754
+ # RDS database.
2755
+ # @return [Types::RDSDatabase]
2756
+ #
2757
+ # @!attribute [rw] select_sql_query
2758
+ # The query that is used to retrieve the observation data for the
2759
+ # `DataSource`.
2760
+ # @return [String]
2761
+ #
2762
+ # @!attribute [rw] database_credentials
2763
+ # The AWS Identity and Access Management (IAM) credentials that are
2764
+ # used to connect to the Amazon RDS database.
2765
+ # @return [Types::RDSDatabaseCredentials]
2766
+ #
2767
+ # @!attribute [rw] s3_staging_location
2768
+ # The Amazon S3 location for staging Amazon RDS data. The data
2769
+ # retrieved from Amazon RDS using `SelectSqlQuery` is stored in this
2770
+ # location.
2771
+ # @return [String]
2772
+ #
2773
+ # @!attribute [rw] data_rearrangement
2774
+ # A JSON string that represents the splitting and rearrangement
2775
+ # processing to be applied to a `DataSource`. If the
2776
+ # `DataRearrangement` parameter is not provided, all of the input data
2777
+ # is used to create the `Datasource`.
2778
+ #
2779
+ # There are multiple parameters that control what data is used to
2780
+ # create a datasource:
2781
+ #
2782
+ # * **`percentBegin`**
2783
+ #
2784
+ # Use `percentBegin` to indicate the beginning of the range of the
2785
+ # data used to create the Datasource. If you do not include
2786
+ # `percentBegin` and `percentEnd`, Amazon ML includes all of the
2787
+ # data when creating the datasource.
2788
+ #
2789
+ # * **`percentEnd`**
2790
+ #
2791
+ # Use `percentEnd` to indicate the end of the range of the data used
2792
+ # to create the Datasource. If you do not include `percentBegin` and
2793
+ # `percentEnd`, Amazon ML includes all of the data when creating the
2794
+ # datasource.
2795
+ #
2796
+ # * **`complement`**
2797
+ #
2798
+ # The `complement` parameter instructs Amazon ML to use the data
2799
+ # that is not included in the range of `percentBegin` to
2800
+ # `percentEnd` to create a datasource. The `complement` parameter is
2801
+ # useful if you need to create complementary datasources for
2802
+ # training and evaluation. To create a complementary datasource, use
2803
+ # the same values for `percentBegin` and `percentEnd`, along with
2804
+ # the `complement` parameter.
2805
+ #
2806
+ # For example, the following two datasources do not share any data,
2807
+ # and can be used to train and evaluate a model. The first
2808
+ # datasource has 25 percent of the data, and the second one has 75
2809
+ # percent of the data.
2810
+ #
2811
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":0,
2812
+ # "percentEnd":25\}\}`
2813
+ #
2814
+ # Datasource for training: `\{"splitting":\{"percentBegin":0,
2815
+ # "percentEnd":25, "complement":"true"\}\}`
2816
+ #
2817
+ # * **`strategy`**
2818
+ #
2819
+ # To change how Amazon ML splits the data for a datasource, use the
2820
+ # `strategy` parameter.
2821
+ #
2822
+ # The default value for the `strategy` parameter is `sequential`,
2823
+ # meaning that Amazon ML takes all of the data records between the
2824
+ # `percentBegin` and `percentEnd` parameters for the datasource, in
2825
+ # the order that the records appear in the input data.
2826
+ #
2827
+ # The following two `DataRearrangement` lines are examples of
2828
+ # sequentially ordered training and evaluation datasources:
2829
+ #
2830
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
2831
+ # "percentEnd":100, "strategy":"sequential"\}\}`
2832
+ #
2833
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
2834
+ # "percentEnd":100, "strategy":"sequential",
2835
+ # "complement":"true"\}\}`
2836
+ #
2837
+ # To randomly split the input data into the proportions indicated by
2838
+ # the percentBegin and percentEnd parameters, set the `strategy`
2839
+ # parameter to `random` and provide a string that is used as the
2840
+ # seed value for the random data splitting (for example, you can use
2841
+ # the S3 path to your data as the random seed string). If you choose
2842
+ # the random split strategy, Amazon ML assigns each row of data a
2843
+ # pseudo-random number between 0 and 100, and then selects the rows
2844
+ # that have an assigned number between `percentBegin` and
2845
+ # `percentEnd`. Pseudo-random numbers are assigned using both the
2846
+ # input seed string value and the byte offset as a seed, so changing
2847
+ # the data results in a different split. Any existing ordering is
2848
+ # preserved. The random splitting strategy ensures that variables in
2849
+ # the training and evaluation data are distributed similarly. It is
2850
+ # useful in the cases where the input data may have an implicit sort
2851
+ # order, which would otherwise result in training and evaluation
2852
+ # datasources containing non-similar data records.
2853
+ #
2854
+ # The following two `DataRearrangement` lines are examples of
2855
+ # non-sequentially ordered training and evaluation datasources:
2856
+ #
2857
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
2858
+ # "percentEnd":100, "strategy":"random",
2859
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv"\}\}`
2860
+ #
2861
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
2862
+ # "percentEnd":100, "strategy":"random",
2863
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv",
2864
+ # "complement":"true"\}\}`
2865
+ # @return [String]
2866
+ #
2867
+ # @!attribute [rw] data_schema
2868
+ # A JSON string that represents the schema for an Amazon RDS
2869
+ # `DataSource`. The `DataSchema` defines the structure of the
2870
+ # observation data in the data file(s) referenced in the `DataSource`.
2871
+ #
2872
+ # A `DataSchema` is not required if you specify a `DataSchemaUri`.
2873
+ #
2874
+ # Define your `DataSchema` as a series of key-value pairs.
2875
+ # `attributes` and `excludedVariableNames` have an array of key-value
2876
+ # pairs for their value. Use the following format to define your
2877
+ # `DataSchema`.
2878
+ #
2879
+ # \\\{ "version": "1.0",
2880
+ #
2881
+ # "recordAnnotationFieldName": "F1",
2882
+ #
2883
+ # "recordWeightFieldName": "F2",
2884
+ #
2885
+ # "targetFieldName": "F3",
2886
+ #
2887
+ # "dataFormat": "CSV",
2888
+ #
2889
+ # "dataFileContainsHeader": true,
2890
+ #
2891
+ # "attributes": \[
2892
+ #
2893
+ # \\\{ "fieldName": "F1", "fieldType": "TEXT" \\}, \\\{
2894
+ # "fieldName": "F2", "fieldType": "NUMERIC" \\}, \\\{
2895
+ # "fieldName": "F3", "fieldType": "CATEGORICAL" \\}, \\\{
2896
+ # "fieldName": "F4", "fieldType": "NUMERIC" \\}, \\\{
2897
+ # "fieldName": "F5", "fieldType": "CATEGORICAL" \\}, \\\{
2898
+ # "fieldName": "F6", "fieldType": "TEXT" \\}, \\\{
2899
+ # "fieldName": "F7", "fieldType": "WEIGHTED\_INT\_SEQUENCE"
2900
+ # \\}, \\\{ "fieldName": "F8", "fieldType":
2901
+ # "WEIGHTED\_STRING\_SEQUENCE" \\} \],
2902
+ #
2903
+ # "excludedVariableNames": \[ "F6" \] \\}
2904
+ #
2905
+ # <?oxy\_insert\_end>
2906
+ # @return [String]
2907
+ #
2908
+ # @!attribute [rw] data_schema_uri
2909
+ # The Amazon S3 location of the `DataSchema`.
2910
+ # @return [String]
2911
+ #
2912
+ # @!attribute [rw] resource_role
2913
+ # The role (DataPipelineDefaultResourceRole) assumed by an Amazon
2914
+ # Elastic Compute Cloud (Amazon EC2) instance to carry out the copy
2915
+ # operation from Amazon RDS to an Amazon S3 task. For more
2916
+ # information, see [Role templates][1] for data pipelines.
2917
+ #
2918
+ #
2919
+ #
2920
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
2921
+ # @return [String]
2922
+ #
2923
+ # @!attribute [rw] service_role
2924
+ # The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline
2925
+ # service to monitor the progress of the copy task from Amazon RDS to
2926
+ # Amazon S3. For more information, see [Role templates][1] for data
2927
+ # pipelines.
2928
+ #
2929
+ #
2930
+ #
2931
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
2932
+ # @return [String]
2933
+ #
2934
+ # @!attribute [rw] subnet_id
2935
+ # The subnet ID to be used to access a VPC-based RDS DB instance. This
2936
+ # attribute is used by Data Pipeline to carry out the copy task from
2937
+ # Amazon RDS to Amazon S3.
2938
+ # @return [String]
2939
+ #
2940
+ # @!attribute [rw] security_group_ids
2941
+ # The security group IDs to be used to access a VPC-based RDS DB
2942
+ # instance. Ensure that there are appropriate ingress rules set up to
2943
+ # allow access to the RDS DB instance. This attribute is used by Data
2944
+ # Pipeline to carry out the copy operation from Amazon RDS to
2945
+ # Amazon S3.
2946
+ # @return [Array<String>]
2947
+ class RDSDataSpec < Struct.new(
2948
+ :database_information,
2949
+ :select_sql_query,
2950
+ :database_credentials,
2951
+ :s3_staging_location,
2952
+ :data_rearrangement,
2953
+ :data_schema,
2954
+ :data_schema_uri,
2955
+ :resource_role,
2956
+ :service_role,
2957
+ :subnet_id,
2958
+ :security_group_ids)
2959
+ include Aws::Structure
2960
+ end
2961
+
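+ # A minimal sketch (not generated documentation) of passing an
+ # `RDSDataSpec` hash to `Client#create_data_source_from_rds`; every
+ # identifier, ARN, credential, and S3 path below is an illustrative
+ # assumption:
+ #
+ #     ml = Aws::MachineLearning::Client.new(region: "us-east-1")
+ #     ml.create_data_source_from_rds(
+ #       data_source_id: "my-rds-datasource",   # hypothetical ID
+ #       data_source_name: "RDS training data",
+ #       role_arn: "arn:aws:iam::123456789012:role/AmazonMLRole",
+ #       rds_data: {
+ #         database_information: {
+ #           instance_identifier: "my-db-instance",
+ #           database_name: "mydb",
+ #         },
+ #         select_sql_query: "SELECT * FROM observations",
+ #         database_credentials: { username: "ml_user", password: "..." },
+ #         s3_staging_location: "s3://my-bucket/staging/",
+ #         resource_role: "DataPipelineDefaultResourceRole",
+ #         service_role: "DataPipelineDefaultRole",
+ #         subnet_id: "subnet-12345678",
+ #         security_group_ids: ["sg-12345678"],
+ #       })
+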
2962
+ # The database details of an Amazon RDS database.
2963
+ # @note When making an API call, pass RDSDatabase
2964
+ # data as a hash:
2965
+ #
2966
+ # {
2967
+ # instance_identifier: "RDSInstanceIdentifier", # required
2968
+ # database_name: "RDSDatabaseName", # required
2969
+ # }
2970
+ # @!attribute [rw] instance_identifier
2971
+ # The ID of an RDS DB instance.
2972
+ # @return [String]
2973
+ #
2974
+ # @!attribute [rw] database_name
2975
+ # The name of a database hosted on an RDS DB instance.
2976
+ # @return [String]
2977
+ class RDSDatabase < Struct.new(
2978
+ :instance_identifier,
2979
+ :database_name)
2980
+ include Aws::Structure
2981
+ end
2982
+
2983
+ # The database credentials to connect to a database on an RDS DB
2984
+ # instance.
2985
+ # @note When making an API call, pass RDSDatabaseCredentials
2986
+ # data as a hash:
2987
+ #
2988
+ # {
2989
+ # username: "RDSDatabaseUsername", # required
2990
+ # password: "RDSDatabasePassword", # required
2991
+ # }
2992
+ # @!attribute [rw] username
2993
+ # The username to be used by Amazon ML to connect to a database on an
2994
+ # Amazon RDS instance. The username should have sufficient permissions
2995
+ # to execute an `RDSSelectSqlQuery` query.
2996
+ # @return [String]
2997
+ #
2998
+ # @!attribute [rw] password
2999
+ # The password to be used by Amazon ML to connect to a database on an
3000
+ # RDS DB instance. The password should have sufficient permissions to
3001
+ # execute the `RDSSelectSqlQuery` query.
3002
+ # @return [String]
3003
+ class RDSDatabaseCredentials < Struct.new(
3004
+ :username,
3005
+ :password)
3006
+ include Aws::Structure
3007
+ end
3008
+
3009
+ # The datasource details that are specific to Amazon RDS.
3010
+ # @!attribute [rw] database
3011
+ # The database details required to connect to an Amazon RDS database.
3012
+ # @return [Types::RDSDatabase]
3013
+ #
3014
+ # @!attribute [rw] database_user_name
3015
+ # The username to be used by Amazon ML to connect to a database on an
3016
+ # Amazon RDS instance. The username should have sufficient permissions
3017
+ # to execute an `RDSSelectSqlQuery` query.
3018
+ # @return [String]
3019
+ #
3020
+ # @!attribute [rw] select_sql_query
3021
+ # The SQL query that is supplied during CreateDataSourceFromRDS.
3022
+ # Returned only if `Verbose` is true in `GetDataSourceInput`.
3023
+ # @return [String]
3024
+ #
3025
+ # @!attribute [rw] resource_role
3026
+ # The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2
3027
+ # instance to carry out the copy task from Amazon RDS to Amazon S3.
3028
+ # For more information, see [Role templates][1] for data pipelines.
3029
+ #
3030
+ #
3031
+ #
3032
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
3033
+ # @return [String]
3034
+ #
3035
+ # @!attribute [rw] service_role
3036
+ # The role (DataPipelineDefaultRole) assumed by the Data Pipeline
3037
+ # service to monitor the progress of the copy task from Amazon RDS to
3038
+ # Amazon S3. For more information, see [Role templates][1] for data
3039
+ # pipelines.
3040
+ #
3041
+ #
3042
+ #
3043
+ # [1]: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html
3044
+ # @return [String]
3045
+ #
3046
+ # @!attribute [rw] data_pipeline_id
3047
+ # The ID of the Data Pipeline instance that is used to copy
3048
+ # data from Amazon RDS to Amazon S3. You can use the ID to find
3049
+ # details about the instance in the Data Pipeline console.
3050
+ # @return [String]
3051
+ class RDSMetadata < Struct.new(
3052
+ :database,
3053
+ :database_user_name,
3054
+ :select_sql_query,
3055
+ :resource_role,
3056
+ :service_role,
3057
+ :data_pipeline_id)
3058
+ include Aws::Structure
3059
+ end
3060
+
3061
+ # Describes the real-time endpoint information for an `MLModel`.
3062
+ # @!attribute [rw] peak_requests_per_second
3063
+ # The maximum processing rate for the real-time endpoint for
3064
+ # `MLModel`, measured in incoming requests per second.
3065
+ # @return [Integer]
3066
+ #
3067
+ # @!attribute [rw] created_at
3068
+ # The time that the request to create the real-time endpoint for the
3069
+ # `MLModel` was received. The time is expressed in epoch time.
3070
+ # @return [Time]
3071
+ #
3072
+ # @!attribute [rw] endpoint_url
3073
+ # The URI that specifies where to send real-time prediction requests
3074
+ # for the `MLModel`.
3075
+ #
3076
+ # <note markdown="1"> The application must wait until the real-time endpoint is ready
3077
+ # before using this URI.
3078
+ #
3079
+ # </note>
3080
+ # @return [String]
3081
+ #
3082
+ # @!attribute [rw] endpoint_status
3083
+ # The current status of the real-time endpoint for the `MLModel`. This
3084
+ # element can have one of the following values:
3085
+ #
3086
+ # * `NONE` - Endpoint does not exist or was previously deleted.
3087
+ # * `READY` - Endpoint is ready to be used for real-time predictions.
3088
+ # * `UPDATING` - Updating/creating the endpoint.
3089
+ # @return [String]
3090
+ class RealtimeEndpointInfo < Struct.new(
3091
+ :peak_requests_per_second,
3092
+ :created_at,
3093
+ :endpoint_url,
3094
+ :endpoint_status)
3095
+ include Aws::Structure
3096
+ end
3097
+
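+ # A minimal polling sketch (an assumption, not generated
+ # documentation) that waits for the endpoint to reach `READY` before
+ # sending real-time predictions, reusing the `ml` client from the
+ # sketch above; it assumes `get_ml_model` exposes this structure as
+ # `endpoint_info`, and the model ID and 30-second interval are
+ # illustrative:
+ #
+ #     ml.create_realtime_endpoint(ml_model_id: "my-model-id")
+ #     loop do
+ #       status = ml.get_ml_model(ml_model_id: "my-model-id")
+ #                  .endpoint_info.endpoint_status
+ #       break if status == "READY"
+ #       sleep 30   # endpoint creation is asynchronous
+ #     end
+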
3098
+ # Describes the data specification of an Amazon Redshift `DataSource`.
3099
+ # @note When making an API call, pass RedshiftDataSpec
3100
+ # data as a hash:
3101
+ #
3102
+ # {
3103
+ # database_information: { # required
3104
+ # database_name: "RedshiftDatabaseName", # required
3105
+ # cluster_identifier: "RedshiftClusterIdentifier", # required
3106
+ # },
3107
+ # select_sql_query: "RedshiftSelectSqlQuery", # required
3108
+ # database_credentials: { # required
3109
+ # username: "RedshiftDatabaseUsername", # required
3110
+ # password: "RedshiftDatabasePassword", # required
3111
+ # },
3112
+ # s3_staging_location: "S3Url", # required
3113
+ # data_rearrangement: "DataRearrangement",
3114
+ # data_schema: "DataSchema",
3115
+ # data_schema_uri: "S3Url",
3116
+ # }
3117
+ # @!attribute [rw] database_information
3118
+ # Describes the `DatabaseName` and `ClusterIdentifier` for an Amazon
3119
+ # Redshift `DataSource`.
3120
+ # @return [Types::RedshiftDatabase]
3121
+ #
3122
+ # @!attribute [rw] select_sql_query
3123
+ # Describes the SQL Query to execute on an Amazon Redshift database
3124
+ # for an Amazon Redshift `DataSource`.
3125
+ # @return [String]
3126
+ #
3127
+ # @!attribute [rw] database_credentials
3128
+ # Describes AWS Identity and Access Management (IAM) credentials that
3129
+ # are used to connect to the Amazon Redshift database.
3130
+ # @return [Types::RedshiftDatabaseCredentials]
3131
+ #
3132
+ # @!attribute [rw] s3_staging_location
3133
+ # Describes an Amazon S3 location to store the result set of the
3134
+ # `SelectSqlQuery` query.
3135
+ # @return [String]
3136
+ #
3137
+ # @!attribute [rw] data_rearrangement
3138
+ # A JSON string that represents the splitting and rearrangement
3139
+ # processing to be applied to a `DataSource`. If the
3140
+ # `DataRearrangement` parameter is not provided, all of the input data
3141
+ # is used to create the `Datasource`.
3142
+ #
3143
+ # There are multiple parameters that control what data is used to
3144
+ # create a datasource:
3145
+ #
3146
+ # * **`percentBegin`**
3147
+ #
3148
+ # Use `percentBegin` to indicate the beginning of the range of the
3149
+ # data used to create the Datasource. If you do not include
3150
+ # `percentBegin` and `percentEnd`, Amazon ML includes all of the
3151
+ # data when creating the datasource.
3152
+ #
3153
+ # * **`percentEnd`**
3154
+ #
3155
+ # Use `percentEnd` to indicate the end of the range of the data used
3156
+ # to create the Datasource. If you do not include `percentBegin` and
3157
+ # `percentEnd`, Amazon ML includes all of the data when creating the
3158
+ # datasource.
3159
+ #
3160
+ # * **`complement`**
3161
+ #
3162
+ # The `complement` parameter instructs Amazon ML to use the data
3163
+ # that is not included in the range of `percentBegin` to
3164
+ # `percentEnd` to create a datasource. The `complement` parameter is
3165
+ # useful if you need to create complementary datasources for
3166
+ # training and evaluation. To create a complementary datasource, use
3167
+ # the same values for `percentBegin` and `percentEnd`, along with
3168
+ # the `complement` parameter.
3169
+ #
3170
+ # For example, the following two datasources do not share any data,
3171
+ # and can be used to train and evaluate a model. The first
3172
+ # datasource has 25 percent of the data, and the second one has 75
3173
+ # percent of the data.
3174
+ #
3175
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":0,
3176
+ # "percentEnd":25\}\}`
3177
+ #
3178
+ # Datasource for training: `\{"splitting":\{"percentBegin":0,
3179
+ # "percentEnd":25, "complement":"true"\}\}`
3180
+ #
3181
+ # * **`strategy`**
3182
+ #
3183
+ # To change how Amazon ML splits the data for a datasource, use the
3184
+ # `strategy` parameter.
3185
+ #
3186
+ # The default value for the `strategy` parameter is `sequential`,
3187
+ # meaning that Amazon ML takes all of the data records between the
3188
+ # `percentBegin` and `percentEnd` parameters for the datasource, in
3189
+ # the order that the records appear in the input data.
3190
+ #
3191
+ # The following two `DataRearrangement` lines are examples of
3192
+ # sequentially ordered training and evaluation datasources:
3193
+ #
3194
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
3195
+ # "percentEnd":100, "strategy":"sequential"\}\}`
3196
+ #
3197
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
3198
+ # "percentEnd":100, "strategy":"sequential",
3199
+ # "complement":"true"\}\}`
3200
+ #
3201
+ # To randomly split the input data into the proportions indicated by
3202
+ # the `percentBegin` and `percentEnd` parameters, set the `strategy`
3203
+ # parameter to `random` and provide a string that is used as the
3204
+ # seed value for the random data splitting (for example, you can use
3205
+ # the S3 path to your data as the random seed string). If you choose
3206
+ # the random split strategy, Amazon ML assigns each row of data a
3207
+ # pseudo-random number between 0 and 100, and then selects the rows
3208
+ # that have an assigned number between `percentBegin` and
3209
+ # `percentEnd`. Pseudo-random numbers are assigned using both the
3210
+ # input seed string value and the byte offset as a seed, so changing
3211
+ # the data results in a different split. Any existing ordering is
3212
+ # preserved. The random splitting strategy ensures that variables in
3213
+ # the training and evaluation data are distributed similarly. It is
3214
+ # useful in the cases where the input data may have an implicit sort
3215
+ # order, which would otherwise result in training and evaluation
3216
+ # datasources containing non-similar data records.
3217
+ #
3218
+ # The following two `DataRearrangement` lines are examples of
3219
+ # non-sequentially ordered training and evaluation datasources:
3220
+ #
3221
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
3222
+ # "percentEnd":100, "strategy":"random",
3223
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv"\}\}`
3224
+ #
3225
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
3226
+ # "percentEnd":100, "strategy":"random",
3227
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv",
3228
+ # "complement":"true"\}\}`
3229
+ # @return [String]
3230
+ #
3231
+ # @!attribute [rw] data_schema
3232
+ # A JSON string that represents the schema for an Amazon Redshift
3233
+ # `DataSource`. The `DataSchema` defines the structure of the
3234
+ # observation data in the data file(s) referenced in the `DataSource`.
3235
+ #
3236
+ # A `DataSchema` is not required if you specify a `DataSchemaUri`.
3237
+ #
3238
+ # Define your `DataSchema` as a series of key-value pairs.
3239
+ # `attributes` and `excludedVariableNames` have an array of key-value
3240
+ # pairs for their value. Use the following format to define your
3241
+ # `DataSchema`.
3242
+ #
3243
+ # \\\{ "version": "1.0",
3244
+ #
3245
+ # "recordAnnotationFieldName": "F1",
3246
+ #
3247
+ # "recordWeightFieldName": "F2",
3248
+ #
3249
+ # "targetFieldName": "F3",
3250
+ #
3251
+ # "dataFormat": "CSV",
3252
+ #
3253
+ # "dataFileContainsHeader": true,
3254
+ #
3255
+ # "attributes": \[
3256
+ #
3257
+ # \\\{ "fieldName": "F1", "fieldType": "TEXT" \\}, \\\{
3258
+ # "fieldName": "F2", "fieldType": "NUMERIC" \\}, \\\{
3259
+ # "fieldName": "F3", "fieldType": "CATEGORICAL" \\}, \\\{
3260
+ # "fieldName": "F4", "fieldType": "NUMERIC" \\}, \\\{
3261
+ # "fieldName": "F5", "fieldType": "CATEGORICAL" \\}, \\\{
3262
+ # "fieldName": "F6", "fieldType": "TEXT" \\}, \\\{
3263
+ # "fieldName": "F7", "fieldType": "WEIGHTED\_INT\_SEQUENCE"
3264
+ # \\}, \\\{ "fieldName": "F8", "fieldType":
3265
+ # "WEIGHTED\_STRING\_SEQUENCE" \\} \],
3266
+ #
3267
+ # "excludedVariableNames": \[ "F6" \] \\}
3268
+ # @return [String]
3269
+ #
3270
+ # @!attribute [rw] data_schema_uri
3271
+ # Describes the schema location for an Amazon Redshift `DataSource`.
3272
+ # @return [String]
3273
+ class RedshiftDataSpec < Struct.new(
3274
+ :database_information,
3275
+ :select_sql_query,
3276
+ :database_credentials,
3277
+ :s3_staging_location,
3278
+ :data_rearrangement,
3279
+ :data_schema,
3280
+ :data_schema_uri)
3281
+ include Aws::Structure
3282
+ end
3283
+
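+ # A minimal sketch (an assumption, not generated documentation) of
+ # passing a `RedshiftDataSpec` hash to
+ # `Client#create_data_source_from_redshift`; the cluster, query,
+ # credentials, and S3 locations are illustrative:
+ #
+ #     ml.create_data_source_from_redshift(
+ #       data_source_id: "my-redshift-datasource",
+ #       role_arn: "arn:aws:iam::123456789012:role/AmazonMLRole",
+ #       data_spec: {
+ #         database_information: {
+ #           database_name: "dev",
+ #           cluster_identifier: "my-cluster",
+ #         },
+ #         select_sql_query: "SELECT * FROM observations",
+ #         database_credentials: { username: "ml_user", password: "..." },
+ #         s3_staging_location: "s3://my-bucket/staging/",
+ #         data_schema_uri: "s3://my-bucket/schema.json",
+ #       })
+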
3284
+ # Describes the database details required to connect to an Amazon
3285
+ # Redshift database.
3286
+ # @note When making an API call, pass RedshiftDatabase
3287
+ # data as a hash:
3288
+ #
3289
+ # {
3290
+ # database_name: "RedshiftDatabaseName", # required
3291
+ # cluster_identifier: "RedshiftClusterIdentifier", # required
3292
+ # }
3293
+ # @!attribute [rw] database_name
3294
+ # The name of a database hosted on an Amazon Redshift cluster.
3295
+ # @return [String]
3296
+ #
3297
+ # @!attribute [rw] cluster_identifier
3298
+ # The ID of an Amazon Redshift cluster.
3299
+ # @return [String]
3300
+ class RedshiftDatabase < Struct.new(
3301
+ :database_name,
3302
+ :cluster_identifier)
3303
+ include Aws::Structure
3304
+ end
3305
+
3306
+ # Describes the database credentials for connecting to a database on an
3307
+ # Amazon Redshift cluster.
3308
+ # @note When making an API call, pass RedshiftDatabaseCredentials
3309
+ # data as a hash:
3310
+ #
3311
+ # {
3312
+ # username: "RedshiftDatabaseUsername", # required
3313
+ # password: "RedshiftDatabasePassword", # required
3314
+ # }
3315
+ # @!attribute [rw] username
3316
+ # A username to be used by Amazon Machine Learning (Amazon ML) to
3317
+ # connect to a database on an Amazon Redshift cluster. The username
3318
+ # should have sufficient permissions to execute the
3319
+ # `RedshiftSelectSqlQuery` query. The username should be valid for an
3320
+ # Amazon Redshift [USER][1].
3321
+ #
3322
+ #
3323
+ #
3324
+ # [1]: http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html
3325
+ # @return [String]
3326
+ #
3327
+ # @!attribute [rw] password
3328
+ # A password to be used by Amazon ML to connect to a database on an
3329
+ # Amazon Redshift cluster. The password should have sufficient
3330
+ # permissions to execute a `RedshiftSelectSqlQuery` query. The
3331
+ # password should be valid for an Amazon Redshift [USER][1].
3332
+ #
3333
+ #
3334
+ #
3335
+ # [1]: http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html
3336
+ # @return [String]
3337
+ class RedshiftDatabaseCredentials < Struct.new(
3338
+ :username,
3339
+ :password)
3340
+ include Aws::Structure
3341
+ end
3342
+
3343
+ # Describes the `DataSource` details specific to Amazon Redshift.
3344
+ # @!attribute [rw] redshift_database
3345
+ # Describes the database details required to connect to an Amazon
3346
+ # Redshift database.
3347
+ # @return [Types::RedshiftDatabase]
3348
+ #
3349
+ # @!attribute [rw] database_user_name
3350
+ # A username to be used by Amazon Machine Learning (Amazon ML) to
3351
+ # connect to a database on an Amazon Redshift cluster. The username
3352
+ # should have sufficient permissions to execute the
3353
+ # `RedshiftSelectSqlQuery` query. The username should be valid for an
3354
+ # Amazon Redshift [USER][1].
3355
+ #
3356
+ #
3357
+ #
3358
+ # [1]: http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html
3359
+ # @return [String]
3360
+ #
3361
+ # @!attribute [rw] select_sql_query
3362
+ # The SQL query that is specified during CreateDataSourceFromRedshift.
3363
+ # Returned only if `Verbose` is true in `GetDataSourceInput`.
3364
+ # @return [String]
3365
+ class RedshiftMetadata < Struct.new(
3366
+ :redshift_database,
3367
+ :database_user_name,
3368
+ :select_sql_query)
3369
+ include Aws::Structure
3370
+ end
3371
+
3372
+ # Describes the data specification of a `DataSource`.
3373
+ # @note When making an API call, pass S3DataSpec
3374
+ # data as a hash:
3375
+ #
3376
+ # {
3377
+ # data_location_s3: "S3Url", # required
3378
+ # data_rearrangement: "DataRearrangement",
3379
+ # data_schema: "DataSchema",
3380
+ # data_schema_location_s3: "S3Url",
3381
+ # }
3382
+ # @!attribute [rw] data_location_s3
3383
+ # The location of the data file(s) used by a `DataSource`. The URI
3384
+ # specifies a data file or an Amazon Simple Storage Service (Amazon
3385
+ # S3) directory or bucket containing data files.
3386
+ # @return [String]
3387
+ #
3388
+ # @!attribute [rw] data_rearrangement
3389
+ # A JSON string that represents the splitting and rearrangement
3390
+ # processing to be applied to a `DataSource`. If the
3391
+ # `DataRearrangement` parameter is not provided, all of the input data
3392
+ # is used to create the `Datasource`.
3393
+ #
3394
+ # There are multiple parameters that control what data is used to
3395
+ # create a datasource:
3396
+ #
3397
+ # * **`percentBegin`**
3398
+ #
3399
+ # Use `percentBegin` to indicate the beginning of the range of the
3400
+ # data used to create the Datasource. If you do not include
3401
+ # `percentBegin` and `percentEnd`, Amazon ML includes all of the
3402
+ # data when creating the datasource.
3403
+ #
3404
+ # * **`percentEnd`**
3405
+ #
3406
+ # Use `percentEnd` to indicate the end of the range of the data used
3407
+ # to create the Datasource. If you do not include `percentBegin` and
3408
+ # `percentEnd`, Amazon ML includes all of the data when creating the
3409
+ # datasource.
3410
+ #
3411
+ # * **`complement`**
3412
+ #
3413
+ # The `complement` parameter instructs Amazon ML to use the data
3414
+ # that is not included in the range of `percentBegin` to
3415
+ # `percentEnd` to create a datasource. The `complement` parameter is
3416
+ # useful if you need to create complementary datasources for
3417
+ # training and evaluation. To create a complementary datasource, use
3418
+ # the same values for `percentBegin` and `percentEnd`, along with
3419
+ # the `complement` parameter.
3420
+ #
3421
+ # For example, the following two datasources do not share any data,
3422
+ # and can be used to train and evaluate a model. The first
3423
+ # datasource has 25 percent of the data, and the second one has 75
3424
+ # percent of the data.
3425
+ #
3426
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":0,
3427
+ # "percentEnd":25\}\}`
3428
+ #
3429
+ # Datasource for training: `\{"splitting":\{"percentBegin":0,
3430
+ # "percentEnd":25, "complement":"true"\}\}`
3431
+ #
3432
+ # * **`strategy`**
3433
+ #
3434
+ # To change how Amazon ML splits the data for a datasource, use the
3435
+ # `strategy` parameter.
3436
+ #
3437
+ # The default value for the `strategy` parameter is `sequential`,
3438
+ # meaning that Amazon ML takes all of the data records between the
3439
+ # `percentBegin` and `percentEnd` parameters for the datasource, in
3440
+ # the order that the records appear in the input data.
3441
+ #
3442
+ # The following two `DataRearrangement` lines are examples of
3443
+ # sequentially ordered training and evaluation datasources:
3444
+ #
3445
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
3446
+ # "percentEnd":100, "strategy":"sequential"\}\}`
3447
+ #
3448
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
3449
+ # "percentEnd":100, "strategy":"sequential",
3450
+ # "complement":"true"\}\}`
3451
+ #
3452
+ # To randomly split the input data into the proportions indicated by
3453
+ # the `percentBegin` and `percentEnd` parameters, set the `strategy`
3454
+ # parameter to `random` and provide a string that is used as the
3455
+ # seed value for the random data splitting (for example, you can use
3456
+ # the S3 path to your data as the random seed string). If you choose
3457
+ # the random split strategy, Amazon ML assigns each row of data a
3458
+ # pseudo-random number between 0 and 100, and then selects the rows
3459
+ # that have an assigned number between `percentBegin` and
3460
+ # `percentEnd`. Pseudo-random numbers are assigned using both the
3461
+ # input seed string value and the byte offset as a seed, so changing
3462
+ # the data results in a different split. Any existing ordering is
3463
+ # preserved. The random splitting strategy ensures that variables in
3464
+ # the training and evaluation data are distributed similarly. It is
3465
+ # useful in the cases where the input data may have an implicit sort
3466
+ # order, which would otherwise result in training and evaluation
3467
+ # datasources containing non-similar data records.
3468
+ #
3469
+ # The following two `DataRearrangement` lines are examples of
3470
+ # non-sequentially ordered training and evaluation datasources:
3471
+ #
3472
+ # Datasource for evaluation: `\{"splitting":\{"percentBegin":70,
3473
+ # "percentEnd":100, "strategy":"random",
3474
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv"\}\}`
3475
+ #
3476
+ # Datasource for training: `\{"splitting":\{"percentBegin":70,
3477
+ # "percentEnd":100, "strategy":"random",
3478
+ # "randomSeed"="s3://my_s3_path/bucket/file.csv",
3479
+ # "complement":"true"\}\}`
3480
+ # @return [String]
3481
+ #
3482
+ # @!attribute [rw] data_schema
3483
+ # A JSON string that represents the schema for an Amazon S3
3484
+ # `DataSource`. The `DataSchema` defines the structure of the
3485
+ # observation data in the data file(s) referenced in the `DataSource`.
3486
+ #
3487
+ # You must provide either the `DataSchema` or the
3488
+ # `DataSchemaLocationS3`.
3489
+ #
3490
+ # Define your `DataSchema` as a series of key-value pairs.
3491
+ # `attributes` and `excludedVariableNames` have an array of key-value
3492
+ # pairs for their value. Use the following format to define your
3493
+ # `DataSchema`.
3494
+ #
3495
+ # \\\{ "version": "1.0",
3496
+ #
3497
+ # "recordAnnotationFieldName": "F1",
3498
+ #
3499
+ # "recordWeightFieldName": "F2",
3500
+ #
3501
+ # "targetFieldName": "F3",
3502
+ #
3503
+ # "dataFormat": "CSV",
3504
+ #
3505
+ # "dataFileContainsHeader": true,
3506
+ #
3507
+ # "attributes": \[
3508
+ #
3509
+ # \\\{ "fieldName": "F1", "fieldType": "TEXT" \\}, \\\{
3510
+ # "fieldName": "F2", "fieldType": "NUMERIC" \\}, \\\{
3511
+ # "fieldName": "F3", "fieldType": "CATEGORICAL" \\}, \\\{
3512
+ # "fieldName": "F4", "fieldType": "NUMERIC" \\}, \\\{
3513
+ # "fieldName": "F5", "fieldType": "CATEGORICAL" \\}, \\\{
3514
+ # "fieldName": "F6", "fieldType": "TEXT" \\}, \\\{
3515
+ # "fieldName": "F7", "fieldType": "WEIGHTED\_INT\_SEQUENCE"
3516
+ # \\}, \\\{ "fieldName": "F8", "fieldType":
3517
+ # "WEIGHTED\_STRING\_SEQUENCE" \\} \],
3518
+ #
3519
+ # "excludedVariableNames": \[ "F6" \] \\}
3520
+ #
3522
+ # @return [String]
3523
+ #
3524
+ # @!attribute [rw] data_schema_location_s3
3525
+ # Describes the schema location in Amazon S3. You must provide either
3526
+ # the `DataSchema` or the `DataSchemaLocationS3`.
3527
+ # @return [String]
3528
+ class S3DataSpec < Struct.new(
3529
+ :data_location_s3,
3530
+ :data_rearrangement,
3531
+ :data_schema,
3532
+ :data_schema_location_s3)
3533
+ include Aws::Structure
3534
+ end
3535
+
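+ # A minimal sketch (an assumption, not generated documentation) of
+ # passing an `S3DataSpec` hash to `Client#create_data_source_from_s3`,
+ # carving out a sequential 0-70 training split; the bucket paths are
+ # illustrative:
+ #
+ #     ml.create_data_source_from_s3(
+ #       data_source_id: "my-s3-datasource",
+ #       compute_statistics: true,
+ #       data_spec: {
+ #         data_location_s3: "s3://my-bucket/data.csv",
+ #         data_schema_location_s3: "s3://my-bucket/schema.json",
+ #         data_rearrangement:
+ #           '{"splitting":{"percentBegin":0,"percentEnd":70,' \
+ #           '"strategy":"sequential"}}',
+ #       })
+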
3536
+ # A custom key-value pair associated with an ML object, such as an ML
3537
+ # model.
3538
+ # @note When making an API call, pass Tag
3539
+ # data as a hash:
3540
+ #
3541
+ # {
3542
+ # key: "TagKey",
3543
+ # value: "TagValue",
3544
+ # }
3545
+ # @!attribute [rw] key
3546
+ # A unique identifier for the tag. Valid characters include Unicode
3547
+ # letters, digits, white space, \_, ., /, =, +, -, %, and @.
3548
+ # @return [String]
3549
+ #
3550
+ # @!attribute [rw] value
3551
+ # An optional string, typically used to describe or define the tag.
3552
+ # Valid characters include Unicode letters, digits, white space, \_,
3553
+ # ., /, =, +, -, %, and @.
3554
+ # @return [String]
3555
+ class Tag < Struct.new(
3556
+ :key,
3557
+ :value)
3558
+ include Aws::Structure
3559
+ end
3560
+
3561
+ # @note When making an API call, pass UpdateBatchPredictionInput
3562
+ # data as a hash:
3563
+ #
3564
+ # {
3565
+ # batch_prediction_id: "EntityId", # required
3566
+ # batch_prediction_name: "EntityName", # required
3567
+ # }
3568
+ # @!attribute [rw] batch_prediction_id
3569
+ # The ID assigned to the `BatchPrediction` during creation.
3570
+ # @return [String]
3571
+ #
3572
+ # @!attribute [rw] batch_prediction_name
3573
+ # A new user-supplied name or description of the `BatchPrediction`.
3574
+ # @return [String]
3575
+ class UpdateBatchPredictionInput < Struct.new(
3576
+ :batch_prediction_id,
3577
+ :batch_prediction_name)
3578
+ include Aws::Structure
3579
+ end
3580
+
3581
+ # Represents the output of an `UpdateBatchPrediction` operation.
3582
+ #
3583
+ # You can see the updated content by using the `GetBatchPrediction`
3584
+ # operation.
3585
+ # @!attribute [rw] batch_prediction_id
3586
+ # The ID assigned to the `BatchPrediction` during creation. This value
3587
+ # should be identical to the value of the `BatchPredictionId` in the
3588
+ # request.
3589
+ # @return [String]
3590
+ class UpdateBatchPredictionOutput < Struct.new(
3591
+ :batch_prediction_id)
3592
+ include Aws::Structure
3593
+ end
3594
+
3595
+ # @note When making an API call, pass UpdateDataSourceInput
3596
+ # data as a hash:
3597
+ #
3598
+ # {
3599
+ # data_source_id: "EntityId", # required
3600
+ # data_source_name: "EntityName", # required
3601
+ # }
3602
+ # @!attribute [rw] data_source_id
3603
+ # The ID assigned to the `DataSource` during creation.
3604
+ # @return [String]
3605
+ #
3606
+ # @!attribute [rw] data_source_name
3607
+ # A new user-supplied name or description of the `DataSource` that
3608
+ # will replace the current description.
3609
+ # @return [String]
3610
+ class UpdateDataSourceInput < Struct.new(
3611
+ :data_source_id,
3612
+ :data_source_name)
3613
+ include Aws::Structure
3614
+ end
3615
+
3616
+ # Represents the output of an `UpdateDataSource` operation.
3617
+ #
3618
+ # You can see the updated content by using the `GetDataSource`
3619
+ # operation.
3620
+ # @!attribute [rw] data_source_id
3621
+ # The ID assigned to the `DataSource` during creation. This value
3622
+ # should be identical to the value of the `DataSourceId` in the
3623
+ # request.
3624
+ # @return [String]
3625
+ class UpdateDataSourceOutput < Struct.new(
3626
+ :data_source_id)
3627
+ include Aws::Structure
3628
+ end
3629
+
3630
+ # @note When making an API call, pass UpdateEvaluationInput
3631
+ # data as a hash:
3632
+ #
3633
+ # {
3634
+ # evaluation_id: "EntityId", # required
3635
+ # evaluation_name: "EntityName", # required
3636
+ # }
3637
+ # @!attribute [rw] evaluation_id
3638
+ # The ID assigned to the `Evaluation` during creation.
3639
+ # @return [String]
3640
+ #
3641
+ # @!attribute [rw] evaluation_name
3642
+ # A new user-supplied name or description of the `Evaluation` that
3643
+ # will replace the current content.
3644
+ # @return [String]
3645
+ class UpdateEvaluationInput < Struct.new(
3646
+ :evaluation_id,
3647
+ :evaluation_name)
3648
+ include Aws::Structure
3649
+ end
3650
+
3651
+ # Represents the output of an `UpdateEvaluation` operation.
3652
+ #
3653
+ # You can see the updated content by using the `GetEvaluation`
3654
+ # operation.
3655
+ # @!attribute [rw] evaluation_id
3656
+ # The ID assigned to the `Evaluation` during creation. This value
3657
+ # should be identical to the value of the `EvaluationId` in the request.
3658
+ # @return [String]
3659
+ class UpdateEvaluationOutput < Struct.new(
3660
+ :evaluation_id)
3661
+ include Aws::Structure
3662
+ end
3663
+
3664
+ # @note When making an API call, pass UpdateMLModelInput
3665
+ # data as a hash:
3666
+ #
3667
+ # {
3668
+ # ml_model_id: "EntityId", # required
3669
+ # ml_model_name: "EntityName",
3670
+ # score_threshold: 1.0,
3671
+ # }
3672
+ # @!attribute [rw] ml_model_id
3673
+ # The ID assigned to the `MLModel` during creation.
3674
+ # @return [String]
3675
+ #
3676
+ # @!attribute [rw] ml_model_name
3677
+ # A user-supplied name or description of the `MLModel`.
3678
+ # @return [String]
3679
+ #
3680
+ # @!attribute [rw] score_threshold
3681
+ # The `ScoreThreshold` used in binary classification `MLModel` that
3682
+ # marks the boundary between a positive prediction and a negative
3683
+ # prediction.
3684
+ #
3685
+ # Output values greater than or equal to the `ScoreThreshold` receive
3686
+ # a positive result from the `MLModel`, such as `true`. Output values
3687
+ # less than the `ScoreThreshold` receive a negative response from the
3688
+ # `MLModel`, such as `false`.
3689
+ # @return [Float]
3690
+ class UpdateMLModelInput < Struct.new(
3691
+ :ml_model_id,
3692
+ :ml_model_name,
3693
+ :score_threshold)
3694
+ include Aws::Structure
3695
+ end
3696
+
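+ # A minimal sketch (an assumption, not generated documentation) of
+ # raising the score threshold so that only scores of 0.75 or higher
+ # are returned as positive; the model ID is illustrative:
+ #
+ #     ml.update_ml_model(
+ #       ml_model_id: "my-model-id",
+ #       score_threshold: 0.75)
+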
3697
+ # Represents the output of an `UpdateMLModel` operation.
3698
+ #
3699
+ # You can see the updated content by using the `GetMLModel` operation.
3700
+ # @!attribute [rw] ml_model_id
3701
+ # The ID assigned to the `MLModel` during creation. This value should
3702
+ # be identical to the value of the `MLModelId` in the request.
3703
+ # @return [String]
3704
+ class UpdateMLModelOutput < Struct.new(
3705
+ :ml_model_id)
3706
+ include Aws::Structure
3707
+ end
3708
+
3709
+ end
3710
+ end
3711
+ end