aws-sdk-machinelearning 1.18.0 → 1.19.0
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d20916286dd395ed9c43c965da88adc76363c6f3ba5b16df81e28db32759a101
+  data.tar.gz: a5f0b38493514e13a1d87f4e90b17d454fedcdfa8ac27b989326bcb3116a2c02
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1e50fe4a89f4de91781589e5edb6233233d3acaaf9af66162310bf4989f64f93c8c018a8f4eca2b65ceff17de350aad733da92910a612bdefdc217a3b63ee717
+  data.tar.gz: fd86672ee09b8e384879d1b14be9be1fecd8846cf3a4f4e5300159776d4c010c2e9462941ef9abf61566468dd0d18153080d05be6a38bf5c90c6f2528168665d
@@ -107,7 +107,7 @@ module Aws::MachineLearning
 # @option options [required, String] :region
 #   The AWS region to connect to. The configured `:region` is
 #   used to determine the service `:endpoint`. When not passed,
-#   a default `:region` is
+#   a default `:region` is searched for in the following locations:
 #
 #   * `Aws.config[:region]`
 #   * `ENV['AWS_REGION']`
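The hunk above completes the `:region` doc text: when no explicit `:region` is passed, the client searches the listed locations. A minimal sketch of both paths, with the region value chosen purely for illustration:

    require 'aws-sdk-machinelearning'

    # An explicit :region always takes precedence.
    client = Aws::MachineLearning::Client.new(region: 'us-east-1')

    # Otherwise the SDK falls back to the documented lookup chain,
    # e.g. a process-wide default or the environment variable:
    Aws.config[:region] = 'us-east-1'
    # or: ENV['AWS_REGION'] = 'us-east-1'
    client = Aws::MachineLearning::Client.new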
@@ -163,7 +163,7 @@ module Aws::MachineLearning
 # @option options [String] :endpoint
 #   The client endpoint is normally constructed from the `:region`
 #   option. You should only configure an `:endpoint` when connecting
-#   to test endpoints. This should be
+#   to test endpoints. This should be a valid HTTP(S) URI.
 #
 # @option options [Integer] :endpoint_cache_max_entries (1000)
 #   Used for the maximum size limit of the LRU cache storing endpoints data
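The `:endpoint` wording now spells out that the override must be a valid HTTP(S) URI. A hedged sketch of pointing the client at a test endpoint; the URL is hypothetical:

    client = Aws::MachineLearning::Client.new(
      region: 'us-east-1',
      endpoint: 'https://localhost:8443'   # must be a valid HTTP(S) URI
    )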
@@ -281,8 +281,7 @@ module Aws::MachineLearning
 #
 # @option options [Integer] :http_read_timeout (60) The default
 #   number of seconds to wait for response data. This value can
-#   safely be set
-#   per-request on the session yielded by {#session_for}.
+#   safely be set per-request on the session.
 #
 # @option options [Float] :http_idle_timeout (5) The number of
 #   seconds a connection is allowed to sit idle before it is
@@ -294,7 +293,7 @@ module Aws::MachineLearning
 #   request body. This option has no effect unless the request has
 #   "Expect" header set to "100-continue". Defaults to `nil` which
 #   disables this behaviour. This value can safely be set per
-#   request on the session
+#   request on the session.
 #
 # @option options [Boolean] :http_wire_trace (false) When `true`,
 #   HTTP debug output will be sent to the `:logger`.
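The two hunks above tidy the `:http_read_timeout` and 100-continue wording; the options themselves are ordinary constructor settings. A minimal sketch, with values chosen only for illustration:

    client = Aws::MachineLearning::Client.new(
      region: 'us-east-1',
      http_read_timeout: 120,  # seconds to wait for response data (default 60)
      http_idle_timeout: 5,    # seconds an idle connection may stay open
      http_wire_trace: false   # true sends HTTP debug output to the :logger
    )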
@@ -1316,6 +1315,8 @@ module Aws::MachineLearning
 # * {Types::DescribeBatchPredictionsOutput#results #results} => Array<Types::BatchPrediction>
 # * {Types::DescribeBatchPredictionsOutput#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.describe_batch_predictions({
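The added note documents that the response object is pageable and Enumerable, so pagination can follow `next_token` automatically. A sketch of iterating every page; the `limit` value is illustrative:

    resp = client.describe_batch_predictions(limit: 100)

    # Each iteration yields one DescribeBatchPredictionsOutput page.
    resp.each_page do |page|
      page.results.each { |bp| puts bp.name }
    end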
@@ -1353,6 +1354,11 @@ module Aws::MachineLearning
 #   resp.results[0].invalid_record_count #=> Integer
 #   resp.next_token #=> String
 #
+#
+# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+#
+# * batch_prediction_available
+#
 # @overload describe_batch_predictions(params = {})
 # @param [Hash] params ({})
 def describe_batch_predictions(params = {}, options = {})
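The new doc lines advertise the `batch_prediction_available` waiter, which is driven through `Client#wait_until`. A hedged sketch; the filter parameters and ID are hypothetical:

    # Polls describe_batch_predictions until the matching batch predictions
    # reach an available state; a failure state raises
    # Aws::Waiters::Errors::FailureStateError, per the @raise docs below.
    client.wait_until(:batch_prediction_available,
                      filter_variable: 'DataSourceId',  # hypothetical filter
                      eq: 'ds-example-1234')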
@@ -1441,6 +1447,8 @@ module Aws::MachineLearning
 # * {Types::DescribeDataSourcesOutput#results #results} => Array<Types::DataSource>
 # * {Types::DescribeDataSourcesOutput#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.describe_data_sources({
@@ -1489,6 +1497,11 @@ module Aws::MachineLearning
 #   resp.results[0].started_at #=> Time
 #   resp.next_token #=> String
 #
+#
+# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+#
+# * data_source_available
+#
 # @overload describe_data_sources(params = {})
 # @param [Hash] params ({})
 def describe_data_sources(params = {}, options = {})
@@ -1582,6 +1595,8 @@ module Aws::MachineLearning
 # * {Types::DescribeEvaluationsOutput#results #results} => Array<Types::Evaluation>
 # * {Types::DescribeEvaluationsOutput#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.describe_evaluations({
@@ -1618,6 +1633,11 @@ module Aws::MachineLearning
 #   resp.results[0].started_at #=> Time
 #   resp.next_token #=> String
 #
+#
+# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+#
+# * evaluation_available
+#
 # @overload describe_evaluations(params = {})
 # @param [Hash] params ({})
 def describe_evaluations(params = {}, options = {})
@@ -1714,6 +1734,8 @@ module Aws::MachineLearning
 # * {Types::DescribeMLModelsOutput#results #results} => Array<Types::MLModel>
 # * {Types::DescribeMLModelsOutput#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.describe_ml_models({
@@ -1758,6 +1780,11 @@ module Aws::MachineLearning
 #   resp.results[0].started_at #=> Time
 #   resp.next_token #=> String
 #
+#
+# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+#
+# * ml_model_available
+#
 # @overload describe_ml_models(params = {})
 # @param [Hash] params ({})
 def describe_ml_models(params = {}, options = {})
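The same waiter note is added for `ml_model_available`. Its defaults (`:delay` 30, `:max_attempts` 60, per the table further down) can be overridden in the block form of `#wait_until`; a sketch with illustrative values:

    client.wait_until(:ml_model_available) do |w|
      w.delay = 15          # seconds between polls
      w.max_attempts = 10   # stop after 10 polls instead of the default 60
    end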
@@ -2290,7 +2317,7 @@ module Aws::MachineLearning
       params: params,
       config: config)
     context[:gem_name] = 'aws-sdk-machinelearning'
-    context[:gem_version] = '1.18.0'
+    context[:gem_version] = '1.19.0'
     Seahorse::Client::Request.new(handlers, context)
   end
 
@@ -2356,12 +2383,12 @@ module Aws::MachineLearning
 # The following table lists the valid waiter names, the operations they call,
 # and the default `:delay` and `:max_attempts` values.
 #
-# | waiter_name | params
-# | -------------------------- |
-# | batch_prediction_available | {#describe_batch_predictions} | 30 | 60 |
-# | data_source_available | {#describe_data_sources} | 30 | 60 |
-# | evaluation_available | {#describe_evaluations} | 30 | 60 |
-# | ml_model_available | {#describe_ml_models} | 30 | 60 |
+# | waiter_name                | params                               | :delay | :max_attempts |
+# | -------------------------- | ------------------------------------ | ------ | ------------- |
+# | batch_prediction_available | {Client#describe_batch_predictions}  | 30     | 60            |
+# | data_source_available      | {Client#describe_data_sources}       | 30     | 60            |
+# | evaluation_available       | {Client#describe_evaluations}        | 30     | 60            |
+# | ml_model_available         | {Client#describe_ml_models}          | 30     | 60            |
 #
 # @raise [Errors::FailureStateError] Raised when the waiter terminates
 #   because the waiter has entered a state that it will not transition
@@ -6,13 +6,7 @@
 # WARNING ABOUT GENERATED CODE
 
 module Aws::MachineLearning
-
-  # To create a resource object:
-  #     resource = Aws::MachineLearning::Resource.new(region: 'us-west-2')
-  # You can supply a client object with custom configuration that will be used for all resource operations.
-  # If you do not pass +:client+, a default client will be constructed.
-  #     client = Aws::MachineLearning::Client.new(region: 'us-west-2')
-  #     resource = Aws::MachineLearning::Resource.new(client: client)
+
   class Resource
 
     # @param options ({})
@@ -8,6 +8,70 @@
 require 'aws-sdk-core/waiters'
 
 module Aws::MachineLearning
+  # Waiters are utility methods that poll for a particular state to occur
+  # on a client. Waiters can fail after a number of attempts at a polling
+  # interval defined for the service client.
+  #
+  # For a list of operations that can be waited for and the
+  # client methods called for each operation, see the table below or the
+  # {Client#wait_until} field documentation for the {Client}.
+  #
+  # # Invoking a Waiter
+  # To invoke a waiter, call #wait_until on a {Client}. The first parameter
+  # is the waiter name, which is specific to the service client and indicates
+  # which operation is being waited for. The second parameter is a hash of
+  # parameters that are passed to the client method called by the waiter,
+  # which varies according to the waiter name.
+  #
+  # # Wait Failures
+  # To catch errors in a waiter, use WaiterFailed,
+  # as shown in the following example.
+  #
+  #     rescue rescue Aws::Waiters::Errors::WaiterFailed => error
+  #       puts "failed waiting for instance running: #{error.message}
+  #     end
+  #
+  # # Configuring a Waiter
+  # Each waiter has a default polling interval and a maximum number of
+  # attempts it will make before returning control to your program.
+  # To set these values, use the `max_attempts` and `delay` parameters
+  # in your `#wait_until` call.
+  # The following example waits for up to 25 seconds, polling every five seconds.
+  #
+  #     client.wait_until(...) do |w|
+  #       w.max_attempts = 5
+  #       w.delay = 5
+  #     end
+  #
+  # To disable wait failures, set the value of either of these parameters
+  # to `nil`.
+  #
+  # # Extending a Waiter
+  # To modify the behavior of waiters, you can register callbacks that are
+  # triggered before each polling attempt and before waiting.
+  #
+  # The following example implements an exponential backoff in a waiter
+  # by doubling the amount of time to wait on every attempt.
+  #
+  #     client.wait_until(...) do |w|
+  #       w.interval = 0 # disable normal sleep
+  #       w.before_wait do |n, resp|
+  #         sleep(n ** 2)
+  #       end
+  #     end
+  #
+  # # Available Waiters
+  #
+  # The following table lists the valid waiter names, the operations they call,
+  # and the default `:delay` and `:max_attempts` values.
+  #
+  # | waiter_name                | params                               | :delay | :max_attempts |
+  # | -------------------------- | ------------------------------------ | ------ | ------------- |
+  # | batch_prediction_available | {Client#describe_batch_predictions}  | 30     | 60            |
+  # | data_source_available      | {Client#describe_data_sources}       | 30     | 60            |
+  # | evaluation_available       | {Client#describe_evaluations}        | 30     | 60            |
+  # | ml_model_available         | {Client#describe_ml_models}          | 30     | 60            |
+  #
   module Waiters
 
     class BatchPredictionAvailable
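The wait-failure snippet inside this generated docstring ships slightly malformed (a doubled `rescue` and an unterminated string). A complete version of the same pattern, with an illustrative waiter name and message, might look like:

    begin
      client.wait_until(:ml_model_available)
    rescue Aws::Waiters::Errors::WaiterFailed => error
      puts "failed waiting for the ML model to become available: #{error.message}"
    end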
metadata CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: aws-sdk-machinelearning
 version: !ruby/object:Gem::Version
-  version: 1.18.0
+  version: 1.19.0
 platform: ruby
 authors:
 - Amazon Web Services
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-05-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-core
@@ -82,7 +82,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-
+rubyforge_project:
+rubygems_version: 2.7.6.2
 signing_key:
 specification_version: 4
 summary: AWS SDK for Ruby - Amazon Machine Learning