aws-sdk-core 3.46.0 → 3.94.0
- checksums.yaml +5 -5
- data/VERSION +1 -1
- data/lib/aws-sdk-core.rb +7 -0
- data/lib/aws-sdk-core/arn.rb +77 -0
- data/lib/aws-sdk-core/arn_parser.rb +38 -0
- data/lib/aws-sdk-core/assume_role_web_identity_credentials.rb +102 -0
- data/lib/aws-sdk-core/async_client_stubs.rb +80 -0
- data/lib/aws-sdk-core/binary.rb +3 -0
- data/lib/aws-sdk-core/binary/decode_handler.rb +9 -1
- data/lib/aws-sdk-core/binary/encode_handler.rb +32 -0
- data/lib/aws-sdk-core/binary/event_builder.rb +122 -0
- data/lib/aws-sdk-core/binary/event_parser.rb +48 -18
- data/lib/aws-sdk-core/binary/event_stream_decoder.rb +5 -2
- data/lib/aws-sdk-core/binary/event_stream_encoder.rb +53 -0
- data/lib/aws-sdk-core/client_side_monitoring/publisher.rb +9 -1
- data/lib/aws-sdk-core/client_stubs.rb +10 -9
- data/lib/aws-sdk-core/credential_provider.rb +0 -31
- data/lib/aws-sdk-core/credential_provider_chain.rb +79 -39
- data/lib/aws-sdk-core/deprecations.rb +16 -10
- data/lib/aws-sdk-core/ecs_credentials.rb +12 -8
- data/lib/aws-sdk-core/endpoint_cache.rb +14 -11
- data/lib/aws-sdk-core/errors.rb +94 -6
- data/lib/aws-sdk-core/event_emitter.rb +42 -0
- data/lib/aws-sdk-core/instance_profile_credentials.rb +120 -38
- data/lib/aws-sdk-core/json.rb +13 -14
- data/lib/aws-sdk-core/json/error_handler.rb +19 -2
- data/lib/aws-sdk-core/json/handler.rb +19 -1
- data/lib/aws-sdk-core/log/formatter.rb +7 -1
- data/lib/aws-sdk-core/log/param_filter.rb +3 -3
- data/lib/aws-sdk-core/pageable_response.rb +34 -20
- data/lib/aws-sdk-core/param_validator.rb +11 -5
- data/lib/aws-sdk-core/plugins/client_metrics_plugin.rb +26 -1
- data/lib/aws-sdk-core/plugins/endpoint_discovery.rb +1 -1
- data/lib/aws-sdk-core/plugins/event_stream_configuration.rb +14 -0
- data/lib/aws-sdk-core/plugins/invocation_id.rb +33 -0
- data/lib/aws-sdk-core/plugins/regional_endpoint.rb +8 -1
- data/lib/aws-sdk-core/plugins/retries/client_rate_limiter.rb +137 -0
- data/lib/aws-sdk-core/plugins/retries/clock_skew.rb +98 -0
- data/lib/aws-sdk-core/plugins/retries/error_inspector.rb +142 -0
- data/lib/aws-sdk-core/plugins/retries/retry_quota.rb +57 -0
- data/lib/aws-sdk-core/plugins/retry_errors.rb +290 -106
- data/lib/aws-sdk-core/plugins/signature_v4.rb +13 -2
- data/lib/aws-sdk-core/plugins/stub_responses.rb +20 -7
- data/lib/aws-sdk-core/plugins/transfer_encoding.rb +51 -0
- data/lib/aws-sdk-core/plugins/user_agent.rb +4 -8
- data/lib/aws-sdk-core/process_credentials.rb +9 -3
- data/lib/aws-sdk-core/shared_config.rb +95 -125
- data/lib/aws-sdk-core/structure.rb +1 -2
- data/lib/aws-sdk-core/stubbing/protocols/rest.rb +19 -0
- data/lib/aws-sdk-core/stubbing/stub_data.rb +13 -4
- data/lib/aws-sdk-core/util.rb +4 -0
- data/lib/aws-sdk-core/waiters/waiter.rb +2 -2
- data/lib/aws-sdk-core/xml/error_handler.rb +26 -3
- data/lib/aws-sdk-sts.rb +7 -4
- data/lib/aws-sdk-sts/client.rb +1109 -459
- data/lib/aws-sdk-sts/client_api.rb +67 -0
- data/lib/aws-sdk-sts/customizations.rb +2 -0
- data/lib/aws-sdk-sts/errors.rb +150 -0
- data/lib/aws-sdk-sts/plugins/sts_regional_endpoints.rb +32 -0
- data/lib/aws-sdk-sts/presigner.rb +67 -0
- data/lib/aws-sdk-sts/resource.rb +1 -0
- data/lib/aws-sdk-sts/types.rb +736 -176
- data/lib/seahorse.rb +9 -0
- data/lib/seahorse/client/async_base.rb +50 -0
- data/lib/seahorse/client/async_response.rb +62 -0
- data/lib/seahorse/client/base.rb +4 -2
- data/lib/seahorse/client/configuration.rb +4 -2
- data/lib/seahorse/client/events.rb +1 -1
- data/lib/seahorse/client/h2/connection.rb +246 -0
- data/lib/seahorse/client/h2/handler.rb +151 -0
- data/lib/seahorse/client/handler_list_entry.rb +2 -2
- data/lib/seahorse/client/http/async_response.rb +42 -0
- data/lib/seahorse/client/http/response.rb +13 -8
- data/lib/seahorse/client/logging/formatter.rb +4 -2
- data/lib/seahorse/client/net_http/connection_pool.rb +19 -20
- data/lib/seahorse/client/net_http/handler.rb +7 -1
- data/lib/seahorse/client/net_http/patches.rb +7 -1
- data/lib/seahorse/client/networking_error.rb +28 -0
- data/lib/seahorse/client/plugin.rb +5 -4
- data/lib/seahorse/client/plugins/content_length.rb +5 -2
- data/lib/seahorse/client/plugins/h2.rb +64 -0
- data/lib/seahorse/client/response.rb +3 -5
- data/lib/seahorse/model/api.rb +4 -0
- data/lib/seahorse/model/operation.rb +4 -0
- data/lib/seahorse/model/shapes.rb +2 -2
- metadata +43 -10
data/lib/aws-sdk-core/plugins/retries/clock_skew.rb
@@ -0,0 +1,98 @@
+module Aws
+  module Plugins
+    module Retries
+
+      # @api private
+      class ClockSkew
+
+        CLOCK_SKEW_THRESHOLD = 5 * 60 # five minutes
+
+        def initialize
+          @mutex = Mutex.new
+          # clock_corrections are recorded only on errors
+          # and only when time difference is greater than the
+          # CLOCK_SKEW_THRESHOLD
+          @endpoint_clock_corrections = Hash.new(0)
+
+          # estimated_skew is calculated on every request
+          # and is used to estimate a TTL for requests
+          @endpoint_estimated_skews = Hash.new(nil)
+        end
+
+        # Gets the clock_correction in seconds to apply to a given endpoint
+        # @param endpoint [URI / String]
+        def clock_correction(endpoint)
+          @mutex.synchronize { @endpoint_clock_corrections[endpoint.to_s] }
+        end
+
+        # The estimated skew factors in any clock skew from
+        # the service along with any network latency.
+        # This provides a more accurate value for the ttl,
+        # which should represent when the client will stop
+        # waiting for a request.
+        # Estimated Skew should not be used to correct clock skew errors
+        # it should only be used to estimate TTL for a request
+        def estimated_skew(endpoint)
+          @mutex.synchronize { @endpoint_estimated_skews[endpoint.to_s] }
+        end
+
+        # Determines whether a request has clock skew by comparing
+        # the current time against the server's time in the response
+        # @param context [Seahorse::Client::RequestContext]
+        def clock_skewed?(context)
+          server_time = server_time(context.http_response)
+          !!server_time &&
+            (Time.now.utc - server_time).abs > CLOCK_SKEW_THRESHOLD
+        end
+
+        # Called only on clock skew related errors
+        # Update the stored clock skew correction value for an endpoint
+        # from the server's time in the response
+        # @param context [Seahorse::Client::RequestContext]
+        def update_clock_correction(context)
+          endpoint = context.http_request.endpoint
+          now_utc = Time.now.utc
+          server_time = server_time(context.http_response)
+          if server_time && (now_utc - server_time).abs > CLOCK_SKEW_THRESHOLD
+            set_clock_correction(endpoint, server_time - now_utc)
+          end
+        end
+
+        # Called for every request
+        # Update our estimated clock skew for the endpoint
+        # from the servers time in the response
+        # @param context [Seahorse::Client::RequestContext]
+        def update_estimated_skew(context)
+          endpoint = context.http_request.endpoint
+          now_utc = Time.now.utc
+          server_time = server_time(context.http_response)
+          return unless server_time
+          @mutex.synchronize do
+            @endpoint_estimated_skews[endpoint.to_s] = server_time - now_utc
+          end
+        end
+
+        private
+
+        # @param response [Seahorse::Client::Http::Response]
+        def server_time(response)
+          begin
+            Time.parse(response.headers['date']).utc
+          rescue
+            nil
+          end
+        end
+
+        # Sets the clock correction for an endpoint
+        # @param endpoint [URI / String]
+        # @param correction [Number]
+        def set_clock_correction(endpoint, correction)
+          @mutex.synchronize do
+            @endpoint_clock_corrections[endpoint.to_s] = correction
+          end
+        end
+      end
+    end
+  end
+end
+
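The ClockSkew bookkeeping above is easiest to see in isolation. The snippet below is a minimal sketch, not part of the diff: it assumes aws-sdk-core 3.94.0 (where requiring the gem loads these plugin files), drives the @api private class directly, and uses hypothetical FakeRequest/FakeResponse/FakeContext structs that stub only the attributes ClockSkew reads.

require 'time'
require 'aws-sdk-core'

# Hypothetical stand-ins for the Seahorse objects the plugin normally receives.
FakeRequest  = Struct.new(:endpoint)
FakeResponse = Struct.new(:headers)
FakeContext  = Struct.new(:http_request, :http_response)

skew = Aws::Plugins::Retries::ClockSkew.new

# Pretend the service's Date header is ten minutes ahead of the local clock.
context = FakeContext.new(
  FakeRequest.new('https://sts.us-east-1.amazonaws.com'),
  FakeResponse.new('date' => (Time.now.utc + 600).httpdate)
)

skew.clock_skewed?(context)           # => true, 600s exceeds the 5 minute threshold
skew.update_clock_correction(context)
skew.clock_correction(context.http_request.endpoint) # => roughly 600.0 seconds
skew.update_estimated_skew(context)
skew.estimated_skew(context.http_request.endpoint)   # => roughly 600.0 seconds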
data/lib/aws-sdk-core/plugins/retries/error_inspector.rb
@@ -0,0 +1,142 @@
+module Aws
+  module Plugins
+    module Retries
+      # @api private
+      # This class will be obsolete when APIs contain modeled exceptions
+      class ErrorInspector
+        EXPIRED_CREDS = Set.new(
+          [
+            'InvalidClientTokenId',        # query services
+            'UnrecognizedClientException', # json services
+            'InvalidAccessKeyId',          # s3
+            'AuthFailure',                 # ec2
+            'InvalidIdentityToken',        # sts
+            'ExpiredToken'                 # route53
+          ]
+        )
+
+        THROTTLING_ERRORS = Set.new(
+          [
+            'Throttling',                             # query services
+            'ThrottlingException',                    # json services
+            'ThrottledException',                     # sns
+            'RequestThrottled',                       # sqs
+            'RequestThrottledException',              # generic service
+            'ProvisionedThroughputExceededException', # dynamodb
+            'TransactionInProgressException',         # dynamodb
+            'RequestLimitExceeded',                   # ec2
+            'BandwidthLimitExceeded',                 # cloud search
+            'LimitExceededException',                 # kinesis
+            'TooManyRequestsException',               # batch
+            'PriorRequestNotComplete',                # route53
+            'SlowDown',                               # s3
+            'EC2ThrottledException'                   # ec2
+          ]
+        )
+
+        CHECKSUM_ERRORS = Set.new(
+          [
+            'CRC32CheckFailed' # dynamodb
+          ]
+        )
+
+        NETWORKING_ERRORS = Set.new(
+          [
+            'RequestTimeout',          # s3
+            'RequestTimeoutException', # glacier
+            'IDPCommunicationError'    # sts
+          ]
+        )
+
+        # See: https://github.com/aws/aws-sdk-net/blob/5810dfe401e0eac2e59d02276d4b479224b4538e/sdk/src/Core/Amazon.Runtime/Pipeline/RetryHandler/RetryPolicy.cs#L78
+        CLOCK_SKEW_ERRORS = Set.new(
+          [
+            'RequestTimeTooSkewed',
+            'RequestExpired',
+            'InvalidSignatureException',
+            'SignatureDoesNotMatch',
+            'AuthFailure',
+            'RequestInTheFuture'
+          ]
+        )
+
+        def initialize(error, http_status_code)
+          @error = error
+          @name = extract_name(@error)
+          @http_status_code = http_status_code
+        end
+
+        def expired_credentials?
+          !!(EXPIRED_CREDS.include?(@name) || @name.match(/expired/i))
+        end
+
+        def throttling_error?
+          !!(THROTTLING_ERRORS.include?(@name) ||
+            @name.match(/throttl/i) ||
+            @http_status_code == 429) ||
+            modeled_throttling?
+        end
+
+        def checksum?
+          CHECKSUM_ERRORS.include?(@name) || @error.is_a?(Errors::ChecksumError)
+        end
+
+        def networking?
+          @error.is_a?(Seahorse::Client::NetworkingError) ||
+            @error.is_a?(Errors::NoSuchEndpointError) ||
+            NETWORKING_ERRORS.include?(@name)
+        end
+
+        def server?
+          (500..599).cover?(@http_status_code)
+        end
+
+        def endpoint_discovery?(context)
+          return false unless context.operation.endpoint_discovery
+
+          @http_status_code == 421 ||
+            @name == 'InvalidEndpointException' ||
+            @error.is_a?(Errors::EndpointDiscoveryError)
+        end
+
+        def modeled_retryable?
+          @error.is_a?(Errors::ServiceError) && @error.retryable?
+        end
+
+        def modeled_throttling?
+          @error.is_a?(Errors::ServiceError) && @error.throttling?
+        end
+
+        def clock_skew?(context)
+          CLOCK_SKEW_ERRORS.include?(@name) &&
+            context.config.clock_skew.clock_skewed?(context)
+        end
+
+        def retryable?(context)
+          server? ||
+            modeled_retryable? ||
+            throttling_error? ||
+            networking? ||
+            checksum? ||
+            endpoint_discovery?(context) ||
+            (expired_credentials? && refreshable_credentials?(context)) ||
+            clock_skew?(context)
+        end
+
+        private
+
+        def refreshable_credentials?(context)
+          context.config.credentials.respond_to?(:refresh!)
+        end
+
+        def extract_name(error)
+          if error.is_a?(Errors::ServiceError)
+            error.class.code || error.class.name.to_s
+          else
+            error.class.name.to_s
+          end
+        end
+      end
+    end
+  end
+end
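To see how this classification behaves, here is a small sketch, not part of the diff. ErrorInspector is @api private, and the two error classes below are hypothetical stand-ins for what a service call might raise.

require 'aws-sdk-core'

# Hypothetical error classes standing in for errors a service call might raise.
class ThrottlingException < StandardError; end
class InternalFailure < StandardError; end

# A throttling error is recognized by name (THROTTLING_ERRORS / /throttl/i).
throttled = Aws::Plugins::Retries::ErrorInspector.new(ThrottlingException.new, 400)
throttled.throttling_error?  # => true
throttled.networking?        # => false

# Any 5xx status is treated as a retryable server error.
server = Aws::Plugins::Retries::ErrorInspector.new(InternalFailure.new, 503)
server.server?               # => true
server.expired_credentials?  # => false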
data/lib/aws-sdk-core/plugins/retries/retry_quota.rb
@@ -0,0 +1,57 @@
+module Aws
+  module Plugins
+    module Retries
+
+      # @api private
+      # Used in 'standard' and 'adaptive' retry modes.
+      class RetryQuota
+        INITIAL_RETRY_TOKENS = 500
+        RETRY_COST = 5
+        NO_RETRY_INCREMENT = 1
+        TIMEOUT_RETRY_COST = 10
+
+        def initialize(opts = {})
+          @mutex = Mutex.new
+          @max_capacity = opts.fetch(:max_capacity, INITIAL_RETRY_TOKENS)
+          @available_capacity = @max_capacity
+        end
+
+        # check if there is sufficient capacity to retry
+        # and return it. If there is insufficient capacity
+        # return 0
+        # @return [Integer] The amount of capacity checked out
+        def checkout_capacity(error_inspector)
+          @mutex.synchronize do
+            capacity_amount = if error_inspector.networking?
+                                TIMEOUT_RETRY_COST
+                              else
+                                RETRY_COST
+                              end
+
+            # unable to acquire capacity
+            return 0 if capacity_amount > @available_capacity
+
+            @available_capacity -= capacity_amount
+            capacity_amount
+          end
+        end
+
+        # capacity_amount refers to the amount of capacity requested from
+        # the last retry. It can either be RETRY_COST, TIMEOUT_RETRY_COST,
+        # or unset.
+        def release(capacity_amount)
+          # Implementation note: The release() method is called for
+          # every API call. In the common case where the request is
+          # successful and we're at full capacity, we can avoid locking.
+          # We can't exceed max capacity so there's no work we have to do.
+          return if @available_capacity == @max_capacity
+
+          @mutex.synchronize do
+            @available_capacity += capacity_amount || NO_RETRY_INCREMENT
+            @available_capacity = [@available_capacity, @max_capacity].min
+          end
+        end
+      end
+    end
+  end
+end
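The quota arithmetic can be exercised directly. A minimal sketch, not part of the diff: RetryQuota is @api private, and FakeInspector is a hypothetical stand-in for ErrorInspector, since checkout_capacity only calls #networking? to choose between RETRY_COST (5) and TIMEOUT_RETRY_COST (10).

require 'aws-sdk-core'

# Hypothetical stand-in for ErrorInspector; only #networking? is consulted.
class FakeInspector
  def networking?
    false
  end
end

quota = Aws::Plugins::Retries::RetryQuota.new(max_capacity: 10)
inspector = FakeInspector.new

quota.checkout_capacity(inspector) # => 5  (RETRY_COST for a non-networking error)
quota.checkout_capacity(inspector) # => 5
quota.checkout_capacity(inspector) # => 0  (bucket exhausted, the caller stops retrying)

# A successful response releases what the last retry checked out,
# capped at max_capacity; with no prior retry it trickles back NO_RETRY_INCREMENT.
quota.release(5)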
data/lib/aws-sdk-core/plugins/retry_errors.rb
@@ -1,13 +1,17 @@
 require 'set'
+require_relative 'retries/error_inspector'
+require_relative 'retries/retry_quota'
+require_relative 'retries/client_rate_limiter'
+require_relative 'retries/clock_skew'
 
 module Aws
   module Plugins
     # @api private
     class RetryErrors < Seahorse::Client::Plugin
-
-      EQUAL_JITTER =
-      FULL_JITTER=
-      NO_JITTER =
+      # BEGIN LEGACY OPTIONS
+      EQUAL_JITTER = ->(delay) { (delay / 2) + Kernel.rand(0..(delay / 2)) }
+      FULL_JITTER = ->(delay) { Kernel.rand(0..delay) }
+      NO_JITTER = ->(delay) { delay }
 
       JITTERS = {
         none: NO_JITTER,
@@ -15,162 +19,346 @@ module Aws
         full: FULL_JITTER
       }
 
-      JITTERS.default_proc = lambda { |h,k|
-        raise KeyError,
+      JITTERS.default_proc = lambda { |h, k|
+        raise KeyError,
+              "#{k} is not a named jitter function. Must be one of #{h.keys}"
       }
 
       DEFAULT_BACKOFF = lambda do |c|
-        delay = 2
-
+        delay = 2**c.retries * c.config.retry_base_delay
+        if (c.config.retry_max_delay || 0) > 0
+          delay = [delay, c.config.retry_max_delay].min
+        end
         jitter = c.config.retry_jitter
-        jitter = JITTERS[jitter] if Symbol
+        jitter = JITTERS[jitter] if jitter.is_a?(Symbol)
         delay = jitter.call(delay) if jitter
         Kernel.sleep(delay)
       end
 
-      option(
+      option(
+        :retry_limit,
         default: 3,
         doc_type: Integer,
         docstring: <<-DOCS)
 The maximum number of times to retry failed requests. Only
 ~ 500 level server errors and certain ~ 400 level client errors
 are retried. Generally, these are throttling errors, data
-checksum errors, networking errors, timeout errors
-errors from expired credentials.
+checksum errors, networking errors, timeout errors, auth errors,
+endpoint discovery, and errors from expired credentials.
+This option is only used in the `legacy` retry mode.
       DOCS
 
-      option(
+      option(
+        :retry_max_delay,
         default: 0,
         doc_type: Integer,
         docstring: <<-DOCS)
-The maximum number of seconds to delay between retries (0 for no limit)
+The maximum number of seconds to delay between retries (0 for no limit)
+used by the default backoff function. This option is only used in the
+`legacy` retry mode.
       DOCS
 
-      option(
+      option(
+        :retry_base_delay,
         default: 0.3,
         doc_type: Float,
         docstring: <<-DOCS)
-The base delay in seconds used by the default backoff function.
+The base delay in seconds used by the default backoff function. This option
+is only used in the `legacy` retry mode.
       DOCS
 
-      option(
+      option(
+        :retry_jitter,
         default: :none,
         doc_type: Symbol,
         docstring: <<-DOCS)
-A delay randomiser function used by the default backoff function.
+A delay randomiser function used by the default backoff function.
+Some predefined functions can be referenced by name - :none, :equal, :full,
+otherwise a Proc that takes and returns a number. This option is only used
+in the `legacy` retry mode.
 
 @see https://www.awsarchitectureblog.com/2015/03/backoff.html
       DOCS
 
-      option(
+      option(
+        :retry_backoff,
+        default: DEFAULT_BACKOFF,
+        doc_type: Proc,
+        docstring: <<-DOCS)
+A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+This option is only used in the `legacy` retry mode.
+      DOCS
 
-      #
-      class ErrorInspector
+      # END LEGACY OPTIONS
 
-
-
-
-
-
-
+      option(
+        :retry_mode,
+        default: 'legacy',
+        doc_type: String,
+        docstring: <<-DOCS) do |cfg|
+Specifies which retry algorithm to use. Values are:
 
-
-
-            'ThrottlingException', # json services
-            'RequestThrottled', # sqs
-            'ProvisionedThroughputExceededException', # dynamodb
-            'TransactionInProgressException', # dynamodb
-            'RequestLimitExceeded', # ec2
-            'BandwidthLimitExceeded', # cloud search
-            'LimitExceededException', # kinesis
-            'TooManyRequestsException', # batch
-          ])
+* `legacy` - The pre-existing retry behavior. This is default value if
+  no retry mode is provided.
 
-
-
-
+* `standard` - A standardized set of retry rules across the AWS SDKs.
+  This includes support for retry quotas, which limit the number of
+  unsuccessful retries a client can make.
 
-
-
-
+* `adaptive` - An experimental retry mode that includes all the
+  functionality of `standard` mode along with automatic client side
+  throttling. This is a provisional mode that may change behavior
+  in the future.
 
-
-
-
-
-
+      DOCS
+        resolve_retry_mode(cfg)
+      end
+
+      option(
+        :max_attempts,
+        default: 3,
+        doc_type: Integer,
+        docstring: <<-DOCS) do |cfg|
+An integer representing the maximum number attempts that will be made for
+a single request, including the initial attempt. For example,
+setting this value to 5 will result in a request being retried up to
+4 times. Used in `standard` and `adaptive` retry modes.
+      DOCS
+        resolve_max_attempts(cfg)
+      end
+
+      option(
+        :adaptive_retry_wait_to_fill,
+        default: true,
+        doc_type: 'Boolean',
+        docstring: <<-DOCS) do |cfg|
+Used only in `adaptive` retry mode. When true, the request will sleep
+until there is sufficent client side capacity to retry the request.
+When false, the request will raise a `RetryCapacityNotAvailableError` and will
+not retry instead of sleeping.
+      DOCS
+        resolve_adaptive_retry_wait_to_fill(cfg)
+      end
+
+      option(
+        :correct_clock_skew,
+        default: true,
+        doc_type: 'Boolean',
+        docstring: <<-DOCS) do |cfg|
+Used only in `standard` and adaptive retry modes. Specifies whether to apply
+a clock skew correction and retry requests with skewed client clocks.
+      DOCS
+        resolve_correct_clock_skew(cfg)
+      end
+
+      # @api private undocumented
+      option(:client_rate_limiter) { Retries::ClientRateLimiter.new }
 
-
-
+      # @api private undocumented
+      option(:retry_quota) { Retries::RetryQuota.new }
+
+      # @api private undocumented
+      option(:clock_skew) { Retries::ClockSkew.new }
+
+      def self.resolve_retry_mode(cfg)
+        value = ENV['AWS_RETRY_MODE'] ||
+                Aws.shared_config.retry_mode(profile: cfg.profile) ||
+                'legacy'
+        # Raise if provided value is not one of the retry modes
+        if value != 'legacy' && value != 'standard' && value != 'adaptive'
+          raise ArgumentError,
+                'Must provide either `legacy`, `standard`, or `adaptive` for '\
+                'retry_mode profile option or for ENV[\'AWS_RETRY_MODE\']'
         end
+        value
+      end
 
-
-
+      def self.resolve_max_attempts(cfg)
+        value = ENV['AWS_MAX_ATTEMPTS'] ||
+                Aws.shared_config.max_attempts(profile: cfg.profile) ||
+                3
+        # Raise if provided value is not a positive integer
+        if !value.is_a?(Integer) || value <= 0
+          raise ArgumentError,
+                'Must provide a positive integer for max_attempts profile '\
+                'option or for ENV[\'AWS_MAX_ATTEMPTS\']'
         end
+        value
+      end
 
-
-
+      def self.resolve_adaptive_retry_wait_to_fill(cfg)
+        value = ENV['AWS_ADAPTIVE_RETRY_WAIT_TO_FILL'] ||
+                Aws.shared_config.adaptive_retry_wait_to_fill(profile: cfg.profile) ||
+                'true'
+
+        # Raise if provided value is not true or false
+        if value != 'true' && value != 'false'
+          raise ArgumentError,
+                'Must provide either `true` or `false` for '\
+                'adaptive_retry_wait_to_fill profile option or for '\
+                'ENV[\'AWS_ADAPTIVE_RETRY_WAIT_TO_FILL\']'
         end
 
-
-
-
+        value == 'true'
+      end
+
+      def self.resolve_correct_clock_skew(cfg)
+        value = ENV['AWS_CORRECT_CLOCK_SKEW'] ||
+                Aws.shared_config.correct_clock_skew(profile: cfg.profile) ||
+                'true'
+
+        # Raise if provided value is not true or false
+        if value != 'true' && value != 'false'
+          raise ArgumentError,
+                'Must provide either `true` or `false` for '\
+                'correct_clock_skew profile option or for '\
+                'ENV[\'AWS_CORRECT_CLOCK_SKEW\']'
         end
 
-
-
+        value == 'true'
+      end
+
+      class Handler < Seahorse::Client::Handler
+        # Max backoff (in seconds)
+        MAX_BACKOFF = 20
+
+        def call(context)
+          context.metadata[:retries] ||= {}
+          config = context.config
+
+          get_send_token(config)
+          add_retry_headers(context)
+          response = @handler.call(context)
+          error_inspector = Retries::ErrorInspector.new(
+            response.error, response.context.http_response.status_code
+          )
+
+          request_bookkeeping(context, response, error_inspector)
+
+          if error_inspector.endpoint_discovery?(context)
+            key = config.endpoint_cache.extract_key(context)
+            config.endpoint_cache.delete(key)
+          end
+
+          # Clock correction needs to be updated from the response even when
+          # the request is not retryable but should only be updated
+          # in the case of clock skew errors
+          if error_inspector.clock_skew?(context)
+            config.clock_skew.update_clock_correction(context)
+          end
+
+          # Estimated skew needs to be updated on every request
+          config.clock_skew.update_estimated_skew(context)
+
+          return response unless retryable?(context, response, error_inspector)
+
+          return response if context.retries >= config.max_attempts - 1
+
+          context.metadata[:retries][:capacity_amount] =
+            config.retry_quota.checkout_capacity(error_inspector)
+          return response unless context.metadata[:retries][:capacity_amount] > 0
+
+          delay = exponential_backoff(context.retries)
+          Kernel.sleep(delay)
+          retry_request(context, error_inspector)
         end
 
-
-          return false unless context.operation.endpoint_discovery
+        private
 
-
-
-
+        def get_send_token(config)
+          # either fail fast or block until a token becomes available
+          # must be configurable
+          # need a maximum rate at which we can send requests (max_send_rate)
+          # is unset until a throttle is seen
+          if config.retry_mode == 'adaptive'
+            config.client_rate_limiter.token_bucket_acquire(
+              1,
+              config.adaptive_retry_wait_to_fill
+            )
          end
+        end
 
-
-
-
-
-
-
-
-
+        # maxsendrate is updated if on adaptive mode and based on response
+        # retry quota is updated if the request is successful (both modes)
+        def request_bookkeeping(context, response, error_inspector)
+          config = context.config
+          if response.successful?
+            config.retry_quota.release(
+              context.metadata[:retries][:capacity_amount]
+            )
+          end
+
+          if config.retry_mode == 'adaptive'
+            is_throttling_error = error_inspector.throttling_error?
+            config.client_rate_limiter.update_sending_rate(is_throttling_error)
          end
        end
-
-        def retryable?(context)
-
-
-
-
-          server? or
-          endpoint_discovery?(context)
+
+        def retryable?(context, response, error_inspector)
+          return false if response.successful?
+
+          error_inspector.retryable?(context) &&
+            context.http_response.body.respond_to?(:truncate)
        end
 
-
+        def exponential_backoff(retries)
+          # for a transient error, use backoff
+          [Kernel.rand * 2**retries, MAX_BACKOFF].min
+        end
 
-        def
-          context.
+        def retry_request(context, error)
+          context.retries += 1
+          context.config.credentials.refresh! if error.expired_credentials?
+          context.http_request.body.rewind
+          context.http_response.reset
+          call(context)
        end
 
-        def
-
-
-
-
+        def add_retry_headers(context)
+          request_pairs = {
+            'attempt' => context.retries,
+            'max' => context.config.max_attempts
+          }
+          if (ttl = compute_request_ttl(context))
+            request_pairs['ttl'] = ttl
          end
+
+          # create the request header
+          formatted_header = request_pairs.map { |k, v| "#{k}=#{v}" }.join('; ')
+          context.http_request.headers['amz-sdk-request'] = formatted_header
        end
 
+        def compute_request_ttl(context)
+          return if context.operation.async
+
+          endpoint = context.http_request.endpoint
+          estimated_skew = context.config.clock_skew.estimated_skew(endpoint)
+          if context.config.respond_to?(:http_read_timeout)
+            read_timeout = context.config.http_read_timeout
+          end
+
+          if estimated_skew && read_timeout
+            (Time.now.utc + read_timeout + estimated_skew)
+              .strftime('%Y%m%dT%H%M%SZ')
+          end
+        end
      end
 
-      class
+      class LegacyHandler < Seahorse::Client::Handler
 
        def call(context)
          response = @handler.call(context)
          if response.error
-
+            error_inspector = Retries::ErrorInspector.new(
+              response.error, response.context.http_response.status_code
+            )
+
+            if error_inspector.endpoint_discovery?(context)
+              key = context.config.endpoint_cache.extract_key(context)
+              context.config.endpoint_cache.delete(key)
+            end
+
+            retry_if_possible(response, error_inspector)
          else
            response
          end
@@ -178,21 +366,15 @@ A delay randomiser function used by the default backoff function. Some predefine
 
        private
 
-        def retry_if_possible(response)
+        def retry_if_possible(response, error_inspector)
          context = response.context
-
-
-            retry_request(context, error)
+          if should_retry?(context, error_inspector)
+            retry_request(context, error_inspector)
          else
            response
          end
        end
 
-        def error_for(response)
-          status_code = response.context.http_response.status_code
-          ErrorInspector.new(response.error, status_code)
-        end
-
        def retry_request(context, error)
          delay_retry(context)
          context.retries += 1
@@ -207,9 +389,9 @@ A delay randomiser function used by the default backoff function. Some predefine
        end
 
        def should_retry?(context, error)
-          error.retryable?(context)
-
-
+          error.retryable?(context) &&
+            context.retries < retry_limit(context) &&
+            response_truncatable?(context)
        end
 
        def retry_limit(context)
@@ -219,15 +401,17 @@ A delay randomiser function used by the default backoff function. Some predefine
        def response_truncatable?(context)
          context.http_response.body.respond_to?(:truncate)
        end
-
      end
 
      def add_handlers(handlers, config)
-        if config.
+        if config.retry_mode == 'legacy'
+          if config.retry_limit > 0
+            handlers.add(LegacyHandler, step: :sign, priority: 99)
+          end
+        else
          handlers.add(Handler, step: :sign, priority: 99)
        end
      end
-
    end
  end
end
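The new retry_mode, max_attempts, adaptive_retry_wait_to_fill, and correct_clock_skew settings are regular client configuration, so they can be passed to a client constructor, placed in the shared config file, or supplied through the AWS_RETRY_MODE / AWS_MAX_ATTEMPTS environment variables read by the resolvers above. A minimal sketch against the bundled STS client (the region and credentials are placeholder values):

require 'aws-sdk-sts'

# Placeholder credentials; any credential provider works the same way.
sts = Aws::STS::Client.new(
  region: 'us-east-1',
  credentials: Aws::Credentials.new('AKIDEXAMPLE', 'example-secret-key'),
  retry_mode: 'standard',  # or 'adaptive' / 'legacy'
  max_attempts: 5          # initial attempt plus up to 4 retries
)

sts.config.retry_mode    # => "standard"
sts.config.max_attempts  # => 5

# Equivalent environment-based configuration:
#   AWS_RETRY_MODE=adaptive AWS_MAX_ATTEMPTS=5 ruby app.rb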