aws-sdk-http-async 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +7 -0
- data/LICENSE.txt +202 -0
- data/README.md +382 -0
- data/VERSION +1 -0
- data/exe/async-rake +10 -0
- data/lib/async/aws/auto.rb +1 -0
- data/lib/async/aws/client_cache.rb +527 -0
- data/lib/async/aws/errors.rb +6 -0
- data/lib/async/aws/handler.rb +705 -0
- data/lib/async/aws/http_plugin.rb +36 -0
- data/lib/async/aws/patcher.rb +167 -0
- data/lib/async/aws/rake_patch.rb +13 -0
- data/lib/async/aws/version.rb +11 -0
- data/lib/aws-sdk-http-async/core.rb +10 -0
- data/lib/aws-sdk-http-async/rake.rb +6 -0
- data/lib/aws-sdk-http-async.rb +3 -0
- metadata +254 -0
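
The largest addition is `data/lib/async/aws/handler.rb` (+705 lines, reconstructed below), a Seahorse handler that transmits SDK requests over async-http. Judging from the handler's own error messages, calls are expected to run inside an Async reactor; a minimal usage sketch under that assumption (the S3 client and operation are illustrative, not taken from this diff):

    require 'async'
    require 'aws-sdk-http-async'
    require 'aws-sdk-s3' # illustrative; any AWS SDK client would be wired the same way

    Sync do
      # Inside the reactor the handler uses the async-http transport; outside it,
      # behavior is governed by async_http_fallback (see fallback_mode below).
      s3 = Aws::S3::Client.new
      s3.list_buckets
    end
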
data/lib/async/aws/handler.rb
@@ -0,0 +1,705 @@
require 'async'
require 'async/http'
require 'async/aws/errors'
require 'openssl'
require 'protocol/http'
require 'protocol/http/body/buffered'
begin
  require 'protocol/http2'
rescue LoadError
end
require 'seahorse/client/networking_error'

module Async
  module Aws
    class Handler < Seahorse::Client::Handler
      @transport_mutex = Mutex.new
      @transport_logged = {}

      class << self
        # @param kind [Symbol]
        # @param logger [Logger, nil]
        # @return [void]
        def log_transport_once(kind, logger)
          return unless kind == :async_http

          @transport_mutex.synchronize do
            return if @transport_logged[kind]

            @transport_logged[kind] = true
          end

          logger&.info('[aws-sdk-http-async] using async-http transport')
        end
      end

      class StreamingBody < Protocol::HTTP::Body::Readable
        CHUNK_SIZE = 16 * 1024

        # @param io [#read, #rewind, #size, nil]
        # @param size [Integer, nil]
        # @param max_buffer [Integer, nil]
        # @return [void]
        def initialize(io, size: nil, max_buffer: nil)
          @io = io
          @size = size
          @max_buffer = max_buffer
        end

        # @return [Integer, nil]
        def length
          @size
        end

        # @return [Boolean]
        def rewindable?
          @io.respond_to?(:rewind)
        end

        # @return [Boolean]
        def rewind
          return false unless rewindable?

          @io.rewind
          true
        end

        # @return [String, nil]
        def read
          chunk = read_chunk
          return nil if chunk.nil? || chunk.empty?

          chunk
        end

        # @param error [Exception, nil]
        # @return [void]
        def close(error = nil)
          nil
        end

        # @return [Protocol::HTTP::Body::Buffered, nil]
        def buffered
          return nil unless rewindable?

          original_pos = @io.pos if @io.respond_to?(:pos)
          @io.rewind
          content = +''
          begin
            loop do
              chunk = read_chunk
              break if chunk.nil? || chunk.empty?

              if @max_buffer && (content.bytesize + chunk.bytesize) > @max_buffer
                raise BodyTooLargeError, "async_http_max_buffer_bytes exceeded (#{content.bytesize + chunk.bytesize} > #{@max_buffer})"
              end
              content << chunk
            end
          ensure
            if original_pos && @io.respond_to?(:pos=)
              @io.pos = original_pos
            else
              @io.rewind
            end
          end
          Protocol::HTTP::Body::Buffered.new([content], content.bytesize)
        end

        private

        def read_chunk
          @io.read(CHUNK_SIZE)
        rescue ArgumentError
          if @io.respond_to?(:readpartial)
            begin
              @io.readpartial(CHUNK_SIZE)
            rescue EOFError
              nil
            end
          elsif @max_buffer && @max_buffer > 0
            raise BodyTooLargeError, 'body does not support chunked reads; cannot enforce async_http_max_buffer_bytes'
          else
            @io.read
          end
        end
      end
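
      # Illustrative use of StreamingBody (an assumption, not from the gem's docs):
      # wrapping a rewindable IO so uploads stream in 16 KiB chunks instead of
      # being buffered whole in memory.
      #
      #   file = File.open('payload.bin', 'rb')
      #   body = StreamingBody.new(file, size: file.size, max_buffer: 8 * 1024 * 1024)
      #   body.read      # => first chunk of up to 16 KiB
      #   body.rewind    # => true, so the request can be retried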

      NETWORK_ERRORS = [
        Async::TimeoutError,
        SocketError,
        EOFError,
        IOError,
        Errno::ECONNREFUSED,
        Errno::ECONNRESET,
        Errno::EPIPE,
        Errno::ETIMEDOUT,
        Errno::EADDRNOTAVAIL,
        Errno::ENETDOWN,
        Errno::ENOBUFS,
        Errno::EHOSTUNREACH,
        Errno::ENETUNREACH,
        Errno::ENOTCONN,
        OpenSSL::SSL::SSLError,
        Protocol::HTTP::Error,
      ].tap do |errors|
        if defined?(Async::HTTP::ConnectionError)
          errors << Async::HTTP::ConnectionError
        end
        if defined?(Protocol::HTTP2::Error)
          errors << Protocol::HTTP2::Error
        end
        if defined?(Protocol::HTTP2)
          errors << Protocol::HTTP2::GoawayError if Protocol::HTTP2.const_defined?(:GoawayError)
          errors << Protocol::HTTP2::StreamError if Protocol::HTTP2.const_defined?(:StreamError)
        end
      end.freeze

      DNS_ERROR_PATTERNS = [
        /getaddrinfo/i,
        /nodename nor servname/i,
        /name or service not known/i,
        /host not found/i,
        /temporary failure in name resolution/i,
      ].freeze
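
      # Illustrative check (an assumption, mirroring #networking_error below): a
      # resolver failure matches DNS_ERROR_PATTERNS and the error message gets an
      # "Unable to connect to `<host>`" hint prepended.
      #
      #   msg = 'getaddrinfo: Name or service not known'
      #   DNS_ERROR_PATTERNS.any? { |p| msg.match?(p) } # => true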

      # @param handler [Seahorse::Client::Handler, nil]
      # @param client_cache [Async::Aws::ClientCache, nil]
      # @return [void]
      def initialize(handler = nil, client_cache: nil)
        super(handler)
        @client_cache = client_cache || ClientCache.new
        @fallback_mutex = Mutex.new
        @invalid_fallback_warned = {}
        @invalid_streaming_warned = {}
      end

      # @param context [Seahorse::Client::RequestContext]
      # @return [Seahorse::Client::Response]
      def call(context)
        if event_stream_operation?(context)
          ensure_reactor!
          return delegate_event_stream(context)
        end

        return force_net_http(context) if force_fallback?
        return fallback_handler(context) unless async_context?

        call_with_reactor(context)
      end
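
      # Dispatch order in #call above, summarized:
      #   1. event-stream operations            -> delegate_event_stream (reactor required)
      #   2. AWS_SDK_HTTP_ASYNC_FORCE_NET_HTTP  -> force_net_http
      #   3. no Async reactor running           -> fallback_handler (:net_http / :sync / :raise)
      #   4. otherwise                          -> call_with_reactor over async-http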

      private

      class TruncatedBodyError < IOError
        def initialize(bytes_expected, bytes_received)
          msg = "http response body truncated, expected #{bytes_expected} bytes, received #{bytes_received} bytes"
          super(msg)
        end
      end

      def ensure_reactor!
        return if Async::Task.current?

        raise NoReactorError, 'Async reactor is required. Wrap calls in Sync { }.'
      end

      def async_context?
        Async::Task.current?
      end

      def force_fallback?
        value = ENV.fetch('AWS_SDK_HTTP_ASYNC_FORCE_NET_HTTP', nil)
        return false if value.nil?

        %w[1 true yes].include?(value.to_s.strip.downcase)
      end
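
      # Example (illustrative): forcing the Net::HTTP path without code changes,
      # e.g. to bisect a suspected transport issue. Accepted truthy values are
      # "1", "true", and "yes" (case-insensitive, whitespace-stripped).
      #
      #   AWS_SDK_HTTP_ASYNC_FORCE_NET_HTTP=1 bundle exec ruby app.rb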

      def call_with_reactor(context)
        log_transport_once(:async_http, context.config)
        span_wrapper(context) do
          transmit(context.config, context.http_request, context.http_response)
        end

        Seahorse::Client::Response.new(context:)
      end

      def fallback_handler(context)
        mode = fallback_mode(context.config)

        case mode
        when :raise
          raise NoReactorError, 'Async reactor is required. Wrap calls in Sync { }.'
        when :sync
          Sync { call_with_reactor(context) }
        else
          return @handler.call(context) if @handler

          net_http_handler.call(context)
        end
      end

      def force_net_http(context)
        return @handler.call(context) if @handler

        net_http_handler.call(context)
      end

      def fallback_mode(config)
        env_mode = ENV.fetch('AWS_SDK_HTTP_ASYNC_FALLBACK', nil)
        if env_mode && !env_mode.to_s.strip.empty?
          normalized_env = env_mode.to_s.strip.downcase
          return normalized_env.to_sym if %w[net_http sync raise].include?(normalized_env)

          warn_invalid_fallback_once("ENV['AWS_SDK_HTTP_ASYNC_FALLBACK']", env_mode, config)
        end

        mode = config.async_http_fallback
        return :net_http if mode.nil?

        normalized = mode.to_s.strip.downcase
        return normalized.to_sym if %w[net_http sync raise].include?(normalized)

        warn_invalid_fallback_once('config.async_http_fallback', mode, config)

        :net_http
      end
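
      # The three recognized fallback modes when no reactor is running:
      #   :net_http -> hand the request to the stock Net::HTTP handler (default)
      #   :sync     -> wrap the call in Sync { } and still use async-http
      #   :raise    -> raise NoReactorError
      # Example (illustrative, following the constructor-option style the handler's
      # own error messages use): Aws::S3::Client.new(async_http_fallback: :sync)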

      def warn_invalid_fallback_once(source, value, config)
        key = "#{source}:#{value.inspect}"
        @fallback_mutex.synchronize do
          return if @invalid_fallback_warned[key]

          @invalid_fallback_warned[key] = true
        end
        logger_for(config)&.warn(
          "[aws-sdk-http-async] invalid async_http_fallback #{source}=#{value.inspect}; using :net_http"
        )
      end

      def log_transport_once(kind, config)
        self.class.log_transport_once(kind, logger_for(config))
      end

      def streaming_mode(config)
        mode = config.async_http_streaming_uploads
        return :auto if mode.nil?

        normalized = mode.to_s.strip.downcase
        return normalized.to_sym if %w[auto force off].include?(normalized)

        warn_invalid_streaming_once(mode, config)
        :auto
      end

      def warn_invalid_streaming_once(value, config)
        key = value.inspect
        @fallback_mutex.synchronize do
          return if @invalid_streaming_warned[key]

          @invalid_streaming_warned[key] = true
        end
        logger_for(config)&.warn(
          "[aws-sdk-http-async] invalid async_http_streaming_uploads=#{value.inspect}; using :auto"
        )
      end

      def net_http_handler
        return @net_http_handler if @net_http_handler

        @fallback_mutex.synchronize do
          return @net_http_handler if @net_http_handler

          require 'seahorse/client/net_http/handler'
          @net_http_handler = Seahorse::Client::NetHttp::Handler.new(nil)
        end
      end

      def transmit(config, req, resp)
        total_timeout = config.async_http_total_timeout
        if total_timeout && total_timeout > 0
          Async::Task.current.with_timeout(total_timeout, Async::TimeoutError) do
            transmit_inner(config, req, resp)
          end
        else
          transmit_inner(config, req, resp)
        end
      end
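
      # Example (illustrative): capping a whole request/response cycle at 30 seconds,
      # layered on top of the SDK's per-read http_read_timeout:
      #
      #   Aws::S3::Client.new(async_http_total_timeout: 30)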

      def transmit_inner(config, req, resp)
        cache = config.async_http_client_cache || @client_cache
        runner = ->(client) do
          request = build_request(req, config)
          response = nil

          begin
            response = call_with_timeout(client, request, config)
            bytes_received = 0

            headers = response_headers(response)
            resp.signal_headers(response.status.to_i, headers)

            loop do
              chunk = read_with_timeout(response, config)
              break if chunk.nil? || chunk.empty?

              bytes_received += chunk.bytesize
              resp.signal_data(chunk)
            end

            complete_response(req, resp, bytes_received, headers)
          rescue Async::Stop
            raise
          rescue *NETWORK_ERRORS => error
            resp.signal_error(networking_error(error, req))
          rescue SystemCallError => error
            resp.signal_error(networking_error(error, req))
          rescue StandardError => error
            logger_for(config)&.error("[aws-sdk-http-async] unexpected error: #{error.class}: #{error.message}")
            resp.signal_error(error)
          ensure
            response&.close
          end
        end

        if cache.respond_to?(:with_client)
          cache.with_client(req.endpoint, config, &runner)
        else
          runner.call(cache.client_for(req.endpoint, config))
        end
      end

      def build_request(http_request, config)
        method = http_request.http_method.to_s.upcase
        path = http_request.endpoint.request_uri
        headers = normalize_headers(http_request.headers, config)
        body = prepare_body(http_request.body, http_request.headers, config)
        headers.delete('transfer-encoding') if body.is_a?(String)

        Protocol::HTTP::Request[method, path, headers:, body:]
      end

      def normalize_headers(headers, config)
        normalized = headers.to_h.transform_keys { it.to_s.downcase }
        normalized.delete('host')
        normalized.delete('content-length')
        if config.async_http_force_accept_encoding && !normalized.key?('accept-encoding')
          normalized['accept-encoding'] = ''
        end
        Protocol::HTTP::Headers[normalized]
      end

      def prepare_body(body, headers, config)
        return nil if body.nil?

        if body.is_a?(String)
          enforce_max_buffer!(body.bytesize, config)
          warn_large_body(body.bytesize, config)
          return body
        end

        mode = streaming_mode(config)
        size = body_size(body, headers)
        rewindable = body.respond_to?(:rewind)
        max_buffer = config.async_http_max_buffer_bytes

        if mode == :auto
          return StreamingBody.new(body, size:, max_buffer:) if size && rewindable

          return buffer_body(body, config)
        end

        if mode == :force
          ensure_streaming_retry_safe!(config, rewindable)
          warn_unknown_stream_size(size, config)
          return StreamingBody.new(body, size:, max_buffer:)
        end

        buffer_body(body, config)
      end
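
      # Upload body handling by async_http_streaming_uploads mode, summarized:
      #   :auto  -> stream when the size is known and the body is rewindable;
      #             otherwise buffer in memory (subject to async_http_max_buffer_bytes)
      #   :force -> always stream; raises if the body is non-rewindable while retries are on
      #   :off   -> always buffer
      # Example (illustrative): Aws::S3::Client.new(async_http_streaming_uploads: :force)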

      def buffer_body(body, config)
        return nil if body.nil?

        if body.is_a?(String)
          enforce_max_buffer!(body.bytesize, config)
          warn_large_body(body.bytesize, config)
          return body
        end

        original_pos = body.pos if body.respond_to?(:pos)

        size_hint = if body.respond_to?(:length)
                      body.length
                    elsif body.respond_to?(:size)
                      body.size
                    end
        enforce_max_buffer!(size_hint, config) if size_hint

        content = if size_hint.nil?
                    buffer_unknown_size_body(body, config)
                  else
                    body.read || ''
                  end
        body.rewind if body.respond_to?(:rewind)
        enforce_max_buffer!(content.bytesize, config)
        warn_large_body(content.bytesize, config)
        content
      rescue BodyTooLargeError, StandardError
        if body.respond_to?(:pos=) && !original_pos.nil?
          body.pos = original_pos
        elsif body.respond_to?(:rewind)
          body.rewind
        end
        raise
      end

      def buffer_unknown_size_body(body, config)
        content = +''
        loop do
          chunk = begin
            body.read(StreamingBody::CHUNK_SIZE)
          rescue ArgumentError
            if body.respond_to?(:readpartial)
              begin
                body.readpartial(StreamingBody::CHUNK_SIZE)
              rescue EOFError
                nil
              end
            elsif config.async_http_max_buffer_bytes && config.async_http_max_buffer_bytes > 0
              raise BodyTooLargeError,
                    'body does not support chunked reads; cannot enforce async_http_max_buffer_bytes'
            else
              body.read
            end
          end
          break if chunk.nil? || chunk.empty?

          enforce_max_buffer!(content.bytesize + chunk.bytesize, config)
          content << chunk
        end
        content
      end

      def body_size(body, headers)
        content_length = headers['content-length']
        if content_length && !content_length.to_s.empty?
          return content_length.to_i if content_length.to_s.match?(/\A\d+\z/)
        end
        return body.bytesize if body.is_a?(String)

        if body.respond_to?(:length)
          length = body.length
          return length unless length.nil?
        end
        if body.respond_to?(:size)
          size = body.size
          return size unless size.nil?
        end

        nil
      end
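
      # Size resolution order, with an illustrative (conceptual) case: an explicit
      # numeric Content-Length header wins, then String#bytesize, then #length/#size.
      #
      #   body_size(StringIO.new('abc'), { 'content-length' => '10' }) # => 10
      #   body_size(StringIO.new('abc'), {})                           # => 3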

      def ensure_streaming_retry_safe!(config, rewindable)
        return if rewindable
        return unless retries_enabled?(config)

        raise ArgumentError,
              'Non-rewindable streaming bodies cannot be retried. ' \
              'Use a rewindable body (File, StringIO) or disable retries, e.g. ' \
              'Aws::S3::Client.new(retry_max_attempts: 1, async_http_streaming_uploads: :force).'
      end

      def retries_enabled?(config)
        if config.respond_to?(:max_attempts) && !config.max_attempts.nil?
          return config.max_attempts.to_i > 1
        end

        if config.respond_to?(:retry_max_attempts) && !config.retry_max_attempts.nil?
          return config.retry_max_attempts.to_i > 1
        end

        if config.respond_to?(:retry_limit) && !config.retry_limit.nil?
          return config.retry_limit.to_i > 0
        end

        false
      end

      def warn_unknown_stream_size(size, config)
        return unless size.nil?

        logger_for(config)&.warn(
          '[aws-sdk-http-async] streaming request body with unknown size'
        )
      end

      def delegate_event_stream(context)
        require 'seahorse/client/h2/handler'

        if context.client.respond_to?(:connection)
          @h2_handler ||= Seahorse::Client::H2::Handler.new(nil)
          return @h2_handler.call(context)
        end

        return @handler.call(context) if @handler

        raise ArgumentError, 'event stream operations require an Async client (Seahorse::Client::AsyncBase) or a native H2 handler'
      end

      def read_with_timeout(response, config)
        timeout = config.http_read_timeout
        body = response.body
        return nil if body.nil?
        return read_response_chunk(body) if timeout.nil? || timeout <= 0

        Async::Task.current.with_timeout(timeout, Async::TimeoutError) { read_response_chunk(body) }
      end

      def read_response_chunk(body)
        body.read(StreamingBody::CHUNK_SIZE)
      rescue ArgumentError
        if body.respond_to?(:readpartial)
          begin
            body.readpartial(StreamingBody::CHUNK_SIZE)
          rescue EOFError
            nil
          end
        else
          body.read
        end
      end

      def call_with_timeout(client, request, config)
        header_timeout = config.async_http_header_timeout
        if header_timeout && header_timeout > 0
          return Async::Task.current.with_timeout(header_timeout, Async::TimeoutError) { client.call(request) }
        end

        timeout = config.http_read_timeout
        return client.call(request) if timeout.nil? || timeout <= 0

        body = request.body
        return client.call(request) unless body.nil? || body.is_a?(String) || body.is_a?(Protocol::HTTP::Body::Buffered)

        Async::Task.current.with_timeout(timeout, Async::TimeoutError) { client.call(request) }
      end

      def complete_response(req, resp, bytes_received, headers)
        content_length = headers['content-length']&.to_i
        content_encoding = headers['content-encoding']
        if req.http_method != 'HEAD' && content_length && (content_encoding.nil? || content_encoding.empty?) &&
           bytes_received != content_length
          error = TruncatedBodyError.new(content_length, bytes_received)
          resp.signal_error(Seahorse::Client::NetworkingError.new(error, error.message))
        else
          resp.signal_done
        end
      end

      def networking_error(error, req)
        message = error.message
        if DNS_ERROR_PATTERNS.any? { |pattern| message.match?(pattern) }
          message = "Unable to connect to `#{req.endpoint.host}`: #{message}"
        end

        Seahorse::Client::NetworkingError.new(error, message)
      end

      def span_wrapper(context)
        context.tracer.in_span(
          'Handler.AsyncHttp',
          attributes: ::Aws::Telemetry.http_request_attrs(context),
        ) do |span|
          yield
          span.add_attributes(::Aws::Telemetry.http_response_attrs(context))
        end
      end

      def response_headers(response)
        headers = {}
        set_cookies = []
        set_cookie2 = []

        # NOTE: HTTP allows duplicate header names, but Seahorse expects Hash<String, String>.
        # Multiple values are joined with ", " (comma-space). This is correct for most headers.
        # Set-Cookie/Set-Cookie2 are joined with "\n" to preserve cookie boundaries per RFC 6265.
        response.headers.each do |key, value|
          key = key.downcase
          if key == 'set-cookie'
            set_cookies << value.to_s
            next
          end
          if key == 'set-cookie2'
            set_cookie2 << value.to_s
            next
          end

          headers[key] = headers[key] ? "#{headers[key]}, #{value}" : value.to_s
        end

        headers['set-cookie'] = set_cookies.join("\n") unless set_cookies.empty?
        headers['set-cookie2'] = set_cookie2.join("\n") unless set_cookie2.empty?

        headers
      end
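
      # Illustrative result of the Set-Cookie handling above: two cookies arriving
      # as separate header lines are preserved as newline-separated values rather
      # than being comma-joined.
      #
      #   # raw response: Set-Cookie: a=1  /  Set-Cookie: b=2
      #   response_headers(response)['set-cookie'] # => "a=1\nb=2"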

      def warn_large_body(size, config)
        limit = config.async_http_body_warn_bytes
        return if limit.nil? || limit <= 0 || size <= limit

        logger_for(config)&.warn(
          "[aws-sdk-http-async] request body buffered in memory (#{size} bytes)"
        )
      end

      def enforce_max_buffer!(size, config)
        limit = config.async_http_max_buffer_bytes
        return if limit.nil? || limit <= 0 || size.nil?
        return if size <= limit

        raise BodyTooLargeError, "buffered body size #{size} exceeds async_http_max_buffer_bytes=#{limit}"
      end

      def event_stream_operation?(context)
        return true if context[:input_event_stream_handler] ||
                       context[:output_event_stream_handler] ||
                       context[:event_stream_handler]

        operation = context.operation
        return false unless operation

        shape_ref_eventstream?(operation.input) || shape_ref_eventstream?(operation.output)
      end

      def shape_ref_eventstream?(shape_ref)
        return false unless shape_ref

        payload = shape_ref[:payload_member]
        return true if payload && payload.eventstream

        shape = shape_ref.shape
        return false unless shape.respond_to?(:members)

        members = shape.members
        return false unless members

        if members.respond_to?(:each_value)
          members.each_value do |ref|
            return true if ref.eventstream
          end
        else
          members.each do |item|
            ref = item.is_a?(Array) ? item.last : item
            return true if ref.respond_to?(:eventstream) && ref.eventstream
          end
        end

        false
      end

      def logger_for(config)
        config.logger || ::Aws.config[:logger]
      end
    end
  end
end
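
Taken together, the handler reads its tuning knobs from the Seahorse config. A hedged sketch combining them, assuming they are accepted as client constructor options in the style the handler's own error messages use:

    Aws::S3::Client.new(
      async_http_fallback: :sync,                    # behavior outside a reactor
      async_http_streaming_uploads: :auto,           # :auto / :force / :off
      async_http_total_timeout: 30,                  # whole-call deadline, seconds
      async_http_header_timeout: 10,                 # time allowed until response headers
      async_http_max_buffer_bytes: 8 * 1024 * 1024,  # hard cap on in-memory buffering
      async_http_body_warn_bytes: 1 * 1024 * 1024    # log when buffering beyond this
    )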