aws-sdk-http-async 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +7 -0
- data/LICENSE.txt +202 -0
- data/README.md +382 -0
- data/VERSION +1 -0
- data/exe/async-rake +10 -0
- data/lib/async/aws/auto.rb +1 -0
- data/lib/async/aws/client_cache.rb +527 -0
- data/lib/async/aws/errors.rb +6 -0
- data/lib/async/aws/handler.rb +705 -0
- data/lib/async/aws/http_plugin.rb +36 -0
- data/lib/async/aws/patcher.rb +167 -0
- data/lib/async/aws/rake_patch.rb +13 -0
- data/lib/async/aws/version.rb +11 -0
- data/lib/aws-sdk-http-async/core.rb +10 -0
- data/lib/aws-sdk-http-async/rake.rb +6 -0
- data/lib/aws-sdk-http-async.rb +3 -0
- metadata +254 -0
data/lib/async/aws/client_cache.rb

@@ -0,0 +1,527 @@
+require 'async/http'
+require 'async/http/proxy'
+require 'async/aws/errors'
+require 'base64'
+require 'digest'
+require 'openssl'
+require 'thread'
+require 'uri'
+require 'weakref'
+
+module Async
+  module Aws
+    class ClientCache
+      @default_cert_store_mutex = Mutex.new
+      @default_cert_store = nil
+
+      def self.default_cert_store
+        return @default_cert_store if @default_cert_store
+
+        @default_cert_store_mutex.synchronize do
+          return @default_cert_store if @default_cert_store
+
+          store = OpenSSL::X509::Store.new
+          store.set_default_paths
+          @default_cert_store = store
+        end
+      end
+
+      Entry = Struct.new(:client, :reactor_ref, :inflight)
+
+      class ProxyClient
+        # @param client [Async::HTTP::Client]
+        # @param proxy_client [Async::HTTP::Client]
+        # @return [void]
+        def initialize(client, proxy_client)
+          @client = client
+          @proxy_client = proxy_client
+        end
+
+        # @param request [Protocol::HTTP::Request]
+        # @return [Async::HTTP::Response]
+        def call(request, &)
+          @client.call(request, &)
+        end
+
+        # @return [void]
+        def close
+          @client.close
+        ensure
+          @proxy_client.close
+        end
+      end
+
+      # @return [void]
+      def initialize
+        @clients = {}
+        @mutex = Mutex.new
+        @access_count = 0
+      end
+
+      # @param endpoint [URI::HTTP, URI::HTTPS]
+      # @param config [Seahorse::Client::Configuration]
+      # @return [Async::HTTP::Client]
+      def client_for(endpoint, config)
+        raise NoReactorError, 'Async reactor is required. Wrap calls in Sync { }.' unless Async::Task.current?
+
+        reactor = Async::Task.current.reactor
+        key = cache_key(endpoint, config, reactor)
+        entry = nil
+        stale_entry = nil
+
+        entry = @mutex.synchronize do
+          cached = @clients[key]
+          if entry_valid_for?(cached, reactor)
+            touch_lru!(key, cached)
+            cached
+          else
+            stale_entry = @clients.delete(key) if cached
+            nil
+          end
+        end
+
+        close_entry(stale_entry) if stale_entry
+        sweep_dead_entries_if_needed.each { |dead| close_entry(dead) }
+        return entry.client if entry
+
+        new_entry = nil
+        evicted = []
+        used_entry = nil
+        stale_existing = nil
+
+        new_entry = Entry.new(build_client(endpoint, config), WeakRef.new(reactor), 0)
+
+        @mutex.synchronize do
+          existing = @clients[key]
+          if entry_valid_for?(existing, reactor)
+            touch_lru!(key, existing)
+            used_entry = existing
+          else
+            stale_existing = existing
+            @clients[key] = new_entry
+            touch_lru!(key, new_entry)
+            used_entry = new_entry
+            evicted = evict_entries_locked(config, reactor)
+          end
+        end
+
+        close_entry(new_entry) if used_entry != new_entry
+        close_entry(stale_existing) if stale_existing
+        evicted.each { |entry_to_close| close_entry(entry_to_close) }
+
+        used_entry.client
+      end
+
+      # @param endpoint [URI::HTTP, URI::HTTPS]
+      # @param config [Seahorse::Client::Configuration]
+      # @yieldparam client [Async::HTTP::Client]
+      # @return [Object]
+      def with_client(endpoint, config)
+        raise NoReactorError, 'Async reactor is required. Wrap calls in Sync { }.' unless Async::Task.current?
+
+        reactor = Async::Task.current.reactor
+        key = cache_key(endpoint, config, reactor)
+        entry = nil
+        stale_entry = nil
+
+        entry = @mutex.synchronize do
+          cached = @clients[key]
+          if entry_valid_for?(cached, reactor)
+            touch_lru!(key, cached)
+            cached.inflight = cached.inflight.to_i + 1
+            cached
+          else
+            stale_entry = @clients.delete(key) if cached
+            nil
+          end
+        end
+
+        close_entry(stale_entry) if stale_entry
+        sweep_dead_entries_if_needed.each { |dead| close_entry(dead) }
+
+        unless entry
+          new_entry = Entry.new(build_client(endpoint, config), WeakRef.new(reactor), 0)
+          evicted = []
+          used_entry = nil
+          stale_existing = nil
+
+          @mutex.synchronize do
+            existing = @clients[key]
+            if entry_valid_for?(existing, reactor)
+              touch_lru!(key, existing)
+              existing.inflight = existing.inflight.to_i + 1
+              used_entry = existing
+            else
+              stale_existing = existing
+              @clients[key] = new_entry
+              touch_lru!(key, new_entry)
+              new_entry.inflight = new_entry.inflight.to_i + 1
+              used_entry = new_entry
+              evicted = evict_entries_locked(config, reactor)
+            end
+          end
+
+          close_entry(new_entry) if used_entry != new_entry
+          close_entry(stale_existing) if stale_existing
+          evicted.each { |entry_to_close| close_entry(entry_to_close) }
+
+          entry = used_entry
+        end
+
+        begin
+          yield extract_client(entry)
+        ensure
+          @mutex.synchronize do
+            if entry
+              entry.inflight = [entry.inflight.to_i - 1, 0].max
+            end
+          end
+        end
+      end
+
+      # Closes all cached clients and clears the cache. Intended for shutdown only.
+      #
+      # @return [void]
+      def clear!(timeout: nil)
+        clients = @mutex.synchronize do
+          values = @clients.values
+          @clients.clear
+          values
+        end
+
+        clients.each do |client_entry|
+          current_reactor = Async::Task.current? ? Async::Task.current.reactor : nil
+          owner_reactor = entry_reactor(client_entry)
+          if timeout && current_reactor && owner_reactor == current_reactor
+            begin
+              client = extract_client(client_entry)
+              Async::Task.current.with_timeout(timeout, Async::TimeoutError) { client.close if client.respond_to?(:close) }
+            rescue Async::TimeoutError
+              logger = logger_for
+              logger&.warn('[aws-sdk-http-async] force-closing client (timeout)')
+              close_entry(client_entry, force: true)
+            rescue StandardError => e
+              logger = logger_for
+              logger&.warn("[aws-sdk-http-async] failed to close client: #{e.message}")
+            end
+          else
+            close_entry(client_entry, force: true)
+          end
+        end
+      end
+
+      # @return [void]
+      def close!
+        clear!
+      end
+
+      private
+
+      def cache_key(endpoint, config, reactor)
+        reactor_id = reactor.object_id
+        "#{reactor_id}|#{endpoint.scheme}://#{endpoint.host}:#{endpoint.port}|" \
+          "limit=#{config.async_http_connection_limit}|" \
+          "timeout=#{config.http_open_timeout}|" \
+          "idle=#{config.async_http_idle_timeout}|" \
+          "proxy=#{proxy_cache_value(config.http_proxy)}|" \
+          "ssl=#{config.ssl_verify_peer}|" \
+          "ca_store=#{ssl_cache_value(config.ssl_ca_store)}|" \
+          "ca_bundle=#{config.ssl_ca_bundle}|" \
+          "ca_dir=#{config.ssl_ca_directory}|" \
+          "ssl_cert=#{ssl_cache_value(config.ssl_cert)}|" \
+          "ssl_key=#{ssl_cache_value(config.ssl_key)}"
+      end
+
+      def entry_valid_for?(entry, reactor)
+        return false unless entry.is_a?(Entry)
+        ref = entry.reactor_ref
+        return false unless ref&.weakref_alive?
+
+        ref.__getobj__.equal?(reactor)
+      rescue WeakRef::RefError
+        false
+      end
+
+      def touch_lru!(key, entry)
+        @clients.delete(key)
+        @clients[key] = entry
+      end
+
+      def evict_entries_locked(config, reactor)
+        limit = config.async_http_max_cached_clients
+        return [] if limit.nil? || limit <= 0
+
+        evicted = []
+        current_size = 0
+        dead_keys = []
+        @clients.each do |key, entry|
+          if entry_dead?(entry)
+            dead_keys << key
+            evicted << entry
+            next
+          end
+          current_size += 1 if entry_valid_for?(entry, reactor)
+        end
+        dead_keys.each { |key| @clients.delete(key) }
+
+        while current_size > limit
+          key = @clients.keys.find do |candidate|
+            candidate_entry = @clients[candidate]
+            entry_valid_for?(candidate_entry, reactor) && candidate_entry.inflight.to_i <= 0
+          end
+          break unless key
+
+          entry = @clients.delete(key)
+          evicted << entry if entry
+          current_size -= 1
+        end
+        evicted
+      end
+
+      def close_entry(entry, force: false)
+        client = extract_client(entry)
+        return unless client.respond_to?(:close)
+
+        reactor = entry_reactor(entry)
+        current_reactor = Async::Task.current? ? Async::Task.current.reactor : nil
+        if reactor && reactor != current_reactor && !force
+          logger = logger_for
+          logger&.debug('[aws-sdk-http-async] skipping close from different reactor')
+          return
+        end
+        if reactor && reactor != current_reactor && force
+          logger = logger_for
+          logger&.debug('[aws-sdk-http-async] force-closing client from different reactor')
+        end
+
+        safe_close(client)
+      rescue StandardError => e
+        logger = logger_for
+        logger&.warn("[aws-sdk-http-async] failed to close client: #{e.message}")
+      end
+
+      def extract_client(entry)
+        return entry.client if entry.is_a?(Entry)
+
+        entry
+      end
+
+      def entry_reactor(entry)
+        return nil unless entry.is_a?(Entry)
+
+        ref = entry.reactor_ref
+        return nil unless ref&.weakref_alive?
+
+        ref.__getobj__
+      rescue WeakRef::RefError
+        nil
+      end
+
+      def entry_dead?(entry)
+        return false unless entry.is_a?(Entry)
+
+        ref = entry.reactor_ref
+        return true unless ref&.weakref_alive?
+
+        false
+      rescue WeakRef::RefError
+        true
+      end
+
+      def sweep_dead_entries_if_needed
+        do_sweep = false
+        @mutex.synchronize do
+          @access_count += 1
+          do_sweep = (@access_count % 100).zero?
+        end
+        return [] unless do_sweep
+
+        sweep_dead_entries
+      end
+
+      def sweep_dead_entries
+        dead = []
+        @mutex.synchronize do
+          dead_keys = []
+          @clients.each do |key, entry|
+            next unless entry_dead?(entry)
+
+            dead_keys << key
+            dead << entry
+          end
+          dead_keys.each { |key| @clients.delete(key) }
+        end
+        dead
+      end
+
+      def safe_close(client)
+        client.close
+      rescue StandardError => e
+        logger = logger_for
+        logger&.warn("[aws-sdk-http-async] failed to close client: #{e.message}")
+      end
+
+      def build_client(endpoint, config)
+        target_endpoint = build_endpoint(endpoint, config)
+        return Async::HTTP::Client.new(
+          target_endpoint,
+          retries: 0,
+          limit: config.async_http_connection_limit,
+        ) unless config.http_proxy
+
+        proxy_endpoint = build_proxy_endpoint(config)
+        proxy_client = Async::HTTP::Client.new(
+          proxy_endpoint,
+          retries: 0,
+          limit: config.async_http_connection_limit,
+        )
+        headers = proxy_headers(proxy_endpoint.url)
+        proxied_endpoint = proxy_client.proxied_endpoint(target_endpoint, headers)
+        client = Async::HTTP::Client.new(
+          proxied_endpoint,
+          retries: 0,
+          limit: config.async_http_connection_limit,
+        )
+        ProxyClient.new(client, proxy_client)
+      end
+
+      def build_endpoint(endpoint, config)
+        Async::HTTP::Endpoint.parse(
+          endpoint.to_s,
+          timeout: build_timeout(config),
+          ssl_context: ssl_context(config, endpoint),
+        )
+      end
+
+      def build_proxy_endpoint(config)
+        url = config.http_proxy.to_s
+        endpoint = Async::HTTP::Endpoint.parse(
+          url,
+          timeout: build_timeout(config),
+          ssl_context: ssl_context(config, URI.parse(url)),
+        )
+        endpoint
+      end
+
+      def build_timeout(config)
+        open_timeout = config.http_open_timeout
+        return open_timeout if open_timeout && open_timeout > 0
+
+        idle = config.async_http_idle_timeout
+        return idle if idle && idle > 0
+
+        nil
+      end
+
+      def ssl_context(config, endpoint)
+        return nil unless endpoint.scheme == 'https'
+
+        OpenSSL::SSL::SSLContext.new.tap do |context|
+          context.verify_mode = config.ssl_verify_peer ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
+          if config.ssl_verify_peer && context.respond_to?(:verify_hostname=)
+            context.verify_hostname = true
+          end
+          if config.ssl_ca_store
+            context.cert_store = config.ssl_ca_store
+          else
+            context.ca_file = config.ssl_ca_bundle if config.ssl_ca_bundle
+            context.ca_path = config.ssl_ca_directory if config.ssl_ca_directory
+            unless config.ssl_ca_bundle || config.ssl_ca_directory
+              context.cert_store = self.class.default_cert_store
+            end
+          end
+
+          cert = load_certificate(config.ssl_cert, config)
+          context.cert = cert if cert
+
+          key = load_private_key(config.ssl_key, config)
+          context.key = key if key
+        end
+      end
+
+      def load_certificate(value, _config)
+        return nil if value.nil?
+        if value.respond_to?(:empty?) && value.empty?
+          raise ArgumentError, 'ssl_cert cannot be empty; set to nil to disable or provide a valid path'
+        end
+        return value if value.is_a?(OpenSSL::X509::Certificate)
+        if value.is_a?(String) || value.respond_to?(:to_path)
+          path = value.is_a?(String) ? value : value.to_path
+          if Async::Task.current?
+            logger = logger_for
+            logger&.debug('[aws-sdk-http-async] loading ssl_cert from path inside reactor (blocking IO)')
+          end
+          return OpenSSL::X509::Certificate.new(File.read(path))
+        end
+
+        raise ArgumentError, "ssl_cert must be an OpenSSL::X509::Certificate or a file path (got #{value.class})"
+      rescue StandardError => e
+        raise ArgumentError, "failed to load ssl_cert: #{e.message}"
+      end
+
+      def load_private_key(value, _config)
+        return nil if value.nil?
+        if value.respond_to?(:empty?) && value.empty?
+          raise ArgumentError, 'ssl_key cannot be empty; set to nil to disable or provide a valid path'
+        end
+        return value if value.is_a?(OpenSSL::PKey::PKey)
+        if value.is_a?(String) || value.respond_to?(:to_path)
+          path = value.is_a?(String) ? value : value.to_path
+          if Async::Task.current?
+            logger = logger_for
+            logger&.debug('[aws-sdk-http-async] loading ssl_key from path inside reactor (blocking IO)')
+          end
+          return OpenSSL::PKey.read(File.read(path))
+        end
+
+        raise ArgumentError, "ssl_key must be an OpenSSL::PKey or a file path (got #{value.class})"
+      rescue StandardError => e
+        raise ArgumentError, "failed to load ssl_key: #{e.message}"
+      end
+
+      def ssl_cache_value(value)
+        return nil if value.nil?
+        return value if value.is_a?(String)
+        return value.to_path if value.respond_to?(:to_path)
+
+        value.object_id.to_s
+      end
+
+      def proxy_headers(proxy_url)
+        return nil unless proxy_url.respond_to?(:user) && proxy_url.user
+
+        user = URI::DEFAULT_PARSER.unescape(proxy_url.user)
+        password = URI::DEFAULT_PARSER.unescape(proxy_url.password.to_s)
+        token = "#{user}:#{password}"
+        encoded = Base64.strict_encode64(token)
+
+        [['proxy-authorization', "Basic #{encoded}"]]
+      end
+
+      def proxy_cache_value(proxy)
+        return nil if proxy.nil?
+
+        uri = URI.parse(proxy.to_s)
+        return proxy.to_s unless uri.user
+
+        user = URI::DEFAULT_PARSER.unescape(uri.user)
+        password = URI::DEFAULT_PARSER.unescape(uri.password.to_s)
+        auth_hash = Digest::SHA256.hexdigest("#{user}:#{password}")
+
+        sanitized = uri.dup
+        sanitized.user = nil
+        sanitized.password = nil
+
+        "#{sanitized}#auth=#{auth_hash}"
+      rescue URI::InvalidURIError
+        proxy.to_s
+      end
+
+      def logger_for
+        return ::Aws.config[:logger] if defined?(::Aws)
+
+        nil
+      end
+    end
+  end
+end
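
For orientation, a minimal usage sketch of how a cache like this ends up being exercised from application code. It assumes the gem's top-level require (per the file list above) wires the async HTTP handler into the AWS SDK; the S3 client, region, and call are illustrative and not taken from this diff. The constraint visible in `client_for`/`with_client` is that calls must run inside an Async reactor, otherwise `NoReactorError` is raised with the hint to wrap calls in `Sync { }`:

```ruby
require 'async'               # provides Sync { } and the Async reactor
require 'aws-sdk-s3'          # any AWS SDK service client (illustrative)
require 'aws-sdk-http-async'  # assumed top-level entry point per the file list above

# Hypothetical illustration: ClientCache only hands out Async::HTTP clients
# while a reactor is running, so SDK calls are wrapped in Sync { }.
Sync do
  s3 = Aws::S3::Client.new(region: 'us-east-1')
  s3.list_buckets.buckets.each { |bucket| puts bucket.name }
end
```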