httpx 1.4.4 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/doc/release_notes/1_5_0.md +126 -0
- data/lib/httpx/adapters/datadog.rb +24 -3
- data/lib/httpx/adapters/webmock.rb +1 -0
- data/lib/httpx/buffer.rb +16 -5
- data/lib/httpx/connection/http1.rb +8 -9
- data/lib/httpx/connection/http2.rb +48 -24
- data/lib/httpx/connection.rb +36 -19
- data/lib/httpx/errors.rb +2 -11
- data/lib/httpx/headers.rb +24 -23
- data/lib/httpx/io/ssl.rb +2 -1
- data/lib/httpx/io/tcp.rb +9 -7
- data/lib/httpx/io/unix.rb +1 -1
- data/lib/httpx/loggable.rb +13 -1
- data/lib/httpx/options.rb +63 -48
- data/lib/httpx/parser/http1.rb +1 -1
- data/lib/httpx/plugins/aws_sigv4.rb +1 -0
- data/lib/httpx/plugins/callbacks.rb +19 -6
- data/lib/httpx/plugins/circuit_breaker.rb +4 -3
- data/lib/httpx/plugins/cookies/jar.rb +0 -2
- data/lib/httpx/plugins/cookies/set_cookie_parser.rb +7 -4
- data/lib/httpx/plugins/cookies.rb +4 -4
- data/lib/httpx/plugins/follow_redirects.rb +4 -2
- data/lib/httpx/plugins/grpc/call.rb +1 -1
- data/lib/httpx/plugins/h2c.rb +7 -1
- data/lib/httpx/plugins/persistent.rb +22 -1
- data/lib/httpx/plugins/proxy/http.rb +3 -1
- data/lib/httpx/plugins/query.rb +35 -0
- data/lib/httpx/plugins/response_cache/file_store.rb +115 -15
- data/lib/httpx/plugins/response_cache/store.rb +7 -67
- data/lib/httpx/plugins/response_cache.rb +179 -29
- data/lib/httpx/plugins/retries.rb +26 -14
- data/lib/httpx/plugins/stream.rb +4 -2
- data/lib/httpx/plugins/stream_bidi.rb +315 -0
- data/lib/httpx/pool.rb +58 -5
- data/lib/httpx/request/body.rb +1 -1
- data/lib/httpx/request.rb +6 -2
- data/lib/httpx/resolver/https.rb +10 -4
- data/lib/httpx/resolver/native.rb +13 -13
- data/lib/httpx/resolver/resolver.rb +4 -0
- data/lib/httpx/resolver/system.rb +37 -14
- data/lib/httpx/resolver.rb +2 -2
- data/lib/httpx/response/body.rb +10 -21
- data/lib/httpx/response/buffer.rb +36 -12
- data/lib/httpx/response.rb +11 -1
- data/lib/httpx/selector.rb +16 -12
- data/lib/httpx/session.rb +79 -19
- data/lib/httpx/timers.rb +24 -16
- data/lib/httpx/transcoder/multipart/decoder.rb +4 -2
- data/lib/httpx/transcoder/multipart/encoder.rb +2 -1
- data/lib/httpx/version.rb +1 -1
- data/sig/buffer.rbs +1 -1
- data/sig/chainable.rbs +5 -2
- data/sig/connection/http2.rbs +11 -2
- data/sig/connection.rbs +4 -4
- data/sig/errors.rbs +0 -3
- data/sig/headers.rbs +15 -10
- data/sig/httpx.rbs +5 -1
- data/sig/io/tcp.rbs +6 -0
- data/sig/loggable.rbs +2 -0
- data/sig/options.rbs +7 -1
- data/sig/plugins/cookies/cookie.rbs +1 -3
- data/sig/plugins/cookies/jar.rbs +4 -4
- data/sig/plugins/cookies/set_cookie_parser.rbs +22 -0
- data/sig/plugins/cookies.rbs +2 -0
- data/sig/plugins/h2c.rbs +4 -0
- data/sig/plugins/proxy/http.rbs +3 -0
- data/sig/plugins/proxy.rbs +4 -0
- data/sig/plugins/query.rbs +18 -0
- data/sig/plugins/response_cache/file_store.rbs +19 -0
- data/sig/plugins/response_cache/store.rbs +13 -0
- data/sig/plugins/response_cache.rbs +41 -19
- data/sig/plugins/retries.rbs +4 -3
- data/sig/plugins/stream.rbs +5 -1
- data/sig/plugins/stream_bidi.rbs +68 -0
- data/sig/plugins/upgrade/h2.rbs +9 -0
- data/sig/plugins/upgrade.rbs +5 -0
- data/sig/pool.rbs +5 -0
- data/sig/punycode.rbs +5 -0
- data/sig/request.rbs +2 -0
- data/sig/resolver/https.rbs +3 -2
- data/sig/resolver/native.rbs +1 -2
- data/sig/resolver/resolver.rbs +11 -3
- data/sig/resolver/system.rbs +19 -2
- data/sig/resolver.rbs +11 -7
- data/sig/response/body.rbs +3 -4
- data/sig/response/buffer.rbs +2 -3
- data/sig/response.rbs +2 -2
- data/sig/selector.rbs +20 -10
- data/sig/session.rbs +14 -6
- data/sig/timers.rbs +5 -7
- data/sig/transcoder/multipart.rbs +4 -3
- metadata +13 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 862a55f8ae6fa87e3630f33b9a831ab0867a9f0e7e97078c3bcf1e180bbb896a
|
4
|
+
data.tar.gz: a7664a44427245f87a93d7dd20404088a97b178a6bc0c7559b1bd0caa246a83a
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: ec4fbde6b88bfc5886f651ef755f039c9037e90877dad38fc439d60af95b755e6946c41bee474db91bd801c8b8220371cf9adfd781aa3d4251f1f4de992dc356
|
7
|
+
data.tar.gz: f230f58ebff2fba1f52af295af8489aaecb23faf4c55e7eecd2735a968c3ff40788a93ab6c4c2426a5fe50b4d981dc1320315ca0c672051e1e5d67a96d5ac365
|
@@ -0,0 +1,126 @@
|
|
1
|
+
# 1.5.0
|
2
|
+
|
3
|
+
## Features
|
4
|
+
|
5
|
+
### `:stream_bidi` plugin
|
6
|
+
|
7
|
+
The `:stream_bidi` plugin enables bidirectional streaming support (an HTTP/2 only feature!). It builds on top of the `:stream` plugin, and uses its block-based syntax to process incoming frames, while allowing the user to pipe more data to the request (from the same, or another thread/fiber).
|
8
|
+
|
9
|
+
```ruby
|
10
|
+
http = HTTPX.plugin(:stream_bidi)
|
11
|
+
request = http.build_request(
|
12
|
+
"POST",
|
13
|
+
"https://your-origin.com/stream",
|
14
|
+
headers: { "content-type" => "application/x-ndjson" },
|
15
|
+
body: ["{\"message\":\"started\"}\n"]
|
16
|
+
)
|
17
|
+
|
18
|
+
chunks = []
|
19
|
+
|
20
|
+
response = http.request(request, stream: true)
|
21
|
+
|
22
|
+
Thread.start do
|
23
|
+
response.each do |chunk|
|
24
|
+
handle_data(chunk)
|
25
|
+
end
|
26
|
+
end
|
27
|
+
|
28
|
+
# now send data...
|
29
|
+
request << "{\"message\":\"foo\"}\n"
|
30
|
+
request << "{\"message\":\"bar\"}\n"
|
31
|
+
# ...
|
32
|
+
```
|
33
|
+
|
34
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Stream-Bidi
|
35
|
+
|
36
|
+
### `:query` plugin
|
37
|
+
|
38
|
+
The `:query` plugin adds public methods supporting the `QUERY` HTTP verb:
|
39
|
+
|
40
|
+
```ruby
|
41
|
+
http = HTTPX.plugin(:query)
|
42
|
+
|
43
|
+
http.query("https://example.com/gquery", body: "foo=bar") # QUERY /gquery ....
|
44
|
+
```
|
45
|
+
|
46
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Query
|
47
|
+
|
48
|
+
This functionality was added as a plugin for explicit opt-in, as it's experimental (the RFC for the new HTTP verb is still in draft).
|
49
|
+
|
50
|
+
### `:response_cache` plugin filesystem based store
|
51
|
+
|
52
|
+
The `:response_cache` plugin supports setting the filesystem as the response cache store (instead of just storing them in memory, which is the default `:store`).
|
53
|
+
|
54
|
+
```ruby
|
55
|
+
# cache store in the filesystem, writes to the temporary directory from the OS
|
56
|
+
http = HTTPX.plugin(:response_cache, response_cache_store: :file_store)
|
57
|
+
# if you want a separate location
|
58
|
+
http = HTTPX.plugin(:response_cache).with(response_cache_store: HTTPX::Plugins::ResponseCache::FileStore.new("/path/to/dir"))
|
59
|
+
```
|
60
|
+
|
61
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Response-Cache#:file_store
|
62
|
+
|
63
|
+
### `:close_on_fork` option
|
64
|
+
|
65
|
+
A new option `:close_on_fork` can be used to ensure that a session object which may have open connections will not leak them in case the process is forked (this can be the case of `:persistent` plugin enabled sessions which have had usage before fork):
|
66
|
+
|
67
|
+
```ruby
|
68
|
+
http = HTTPX.plugin(:persistent, close_on_fork: true)
|
69
|
+
|
70
|
+
# http may have open connections here
|
71
|
+
fork do
|
72
|
+
# http has no connections here
|
73
|
+
end
|
74
|
+
```
|
75
|
+
|
76
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools#Fork-Safety .
|
77
|
+
|
78
|
+
### `:debug_redact` option
|
79
|
+
|
80
|
+
The `:debug_redact` option will, when enabled, replace parts of the debug logs (enabled via `:debug` and `:debug_level` options) which may contain sensitive information, with the `"[REDACTED]"` placeholder.
|
81
|
+
|
82
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Debugging .
|
83
|
+
|
84
|
+
### `:max_connections` pool option
|
85
|
+
|
86
|
+
A new `:max_connections` pool option (settable under `:pool_options`) can be used to define the maximum number **overall** of connections for a pool ("in-transit" or "at-rest"); this complements, and supersedes when used, the already existing `:max_connections_per_origin`, which does the same per connection origin.
|
87
|
+
|
88
|
+
```ruby
|
89
|
+
HTTPX.with(pool_options: { max_connections: 100 })
|
90
|
+
```
|
91
|
+
|
92
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Connection-Pools .
|
93
|
+
|
94
|
+
### Subplugins
|
95
|
+
|
96
|
+
An enhancement to the plugins architecture, it allows plugins to define submodules ("subplugins") which are loaded if another plugin is in use, or is loaded afterwards.
|
97
|
+
|
98
|
+
You can read more about it in https://honeyryderchuck.gitlab.io/httpx/wiki/Custom-Plugins#Subplugins .
|
99
|
+
|
100
|
+
## Improvements
|
101
|
+
|
102
|
+
* `:persistent` plugin: several improvements around reconnections of failure:
|
103
|
+
* reconnections will only happen for "connection broken" errors (and will discard reconnection on timeouts)
|
104
|
+
* reconnections won't exhaust retries
|
105
|
+
* `:response_cache` plugin: several improvements:
|
106
|
+
* return cached response if not stale, send conditional request otherwise (it was always doing the latter).
|
107
|
+
* consider immutable (i.e. `"Cache-Control: immutable"`) responses as never stale.
|
108
|
+
* `:datadog` adapter: decorate spans with more tags (header, kind, component, etc...)
|
109
|
+
* timers operations have been improved to use more efficient algorithms and reduce object creation.
|
110
|
+
|
111
|
+
## Bugfixes
|
112
|
+
|
113
|
+
* ensure that setting request timeouts happens before the request is buffered (the latter could trigger a state transition required by the former).
|
114
|
+
* `:response_cache` plugin: fix `"Vary"` header handling by supporting a new plugin option, `:supported_vary_headers`, which defines which headers are taken into account for cache key calculation.
|
115
|
+
* fixed query string encoded value when passed an empty hash to the `:query` param and the URL already contains query string.
|
116
|
+
* `:callbacks` plugin: ensure the callbacks from a session are copied when a new session is derived from it (via a `.plugin` call, for example).
|
117
|
+
* `:callbacks` plugin: errors raised from hostname resolution should bubble up to user code.
|
118
|
+
* fixed connection coalescing selector monitoring in cases where the coalescable connection is cloned, while other branches were simplified.
|
119
|
+
* clear the connection write buffer in corner cases where the remaining bytes may be interpreted as GOAWAY handshake frame (and may cause unintended writes to connections already identified as broken).
|
120
|
+
* remove idle connections from the selector when an error happens before the state changes (this may happen if the thread is interrupted during name resolution).
|
121
|
+
|
122
|
+
## Chore
|
123
|
+
|
124
|
+
`httpx` makes extensive use of features introduced in ruby 3.4, such as `Module#set_temporary_name` for otherwise plugin-generated anonymous classes (improves debugging and issue reporting), or `String#append_as_bytes` for a small but non-negligible perf boost in buffer operations. It falls back to the previous behaviour when used with ruby 3.3 or lower.
|
125
|
+
|
126
|
+
Also, and in preparation for the incoming ruby 3.5 release, dependency of the `cgi` gem (which will be removed from stdlib) was removed.
|
@@ -13,8 +13,13 @@ module Datadog::Tracing
|
|
13
13
|
|
14
14
|
TYPE_OUTBOUND = Datadog::Tracing::Metadata::Ext::HTTP::TYPE_OUTBOUND
|
15
15
|
|
16
|
-
|
16
|
+
TAG_BASE_SERVICE = Datadog::Tracing::Contrib::Ext::Metadata::TAG_BASE_SERVICE
|
17
|
+
TAG_PEER_HOSTNAME = Datadog::Tracing::Metadata::Ext::TAG_PEER_HOSTNAME
|
17
18
|
|
19
|
+
TAG_KIND = Datadog::Tracing::Metadata::Ext::TAG_KIND
|
20
|
+
TAG_CLIENT = Datadog::Tracing::Metadata::Ext::SpanKind::TAG_CLIENT
|
21
|
+
TAG_COMPONENT = Datadog::Tracing::Metadata::Ext::TAG_COMPONENT
|
22
|
+
TAG_OPERATION = Datadog::Tracing::Metadata::Ext::TAG_OPERATION
|
18
23
|
TAG_URL = Datadog::Tracing::Metadata::Ext::HTTP::TAG_URL
|
19
24
|
TAG_METHOD = Datadog::Tracing::Metadata::Ext::HTTP::TAG_METHOD
|
20
25
|
TAG_TARGET_HOST = Datadog::Tracing::Metadata::Ext::NET::TAG_TARGET_HOST
|
@@ -81,6 +86,10 @@ module Datadog::Tracing
|
|
81
86
|
span.set_tag(TAG_STATUS_CODE, response.status.to_s)
|
82
87
|
|
83
88
|
span.set_error(::HTTPX::HTTPError.new(response)) if response.status >= 400 && response.status <= 599
|
89
|
+
|
90
|
+
span.set_tags(
|
91
|
+
Datadog.configuration.tracing.header_tags.response_tags(response.headers.to_h)
|
92
|
+
)
|
84
93
|
end
|
85
94
|
|
86
95
|
span.finish
|
@@ -97,7 +106,13 @@ module Datadog::Tracing
|
|
97
106
|
|
98
107
|
span.resource = verb
|
99
108
|
|
100
|
-
#
|
109
|
+
# Tag original global service name if not used
|
110
|
+
span.set_tag(TAG_BASE_SERVICE, Datadog.configuration.service) if span.service != Datadog.configuration.service
|
111
|
+
|
112
|
+
span.set_tag(TAG_KIND, TAG_CLIENT)
|
113
|
+
|
114
|
+
span.set_tag(TAG_COMPONENT, "httpx")
|
115
|
+
span.set_tag(TAG_OPERATION, "request")
|
101
116
|
|
102
117
|
span.set_tag(TAG_URL, request.path)
|
103
118
|
span.set_tag(TAG_METHOD, verb)
|
@@ -105,8 +120,10 @@ module Datadog::Tracing
|
|
105
120
|
span.set_tag(TAG_TARGET_HOST, uri.host)
|
106
121
|
span.set_tag(TAG_TARGET_PORT, uri.port)
|
107
122
|
|
123
|
+
span.set_tag(TAG_PEER_HOSTNAME, uri.host)
|
124
|
+
|
108
125
|
# Tag as an external peer service
|
109
|
-
span.set_tag(TAG_PEER_SERVICE, span.service)
|
126
|
+
# span.set_tag(TAG_PEER_SERVICE, span.service)
|
110
127
|
|
111
128
|
if config[:distributed_tracing]
|
112
129
|
propagate_trace_http(
|
@@ -120,6 +137,10 @@ module Datadog::Tracing
|
|
120
137
|
Contrib::Analytics.set_sample_rate(span, config[:analytics_sample_rate])
|
121
138
|
end
|
122
139
|
|
140
|
+
span.set_tags(
|
141
|
+
Datadog.configuration.tracing.header_tags.request_tags(request.headers.to_h)
|
142
|
+
)
|
143
|
+
|
123
144
|
span
|
124
145
|
rescue StandardError => e
|
125
146
|
Datadog.logger.error("error preparing span for http request: #{e}")
|
data/lib/httpx/buffer.rb
CHANGED
@@ -14,8 +14,6 @@ module HTTPX
|
|
14
14
|
class Buffer
|
15
15
|
extend Forwardable
|
16
16
|
|
17
|
-
def_delegator :@buffer, :<<
|
18
|
-
|
19
17
|
def_delegator :@buffer, :to_s
|
20
18
|
|
21
19
|
def_delegator :@buffer, :to_str
|
@@ -30,9 +28,22 @@ module HTTPX
|
|
30
28
|
|
31
29
|
attr_reader :limit
|
32
30
|
|
33
|
-
|
34
|
-
|
35
|
-
|
31
|
+
if RUBY_VERSION >= "3.4.0"
|
32
|
+
def initialize(limit)
|
33
|
+
@buffer = String.new("", encoding: Encoding::BINARY, capacity: limit)
|
34
|
+
@limit = limit
|
35
|
+
end
|
36
|
+
|
37
|
+
def <<(chunk)
|
38
|
+
@buffer.append_as_bytes(chunk)
|
39
|
+
end
|
40
|
+
else
|
41
|
+
def initialize(limit)
|
42
|
+
@buffer = "".b
|
43
|
+
@limit = limit
|
44
|
+
end
|
45
|
+
|
46
|
+
def_delegator :@buffer, :<<
|
36
47
|
end
|
37
48
|
|
38
49
|
def full?
|
@@ -93,7 +93,7 @@ module HTTPX
|
|
93
93
|
concurrent_requests_limit = [@max_concurrent_requests, requests_limit].min
|
94
94
|
@requests.each_with_index do |request, idx|
|
95
95
|
break if idx >= concurrent_requests_limit
|
96
|
-
next
|
96
|
+
next unless request.can_buffer?
|
97
97
|
|
98
98
|
handle(request)
|
99
99
|
end
|
@@ -119,7 +119,7 @@ module HTTPX
|
|
119
119
|
@parser.http_version.join("."),
|
120
120
|
headers)
|
121
121
|
log(color: :yellow) { "-> HEADLINE: #{response.status} HTTP/#{@parser.http_version.join(".")}" }
|
122
|
-
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{v}" }.join("\n") }
|
122
|
+
log(color: :yellow) { response.headers.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v)}" }.join("\n") }
|
123
123
|
|
124
124
|
@request.response = response
|
125
125
|
on_complete if response.finished?
|
@@ -131,7 +131,7 @@ module HTTPX
|
|
131
131
|
response = @request.response
|
132
132
|
log(level: 2) { "trailer headers received" }
|
133
133
|
|
134
|
-
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{v.join(", ")}" }.join("\n") }
|
134
|
+
log(color: :yellow) { h.each.map { |f, v| "-> HEADER: #{f}: #{log_redact(v.join(", "))}" }.join("\n") }
|
135
135
|
response.merge_headers(h)
|
136
136
|
end
|
137
137
|
|
@@ -141,7 +141,7 @@ module HTTPX
|
|
141
141
|
return unless request
|
142
142
|
|
143
143
|
log(color: :green) { "-> DATA: #{chunk.bytesize} bytes..." }
|
144
|
-
log(level: 2, color: :green) { "-> #{chunk.inspect}" }
|
144
|
+
log(level: 2, color: :green) { "-> #{log_redact(chunk.inspect)}" }
|
145
145
|
response = request.response
|
146
146
|
|
147
147
|
response << chunk
|
@@ -171,7 +171,6 @@ module HTTPX
|
|
171
171
|
@request = nil
|
172
172
|
@requests.shift
|
173
173
|
response = request.response
|
174
|
-
response.finish! unless response.is_a?(ErrorResponse)
|
175
174
|
emit(:response, request, response)
|
176
175
|
|
177
176
|
if @parser.upgrade?
|
@@ -362,7 +361,7 @@ module HTTPX
|
|
362
361
|
|
363
362
|
while (chunk = request.drain_body)
|
364
363
|
log(color: :green) { "<- DATA: #{chunk.bytesize} bytes..." }
|
365
|
-
log(level: 2, color: :green) { "<- #{chunk.inspect}" }
|
364
|
+
log(level: 2, color: :green) { "<- #{log_redact(chunk.inspect)}" }
|
366
365
|
@buffer << chunk
|
367
366
|
throw(:buffer_full, request) if @buffer.full?
|
368
367
|
end
|
@@ -382,9 +381,9 @@ module HTTPX
|
|
382
381
|
|
383
382
|
def join_headers2(headers)
|
384
383
|
headers.each do |field, value|
|
385
|
-
|
386
|
-
log(color: :yellow) { "<- HEADER: #{
|
387
|
-
@buffer <<
|
384
|
+
field = capitalized(field)
|
385
|
+
log(color: :yellow) { "<- HEADER: #{[field, log_redact(value)].join(": ")}" }
|
386
|
+
@buffer << "#{field}: #{value}#{CRLF}"
|
388
387
|
end
|
389
388
|
end
|
390
389
|
|
@@ -11,8 +11,8 @@ module HTTPX
|
|
11
11
|
MAX_CONCURRENT_REQUESTS = ::HTTP2::DEFAULT_MAX_CONCURRENT_STREAMS
|
12
12
|
|
13
13
|
class Error < Error
|
14
|
-
def initialize(id,
|
15
|
-
super("stream #{id} closed with error: #{
|
14
|
+
def initialize(id, error)
|
15
|
+
super("stream #{id} closed with error: #{error}")
|
16
16
|
end
|
17
17
|
end
|
18
18
|
|
@@ -98,12 +98,6 @@ module HTTPX
|
|
98
98
|
@connection << data
|
99
99
|
end
|
100
100
|
|
101
|
-
def can_buffer_more_requests?
|
102
|
-
(@handshake_completed || !@wait_for_handshake) &&
|
103
|
-
@streams.size < @max_concurrent_requests &&
|
104
|
-
@streams.size < @max_requests
|
105
|
-
end
|
106
|
-
|
107
101
|
def send(request, head = false)
|
108
102
|
unless can_buffer_more_requests?
|
109
103
|
head ? @pending.unshift(request) : @pending << request
|
@@ -124,7 +118,7 @@ module HTTPX
|
|
124
118
|
|
125
119
|
def consume
|
126
120
|
@streams.each do |request, stream|
|
127
|
-
next
|
121
|
+
next unless request.can_buffer?
|
128
122
|
|
129
123
|
handle(request, stream)
|
130
124
|
end
|
@@ -152,13 +146,19 @@ module HTTPX
|
|
152
146
|
|
153
147
|
def ping
|
154
148
|
ping = SecureRandom.gen_random(8)
|
155
|
-
@connection.ping(ping)
|
149
|
+
@connection.ping(ping.dup)
|
156
150
|
ensure
|
157
151
|
@pings << ping
|
158
152
|
end
|
159
153
|
|
160
154
|
private
|
161
155
|
|
156
|
+
def can_buffer_more_requests?
|
157
|
+
(@handshake_completed || !@wait_for_handshake) &&
|
158
|
+
@streams.size < @max_concurrent_requests &&
|
159
|
+
@streams.size < @max_requests
|
160
|
+
end
|
161
|
+
|
162
162
|
def send_pending
|
163
163
|
while (request = @pending.shift)
|
164
164
|
break unless send(request, true)
|
@@ -224,12 +224,12 @@ module HTTPX
|
|
224
224
|
extra_headers = set_protocol_headers(request)
|
225
225
|
|
226
226
|
if request.headers.key?("host")
|
227
|
-
log { "forbidden \"host\" header found (#{request.headers["host"]}), will use it as authority..." }
|
227
|
+
log { "forbidden \"host\" header found (#{log_redact(request.headers["host"])}), will use it as authority..." }
|
228
228
|
extra_headers[":authority"] = request.headers["host"]
|
229
229
|
end
|
230
230
|
|
231
231
|
log(level: 1, color: :yellow) do
|
232
|
-
request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
232
|
+
request.headers.merge(extra_headers).each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
233
233
|
end
|
234
234
|
stream.headers(request.headers.each(extra_headers), end_stream: request.body.empty?)
|
235
235
|
end
|
@@ -241,7 +241,7 @@ module HTTPX
|
|
241
241
|
end
|
242
242
|
|
243
243
|
log(level: 1, color: :yellow) do
|
244
|
-
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{v}" }.join("\n")
|
244
|
+
request.trailers.each.map { |k, v| "#{stream.id}: -> HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
245
245
|
end
|
246
246
|
stream.headers(request.trailers.each, end_stream: true)
|
247
247
|
end
|
@@ -252,13 +252,13 @@ module HTTPX
|
|
252
252
|
chunk = @drains.delete(request) || request.drain_body
|
253
253
|
while chunk
|
254
254
|
next_chunk = request.drain_body
|
255
|
-
|
256
|
-
|
257
|
-
stream.data(chunk, end_stream: !(next_chunk || request.trailers? || request.callbacks_for?(:trailers)))
|
255
|
+
send_chunk(request, stream, chunk, next_chunk)
|
256
|
+
|
258
257
|
if next_chunk && (@buffer.full? || request.body.unbounded_body?)
|
259
258
|
@drains[request] = next_chunk
|
260
259
|
throw(:buffer_full)
|
261
260
|
end
|
261
|
+
|
262
262
|
chunk = next_chunk
|
263
263
|
end
|
264
264
|
|
@@ -267,6 +267,16 @@ module HTTPX
|
|
267
267
|
on_stream_refuse(stream, request, error)
|
268
268
|
end
|
269
269
|
|
270
|
+
def send_chunk(request, stream, chunk, next_chunk)
|
271
|
+
log(level: 1, color: :green) { "#{stream.id}: -> DATA: #{chunk.bytesize} bytes..." }
|
272
|
+
log(level: 2, color: :green) { "#{stream.id}: -> #{log_redact(chunk.inspect)}" }
|
273
|
+
stream.data(chunk, end_stream: end_stream?(request, next_chunk))
|
274
|
+
end
|
275
|
+
|
276
|
+
def end_stream?(request, next_chunk)
|
277
|
+
!(next_chunk || request.trailers? || request.callbacks_for?(:trailers))
|
278
|
+
end
|
279
|
+
|
270
280
|
######
|
271
281
|
# HTTP/2 Callbacks
|
272
282
|
######
|
@@ -280,7 +290,7 @@ module HTTPX
|
|
280
290
|
end
|
281
291
|
|
282
292
|
log(color: :yellow) do
|
283
|
-
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
293
|
+
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
284
294
|
end
|
285
295
|
_, status = h.shift
|
286
296
|
headers = request.options.headers_class.new(h)
|
@@ -293,14 +303,14 @@ module HTTPX
|
|
293
303
|
|
294
304
|
def on_stream_trailers(stream, response, h)
|
295
305
|
log(color: :yellow) do
|
296
|
-
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{v}" }.join("\n")
|
306
|
+
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact(v)}" }.join("\n")
|
297
307
|
end
|
298
308
|
response.merge_headers(h)
|
299
309
|
end
|
300
310
|
|
301
311
|
def on_stream_data(stream, request, data)
|
302
312
|
log(level: 1, color: :green) { "#{stream.id}: <- DATA: #{data.bytesize} bytes..." }
|
303
|
-
log(level: 2, color: :green) { "#{stream.id}: <- #{data.inspect}" }
|
313
|
+
log(level: 2, color: :green) { "#{stream.id}: <- #{log_redact(data.inspect)}" }
|
304
314
|
request.response << data
|
305
315
|
end
|
306
316
|
|
@@ -388,8 +398,15 @@ module HTTPX
|
|
388
398
|
def on_frame_sent(frame)
|
389
399
|
log(level: 2) { "#{frame[:stream]}: frame was sent!" }
|
390
400
|
log(level: 2, color: :blue) do
|
391
|
-
payload =
|
392
|
-
|
401
|
+
payload =
|
402
|
+
case frame[:type]
|
403
|
+
when :data
|
404
|
+
frame.merge(payload: frame[:payload].bytesize)
|
405
|
+
when :headers, :ping
|
406
|
+
frame.merge(payload: log_redact(frame[:payload]))
|
407
|
+
else
|
408
|
+
frame
|
409
|
+
end
|
393
410
|
"#{frame[:stream]}: #{payload}"
|
394
411
|
end
|
395
412
|
end
|
@@ -397,15 +414,22 @@ module HTTPX
|
|
397
414
|
def on_frame_received(frame)
|
398
415
|
log(level: 2) { "#{frame[:stream]}: frame was received!" }
|
399
416
|
log(level: 2, color: :magenta) do
|
400
|
-
payload =
|
401
|
-
|
417
|
+
payload =
|
418
|
+
case frame[:type]
|
419
|
+
when :data
|
420
|
+
frame.merge(payload: frame[:payload].bytesize)
|
421
|
+
when :headers, :ping
|
422
|
+
frame.merge(payload: log_redact(frame[:payload]))
|
423
|
+
else
|
424
|
+
frame
|
425
|
+
end
|
402
426
|
"#{frame[:stream]}: #{payload}"
|
403
427
|
end
|
404
428
|
end
|
405
429
|
|
406
430
|
def on_altsvc(origin, frame)
|
407
431
|
log(level: 2) { "#{frame[:stream]}: altsvc frame was received" }
|
408
|
-
log(level: 2) { "#{frame[:stream]}: #{frame.inspect}" }
|
432
|
+
log(level: 2) { "#{frame[:stream]}: #{log_redact(frame.inspect)}" }
|
409
433
|
alt_origin = URI.parse("#{frame[:proto]}://#{frame[:host]}:#{frame[:port]}")
|
410
434
|
params = { "ma" => frame[:max_age] }
|
411
435
|
emit(:altsvc, origin, alt_origin, origin, params)
|
data/lib/httpx/connection.rb
CHANGED
@@ -152,6 +152,14 @@ module HTTPX
|
|
152
152
|
) && @options == connection.options
|
153
153
|
end
|
154
154
|
|
155
|
+
# coalesces +self+ into +connection+.
|
156
|
+
def coalesce!(connection)
|
157
|
+
@coalesced_connection = connection
|
158
|
+
|
159
|
+
close_sibling
|
160
|
+
connection.merge(self)
|
161
|
+
end
|
162
|
+
|
155
163
|
# coalescable connections need to be mergeable!
|
156
164
|
# but internally, #mergeable? is called before #coalescable?
|
157
165
|
def coalescable?(connection)
|
@@ -251,6 +259,7 @@ module HTTPX
|
|
251
259
|
end
|
252
260
|
nil
|
253
261
|
rescue StandardError => e
|
262
|
+
@write_buffer.clear
|
254
263
|
emit(:error, e)
|
255
264
|
raise e
|
256
265
|
end
|
@@ -262,7 +271,13 @@ module HTTPX
|
|
262
271
|
end
|
263
272
|
|
264
273
|
def terminate
|
265
|
-
|
274
|
+
case @state
|
275
|
+
when :idle
|
276
|
+
purge_after_closed
|
277
|
+
emit(:terminate)
|
278
|
+
when :closed
|
279
|
+
@connected_at = nil
|
280
|
+
end
|
266
281
|
|
267
282
|
close
|
268
283
|
end
|
@@ -341,13 +356,6 @@ module HTTPX
|
|
341
356
|
on_error(error)
|
342
357
|
end
|
343
358
|
|
344
|
-
def coalesced_connection=(connection)
|
345
|
-
@coalesced_connection = connection
|
346
|
-
|
347
|
-
close_sibling
|
348
|
-
connection.merge(self)
|
349
|
-
end
|
350
|
-
|
351
359
|
def sibling=(connection)
|
352
360
|
@sibling = connection
|
353
361
|
|
@@ -378,6 +386,16 @@ module HTTPX
|
|
378
386
|
@current_selector = nil
|
379
387
|
end
|
380
388
|
|
389
|
+
# :nocov:
|
390
|
+
def inspect
|
391
|
+
"#<#{self.class}:#{object_id} " \
|
392
|
+
"@origin=#{@origin} " \
|
393
|
+
"@state=#{@state} " \
|
394
|
+
"@pending=#{@pending.size} " \
|
395
|
+
"@io=#{@io}>"
|
396
|
+
end
|
397
|
+
# :nocov:
|
398
|
+
|
381
399
|
private
|
382
400
|
|
383
401
|
def connect
|
@@ -528,17 +546,17 @@ module HTTPX
|
|
528
546
|
def send_request_to_parser(request)
|
529
547
|
@inflight += 1
|
530
548
|
request.peer_address = @io.ip
|
531
|
-
parser.send(request)
|
532
|
-
|
533
549
|
set_request_timeouts(request)
|
534
550
|
|
551
|
+
parser.send(request)
|
552
|
+
|
535
553
|
return unless @state == :inactive
|
536
554
|
|
537
555
|
transition(:active)
|
538
556
|
end
|
539
557
|
|
540
558
|
def build_parser(protocol = @io.protocol)
|
541
|
-
parser =
|
559
|
+
parser = parser_type(protocol).new(@write_buffer, @options)
|
542
560
|
set_parser_callbacks(parser)
|
543
561
|
parser
|
544
562
|
end
|
@@ -550,6 +568,7 @@ module HTTPX
|
|
550
568
|
end
|
551
569
|
@response_received_at = Utils.now
|
552
570
|
@inflight -= 1
|
571
|
+
response.finish!
|
553
572
|
request.emit(:response, response)
|
554
573
|
end
|
555
574
|
parser.on(:altsvc) do |alt_origin, origin, alt_params|
|
@@ -922,14 +941,12 @@ module HTTPX
|
|
922
941
|
end
|
923
942
|
end
|
924
943
|
|
925
|
-
|
926
|
-
|
927
|
-
|
928
|
-
|
929
|
-
|
930
|
-
|
931
|
-
raise Error, "unsupported protocol (##{protocol})"
|
932
|
-
end
|
944
|
+
def parser_type(protocol)
|
945
|
+
case protocol
|
946
|
+
when "h2" then HTTP2
|
947
|
+
when "http/1.1" then HTTP1
|
948
|
+
else
|
949
|
+
raise Error, "unsupported protocol (##{protocol})"
|
933
950
|
end
|
934
951
|
end
|
935
952
|
end
|
data/lib/httpx/errors.rb
CHANGED
@@ -29,17 +29,8 @@ module HTTPX
|
|
29
29
|
end
|
30
30
|
end
|
31
31
|
|
32
|
-
# Raise when it can't acquire a connection
|
33
|
-
class PoolTimeoutError < TimeoutError
|
34
|
-
attr_reader :origin
|
35
|
-
|
36
|
-
# initializes the +origin+ it refers to, and the
|
37
|
-
# +timeout+ causing the error.
|
38
|
-
def initialize(origin, timeout)
|
39
|
-
@origin = origin
|
40
|
-
super(timeout, "Timed out after #{timeout} seconds while waiting for a connection to #{origin}")
|
41
|
-
end
|
42
|
-
end
|
32
|
+
# Raise when it can't acquire a connection from the pool.
|
33
|
+
class PoolTimeoutError < TimeoutError; end
|
43
34
|
|
44
35
|
# Error raised when there was a timeout establishing the connection to a server.
|
45
36
|
# This may be raised due to timeouts during TCP and TLS (when applicable) connection
|