httpx 1.7.2 → 1.7.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +3 -1
- data/doc/release_notes/1_7_3.md +29 -0
- data/doc/release_notes/1_7_4.md +42 -0
- data/doc/release_notes/1_7_5.md +10 -0
- data/doc/release_notes/1_7_6.md +24 -0
- data/lib/httpx/adapters/datadog.rb +37 -64
- data/lib/httpx/adapters/webmock.rb +3 -4
- data/lib/httpx/altsvc.rb +4 -2
- data/lib/httpx/connection/http1.rb +26 -18
- data/lib/httpx/connection/http2.rb +53 -33
- data/lib/httpx/connection.rb +152 -63
- data/lib/httpx/io/ssl.rb +20 -8
- data/lib/httpx/io/tcp.rb +18 -12
- data/lib/httpx/io/unix.rb +13 -9
- data/lib/httpx/options.rb +23 -7
- data/lib/httpx/parser/http1.rb +14 -4
- data/lib/httpx/plugins/auth/digest.rb +2 -1
- data/lib/httpx/plugins/auth.rb +23 -9
- data/lib/httpx/plugins/brotli.rb +33 -5
- data/lib/httpx/plugins/cookies/cookie.rb +34 -11
- data/lib/httpx/plugins/cookies/jar.rb +93 -18
- data/lib/httpx/plugins/cookies.rb +7 -3
- data/lib/httpx/plugins/expect.rb +33 -3
- data/lib/httpx/plugins/fiber_concurrency.rb +2 -4
- data/lib/httpx/plugins/follow_redirects.rb +7 -1
- data/lib/httpx/plugins/h2c.rb +1 -1
- data/lib/httpx/plugins/proxy/http.rb +15 -8
- data/lib/httpx/plugins/proxy.rb +10 -2
- data/lib/httpx/plugins/rate_limiter.rb +19 -19
- data/lib/httpx/plugins/retries.rb +17 -9
- data/lib/httpx/plugins/ssrf_filter.rb +1 -0
- data/lib/httpx/plugins/stream_bidi.rb +6 -0
- data/lib/httpx/plugins/tracing.rb +137 -0
- data/lib/httpx/pool.rb +7 -9
- data/lib/httpx/request.rb +15 -3
- data/lib/httpx/resolver/multi.rb +1 -8
- data/lib/httpx/resolver/native.rb +2 -2
- data/lib/httpx/resolver/resolver.rb +21 -2
- data/lib/httpx/resolver/system.rb +3 -1
- data/lib/httpx/response.rb +5 -1
- data/lib/httpx/selector.rb +19 -16
- data/lib/httpx/session.rb +34 -44
- data/lib/httpx/timers.rb +4 -0
- data/lib/httpx/version.rb +1 -1
- data/sig/altsvc.rbs +2 -0
- data/sig/chainable.rbs +2 -1
- data/sig/connection/http1.rbs +3 -1
- data/sig/connection/http2.rbs +11 -4
- data/sig/connection.rbs +16 -2
- data/sig/io/ssl.rbs +1 -0
- data/sig/io/tcp.rbs +2 -2
- data/sig/options.rbs +8 -3
- data/sig/parser/http1.rbs +1 -1
- data/sig/plugins/auth.rbs +5 -2
- data/sig/plugins/brotli.rbs +11 -6
- data/sig/plugins/cookies/cookie.rbs +3 -2
- data/sig/plugins/cookies/jar.rbs +11 -0
- data/sig/plugins/cookies.rbs +2 -0
- data/sig/plugins/expect.rbs +21 -2
- data/sig/plugins/fiber_concurrency.rbs +2 -2
- data/sig/plugins/proxy/socks4.rbs +4 -0
- data/sig/plugins/rate_limiter.rbs +2 -2
- data/sig/plugins/response_cache.rbs +3 -3
- data/sig/plugins/retries.rbs +17 -13
- data/sig/plugins/tracing.rbs +41 -0
- data/sig/pool.rbs +1 -1
- data/sig/request.rbs +4 -0
- data/sig/resolver/native.rbs +2 -0
- data/sig/resolver/resolver.rbs +4 -2
- data/sig/resolver/system.rbs +0 -2
- data/sig/response/body.rbs +1 -1
- data/sig/selector.rbs +7 -2
- data/sig/session.rbs +2 -0
- data/sig/timers.rbs +2 -0
- data/sig/transcoder/gzip.rbs +1 -1
- data/sig/transcoder.rbs +0 -2
- metadata +13 -3
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: b34abe93b42862837d9131524d6e9663a93474f70c65ac2a10946eb62c9aeb14
|
|
4
|
+
data.tar.gz: '0853b845878005ace796106f79b735d9d75e205b7ab28c03a3786b53233d6a4b'
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 0be425ba5468990b31fb3fe04b97ee8c727cc1f16690decc2057314c3645e5baa21fae9b54585bc6e628410374dbfd9b8676df3aaed180b321a9b59cbbad2853
|
|
7
|
+
data.tar.gz: 3cbe9b0e2952c7d330bffe727f2b478f53152d431a306d48f5f41cb5033e9ba7fa2ddec9a319aed49f051c8e9e6e2d8e489ecccc83a6c622246ef6922c9bbecc
|
data/README.md
CHANGED
|
@@ -46,7 +46,9 @@ And that's the simplest one there is. But you can also do:
|
|
|
46
46
|
HTTPX.post("http://example.com", form: { user: "john", password: "pass" })
|
|
47
47
|
|
|
48
48
|
http = HTTPX.with(headers: { "x-my-name" => "joe" })
|
|
49
|
-
|
|
49
|
+
File.open("path/to/file") do |file|
|
|
50
|
+
http.patch("http://example.com/file", body: file) # request body is streamed
|
|
51
|
+
end
|
|
50
52
|
```
|
|
51
53
|
|
|
52
54
|
If you want to do some more things with the response, you can get an `HTTPX::Response`:
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# 1.7.3
|
|
2
|
+
|
|
3
|
+
## Improvements
|
|
4
|
+
|
|
5
|
+
### cookies plugin: Jar as CookieStore
|
|
6
|
+
|
|
7
|
+
While previously an implementation detail, the cookie jar from a `:cookie` plugin-enabled session can now be manipulated by the end user:
|
|
8
|
+
|
|
9
|
+
```ruby
|
|
10
|
+
cookies_sess = HTTPX.plugin(:cookies)
|
|
11
|
+
|
|
12
|
+
jar = cookies_sess.make_jar
|
|
13
|
+
|
|
14
|
+
sess = cookies_sess.with(cookies: jar)
|
|
15
|
+
|
|
16
|
+
# perform requests using sess, get/set/delete cookies in jar
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
The jar API now closely follows the [Web Cookie Store API](https://developer.mozilla.org/en-US/docs/Web/API/CookieStore), by providing the same set of functions.
|
|
20
|
+
|
|
21
|
+
Some API backwards compatibility is maintained, however since this was an internal implementation detail, this effort isn't meant to be thorough.
|
|
22
|
+
|
|
23
|
+
## Bugfixes
|
|
24
|
+
|
|
25
|
+
* `http-2`: clear buffered data chunks when receiving a `GOAWAY` stream frame; without this, the client kept sending the corresponding `DATA` frames, despite the peer server making it known that it wouldn't process it. While this is valid HTTP/2, this could increase the connection window until a point where it'd go over the max frame size. this issue was observed during large file uploads where the first request could fail and make the client renegotiate.
|
|
26
|
+
* `webmock` adapter: fixed response body length accounting which was making `response.body.empty?` return true for responses with payload.
|
|
27
|
+
* `:rate_limiter` plugin relies on an internal refactoring to be able to wait for the time suggested by the peer server, instead of potentially relying on custom user logic via its own `:retry_after`.
|
|
28
|
+
* `:fiber_concurrency`: fix wrong names for native/system resolver overrides.
|
|
29
|
+
* connection: fix for race condition when closing the connection, where the state only transitions to `closed` after checking the connection back in to the pool, potentially corrupting it if another session meanwhile has picked it up and manipulated it.
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# 1.7.4
|
|
2
|
+
|
|
3
|
+
## Features
|
|
4
|
+
|
|
5
|
+
### Tracing plugin
|
|
6
|
+
|
|
7
|
+
A new `:tracing` plugin was introduced. It adds support for a new option, `:tracer`, which accepts an object which responds to the following callbacks:
|
|
8
|
+
|
|
9
|
+
* `#enabled?(request)` - should return true or false depending on whether tracing is enabled
|
|
10
|
+
* `#start(request)` - called when a request is about to be sent
|
|
11
|
+
* `#finish(request, response)` - called when a response is received
|
|
12
|
+
* `#reset(request)` - called when a request is being prepared to be resent, in cases where it makes sense (i.e. when a request is retried).
|
|
13
|
+
|
|
14
|
+
You can chain several tracers, and callbacks will be relayed to all of them:
|
|
15
|
+
|
|
16
|
+
```ruby
|
|
17
|
+
HTTPX.plugin(:tracing).with(tracer: telemetry_platform_tracer).with(tracer: telemetry2_platform_tracer)
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
This was developed to be the foundation on top of which the datadog and OTel integrations will be built.
|
|
21
|
+
|
|
22
|
+
## Improvements
|
|
23
|
+
|
|
24
|
+
* try fetching the response immediately after sending the request to the connection; this allows returning from errors much earlier and bug free than doing another round of waits on I/O.
|
|
25
|
+
* when a connection is reconnected, and it was established the first time that the peer can accept only 1 request at a time, the connection will keep that information and keep sending requests 1 at a time afterwards.
|
|
26
|
+
|
|
27
|
+
## Bugfixes
|
|
28
|
+
|
|
29
|
+
* fix regression from introducing connection post state transition callbacks, by foregoing disconnect when there's pending backlog.
|
|
30
|
+
* transition requests to `:idle` before routing them to a different connection on merge (this could possibly leave dangling timeout callbacks otherwise).
|
|
31
|
+
* `:brotli` plugin was integrated with the stream writer component which allows writing compressed payload in chunks.
|
|
32
|
+
* `:brotli` plugin integrates with the `brotli` gem v0.8.0, which fixed an issue dealing with large payload responses due to the lack of support for decoding payloads in chunks.
|
|
33
|
+
* http1 parser: reset before early returning on `Upgrade` responses (it was left in an invalid "parsing headers" state, which, in the case of a keep-alive connection, would cause parsing of the next request to fail).
|
|
34
|
+
* `datadog` adapter: fixed initialization of the request start time after connections were opened (it was being set to connection initialization time every time, instead of just on the first request before connection is established).
|
|
35
|
+
* parsers: also reroute non-completed in-flight requests back to the connection so they can be retried (previously, only pending requests were).
|
|
36
|
+
* `:proxy` plugin: do not try disconnecting unnecessarily when resetting may already do so (if conditions apply).
|
|
37
|
+
* `:proxy` plugin: removed call to nonexistent `#reset!` function.
|
|
38
|
+
* `:proxy` plugin: also close wrapped sockets.
|
|
39
|
+
* connection: on force_close, move connection disconnection logic below so that, if requests are reenqueued from the parser, this can be halted.
|
|
40
|
+
* connection: when transition to `:idle`, reenqueue requests from parser before resetting it.
|
|
41
|
+
* implement `#lazy_resolve` on resolvers, as when they're picked from the selector (instead of from the pool), they may not be wrapped by a Multi proxy.
|
|
42
|
+
* allow resolvers transitioning from `:idle` to `:closed` and forego disconnecting when the resolver is not able to transition to `:closed` (which protects from a possible fiber scheduler context switch which changed the state under the hood).
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# 1.7.5
|
|
2
|
+
|
|
3
|
+
## Improvements
|
|
4
|
+
|
|
5
|
+
* `:tracing` plugin: make `Request#init_time` a UTC timestamp.
|
|
6
|
+
|
|
7
|
+
## Bugfixes
|
|
8
|
+
|
|
9
|
+
* fixed handling of conditional responses which was making a batch of concurrent requests being handled serially after they failed.
|
|
10
|
+
* `datadog` adapter: use `Request#init_time` as the span start time, which will fix the bug where the span wasn't including the time it takes to open the connection (TCP/TLS handshakes).
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# 1.7.6
|
|
2
|
+
|
|
3
|
+
## Improvements
|
|
4
|
+
|
|
5
|
+
* `datadog` adapter: support setting custom `peer.service`.
|
|
6
|
+
* stopped doing `Thread.pass` after checking connections back into the pool. The goal of this was to allow threads potentially waiting on a connection for the same origin to have the opportunity to take it before the same thread could check the same connection out, thereby preventing starvation on limited pool sizes; this however backfired for the common case of unbounded pool sizes (the default), and was especially bad when used for multiple concurrent requests on multiple origins, where avoidable lag was introduced on the whole processing every time a connection was checked back in. Therefore, the initial problem needs to be solved at the mutex implementation of CRuby.
|
|
7
|
+
* connection: after keep alive timeout expired, try writing `PING` frame to the socket as soon as it's emitted (previously, the frame was being buffered and the socket would be probed for readiness before writing it, whereas in most cases this is avoidable).
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
## Bugfixes
|
|
11
|
+
|
|
12
|
+
* reuse the same resolver instance when resolving multiple concurrent requests with the same options (it was previously instantiating different ones based on option heuristics, such as when requests had different `:origin`).
|
|
13
|
+
* on successful early name resolution (from cache or hosts file, for example), check the resolver back into the pool (the object was being dereferenced).
|
|
14
|
+
* http1 parser: turn buffer back into a string on reset, instead of preserving its current type (in cases where it was a chunked transfer decoder and connection was kept-alive, it was keeping it around and using it to parse the next request, which broke).
|
|
15
|
+
* http1 parser: enable pipelining based on the max concurrent requests, which the connection may have had pre-set to 1 based on prior knowledge of origin or other heuristics.
|
|
16
|
+
* http1 parser: when disabling pipelining, mark incomplete inlined requests as idle before sending them back to the pending queue.
|
|
17
|
+
* http1 and http2 parser: remove references to requests escalating an error; in scenarios of connection reuse (such as when using the `:persistent` plugin), these requests could be, on a transition to idle state, resent even in the absence of an external session reference, causing avoidable overhead or contention leading to timeouts or errors.
|
|
18
|
+
* connection: rescue and ignore exceptions when calling `OpenSSL::SSL::SSLSocket#close` or `Socket#close`.
|
|
19
|
+
* connection: when sending multiple requests into an open HTTP/2 connection which keep alive may have expired, only a single (instead of multiple, as before) `PING` frame will be sent (in the prior state, only the first `PING` was being acknowledged, which made the connection accumulate unacknowledged ping payloads).
|
|
20
|
+
* connection: on handling errors, prevent a potential race condition by copying pending requests into a local var before calling `parser.handle_error` (this may trigger a callback which calls `#reset`, which may call `#disconnect`, which would check the connection back into the pool and make it available for another thread, which could change the state of the ivar containing pending requests).
|
|
21
|
+
* connection: skip resetting a connection when idle and having pending requests; this may happen in situations where parsers may have reset-and-back-to-idle the connection to resend failed requests as protocol (such as in the case of failed HTTP/1 pipelining requests where incomplete requests are to be resent to the connection once pipelining is disabled).
|
|
22
|
+
* connection: raise request timeout errors on the connection the request is currently in, instead of the connection where it was initially sent and had timeouts set; this happens in situations where the request may be retried and sent to a different connection, which forces the request to keep a reference to the connection it's currently in (and discard it when it no longer needs it to prevent it from being GC'ed).
|
|
23
|
+
* `:auth` plugin: when used alongside the `:retries` plugin for multiple concurrent requests, it was calling the dynamic token generator block once for each request; fixed to call the block only once per retry window, so retrying requests will reuse the same token.
|
|
24
|
+
* `:retries` plugin: retry request immediately if `:retry_after` is negative (while the option is validated for numbers, it can't control proc-based calculation).
|
|
@@ -19,6 +19,7 @@ module Datadog::Tracing
|
|
|
19
19
|
Datadog::Tracing::Contrib::Ext::Metadata::TAG_BASE_SERVICE
|
|
20
20
|
end
|
|
21
21
|
TAG_PEER_HOSTNAME = Datadog::Tracing::Metadata::Ext::TAG_PEER_HOSTNAME
|
|
22
|
+
TAG_PEER_SERVICE = Datadog::Tracing::Metadata::Ext::TAG_PEER_SERVICE
|
|
22
23
|
|
|
23
24
|
TAG_KIND = Datadog::Tracing::Metadata::Ext::TAG_KIND
|
|
24
25
|
TAG_CLIENT = Datadog::Tracing::Metadata::Ext::SpanKind::TAG_CLIENT
|
|
@@ -46,35 +47,25 @@ module Datadog::Tracing
|
|
|
46
47
|
|
|
47
48
|
SPAN_REQUEST = "httpx.request"
|
|
48
49
|
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
span = nil
|
|
50
|
+
def enabled?(request)
|
|
51
|
+
configuration(request).enabled
|
|
52
|
+
end
|
|
54
53
|
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
# while a new is required for the latter.
|
|
59
|
-
request.on(:idle) do
|
|
60
|
-
span = nil
|
|
61
|
-
end
|
|
62
|
-
# the span is initialized when the request is buffered in the parser, which is the closest
|
|
63
|
-
# one gets to actually sending the request.
|
|
64
|
-
request.on(:headers) do
|
|
65
|
-
next if span
|
|
54
|
+
def start(request)
|
|
55
|
+
request.datadog_span = initialize_span(request, request.init_time)
|
|
56
|
+
end
|
|
66
57
|
|
|
67
|
-
|
|
68
|
-
|
|
58
|
+
def reset(request)
|
|
59
|
+
request.datadog_span = nil
|
|
60
|
+
end
|
|
69
61
|
|
|
70
|
-
|
|
71
|
-
|
|
62
|
+
def finish(request, response)
|
|
63
|
+
request.datadog_span ||= initialize_span(request, request.init_time) if request.init_time
|
|
72
64
|
|
|
73
|
-
|
|
74
|
-
end
|
|
65
|
+
finish_span(response, request.datadog_span)
|
|
75
66
|
end
|
|
76
67
|
|
|
77
|
-
def
|
|
68
|
+
def finish_span(response, span)
|
|
78
69
|
if response.is_a?(::HTTPX::ErrorResponse)
|
|
79
70
|
span.set_error(response.error)
|
|
80
71
|
else
|
|
@@ -118,7 +109,9 @@ module Datadog::Tracing
|
|
|
118
109
|
span.set_tag(TAG_PEER_HOSTNAME, uri.host)
|
|
119
110
|
|
|
120
111
|
# Tag as an external peer service
|
|
121
|
-
|
|
112
|
+
if (peer_service = config[:peer_service])
|
|
113
|
+
span.set_tag(TAG_PEER_SERVICE, peer_service)
|
|
114
|
+
end
|
|
122
115
|
|
|
123
116
|
if config[:distributed_tracing]
|
|
124
117
|
propagate_trace_http(
|
|
@@ -137,13 +130,9 @@ module Datadog::Tracing
|
|
|
137
130
|
) if Datadog.configuration.tracing.respond_to?(:header_tags)
|
|
138
131
|
|
|
139
132
|
span
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
end
|
|
144
|
-
|
|
145
|
-
def now
|
|
146
|
-
::Datadog::Core::Utils::Time.now.utc
|
|
133
|
+
rescue StandardError => e
|
|
134
|
+
Datadog.logger.error("error preparing span for http request: #{e}")
|
|
135
|
+
Datadog.logger.error(e.backtrace)
|
|
147
136
|
end
|
|
148
137
|
|
|
149
138
|
def configuration(request)
|
|
@@ -179,44 +168,18 @@ module Datadog::Tracing
|
|
|
179
168
|
end
|
|
180
169
|
end
|
|
181
170
|
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
# intercepts request initialization to inject the tracing logic.
|
|
186
|
-
def initialize(*)
|
|
187
|
-
super
|
|
188
|
-
|
|
189
|
-
@init_time = nil
|
|
190
|
-
|
|
191
|
-
return unless Datadog::Tracing.enabled?
|
|
192
|
-
|
|
193
|
-
RequestTracer.call(self)
|
|
171
|
+
class << self
|
|
172
|
+
def load_dependencies(klass)
|
|
173
|
+
klass.plugin(:tracing)
|
|
194
174
|
end
|
|
195
175
|
|
|
196
|
-
def
|
|
197
|
-
|
|
198
|
-
# However, there are situations where connection initialization fails.
|
|
199
|
-
# Example is the :ssrf_filter plugin, which raises an error on
|
|
200
|
-
# initialize if the host is an IP which matches against the known set.
|
|
201
|
-
# in such cases, we'll just set here right here.
|
|
202
|
-
@init_time ||= ::Datadog::Core::Utils::Time.now.utc
|
|
203
|
-
|
|
204
|
-
super
|
|
176
|
+
def extra_options(options)
|
|
177
|
+
options.merge(tracer: RequestTracer)
|
|
205
178
|
end
|
|
206
179
|
end
|
|
207
180
|
|
|
208
|
-
module
|
|
209
|
-
|
|
210
|
-
super
|
|
211
|
-
|
|
212
|
-
@init_time = ::Datadog::Core::Utils::Time.now.utc
|
|
213
|
-
end
|
|
214
|
-
|
|
215
|
-
def send(request)
|
|
216
|
-
request.init_time ||= @init_time
|
|
217
|
-
|
|
218
|
-
super
|
|
219
|
-
end
|
|
181
|
+
module RequestMethods
|
|
182
|
+
attr_accessor :datadog_span
|
|
220
183
|
end
|
|
221
184
|
end
|
|
222
185
|
|
|
@@ -250,6 +213,11 @@ module Datadog::Tracing
|
|
|
250
213
|
o.env "DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE"
|
|
251
214
|
o.default 1.0
|
|
252
215
|
end
|
|
216
|
+
|
|
217
|
+
option :peer_service do |o|
|
|
218
|
+
o.type :string, nilable: true
|
|
219
|
+
o.env "DD_TRACE_HTTPX_PEER_SERVICE"
|
|
220
|
+
end
|
|
253
221
|
else
|
|
254
222
|
option :enabled do |o|
|
|
255
223
|
o.default { env_to_bool("DD_TRACE_HTTPX_ENABLED", true) }
|
|
@@ -265,6 +233,11 @@ module Datadog::Tracing
|
|
|
265
233
|
o.default { env_to_float(%w[DD_TRACE_HTTPX_ANALYTICS_SAMPLE_RATE DD_HTTPX_ANALYTICS_SAMPLE_RATE], 1.0) }
|
|
266
234
|
o.lazy
|
|
267
235
|
end
|
|
236
|
+
|
|
237
|
+
option :peer_service do |o|
|
|
238
|
+
o.default { env_to_string("DD_TRACE_HTTPX_PEER_SERVICE", nil) }
|
|
239
|
+
o.lazy
|
|
240
|
+
end
|
|
268
241
|
end
|
|
269
242
|
|
|
270
243
|
if defined?(Datadog::Tracing::Contrib::SpanAttributeSchema)
|
|
@@ -82,6 +82,7 @@ module WebMock
|
|
|
82
82
|
|
|
83
83
|
def mock!
|
|
84
84
|
@mocked = true
|
|
85
|
+
@body.mock!
|
|
85
86
|
end
|
|
86
87
|
|
|
87
88
|
def mocked?
|
|
@@ -90,10 +91,8 @@ module WebMock
|
|
|
90
91
|
end
|
|
91
92
|
|
|
92
93
|
module ResponseBodyMethods
|
|
93
|
-
def
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
super
|
|
94
|
+
def mock!
|
|
95
|
+
@inflaters = nil
|
|
97
96
|
end
|
|
98
97
|
end
|
|
99
98
|
|
data/lib/httpx/altsvc.rb
CHANGED
|
@@ -10,6 +10,8 @@ module HTTPX
|
|
|
10
10
|
|
|
11
11
|
H2_ALTSVC_SCHEMES = %w[https h2].freeze
|
|
12
12
|
|
|
13
|
+
ALTSVC_IGNORE_IVARS = %i[@ssl].freeze
|
|
14
|
+
|
|
13
15
|
def send(request)
|
|
14
16
|
request.headers["alt-used"] = @origin.authority if @parser && !@write_buffer.full? && match_altsvcs?(request.uri)
|
|
15
17
|
|
|
@@ -35,11 +37,11 @@ module HTTPX
|
|
|
35
37
|
end
|
|
36
38
|
|
|
37
39
|
def match_altsvc_options?(uri, options)
|
|
38
|
-
return @options
|
|
40
|
+
return @options.connection_options_match?(options) unless @options.ssl.all? do |k, v|
|
|
39
41
|
v == (k == :hostname ? uri.host : options.ssl[k])
|
|
40
42
|
end
|
|
41
43
|
|
|
42
|
-
@options.
|
|
44
|
+
@options.connection_options_match?(options, ALTSVC_IGNORE_IVARS)
|
|
43
45
|
end
|
|
44
46
|
|
|
45
47
|
def altsvc_match?(uri, other_uri)
|
|
@@ -28,7 +28,8 @@ module HTTPX
|
|
|
28
28
|
@version = [1, 1]
|
|
29
29
|
@pending = []
|
|
30
30
|
@requests = []
|
|
31
|
-
@
|
|
31
|
+
@request = nil
|
|
32
|
+
@handshake_completed = @pipelining = false
|
|
32
33
|
end
|
|
33
34
|
|
|
34
35
|
def timeout
|
|
@@ -49,7 +50,14 @@ module HTTPX
|
|
|
49
50
|
@max_requests = @options.max_requests || MAX_REQUESTS
|
|
50
51
|
@parser.reset!
|
|
51
52
|
@handshake_completed = false
|
|
52
|
-
|
|
53
|
+
reset_requests
|
|
54
|
+
end
|
|
55
|
+
|
|
56
|
+
def reset_requests
|
|
57
|
+
requests = @requests
|
|
58
|
+
requests.each { |r| r.transition(:idle) }
|
|
59
|
+
@pending.unshift(*requests)
|
|
60
|
+
@requests.clear
|
|
53
61
|
end
|
|
54
62
|
|
|
55
63
|
def close
|
|
@@ -85,7 +93,7 @@ module HTTPX
|
|
|
85
93
|
return if @requests.include?(request)
|
|
86
94
|
|
|
87
95
|
@requests << request
|
|
88
|
-
@pipelining =
|
|
96
|
+
@pipelining = @max_concurrent_requests > 1 && @requests.size > 1
|
|
89
97
|
end
|
|
90
98
|
|
|
91
99
|
def consume
|
|
@@ -140,15 +148,17 @@ module HTTPX
|
|
|
140
148
|
|
|
141
149
|
return unless request
|
|
142
150
|
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
151
|
+
begin
|
|
152
|
+
log(color: :green) { "-> DATA: #{chunk.bytesize} bytes..." }
|
|
153
|
+
log(level: 2, color: :green) { "-> #{log_redact_body(chunk.inspect)}" }
|
|
154
|
+
response = request.response
|
|
146
155
|
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
156
|
+
response << chunk
|
|
157
|
+
rescue StandardError => e
|
|
158
|
+
error_response = ErrorResponse.new(request, e)
|
|
159
|
+
request.response = error_response
|
|
160
|
+
dispatch(request)
|
|
161
|
+
end
|
|
152
162
|
end
|
|
153
163
|
|
|
154
164
|
def on_complete
|
|
@@ -157,12 +167,10 @@ module HTTPX
|
|
|
157
167
|
return unless request
|
|
158
168
|
|
|
159
169
|
log(level: 2) { "parsing complete" }
|
|
160
|
-
dispatch
|
|
170
|
+
dispatch(request)
|
|
161
171
|
end
|
|
162
172
|
|
|
163
|
-
def dispatch
|
|
164
|
-
request = @request
|
|
165
|
-
|
|
173
|
+
def dispatch(request)
|
|
166
174
|
if request.expects?
|
|
167
175
|
@parser.reset!
|
|
168
176
|
return handle(request)
|
|
@@ -175,6 +183,7 @@ module HTTPX
|
|
|
175
183
|
|
|
176
184
|
if @parser.upgrade?
|
|
177
185
|
response << @parser.upgrade_data
|
|
186
|
+
@parser.reset!
|
|
178
187
|
throw(:called)
|
|
179
188
|
end
|
|
180
189
|
|
|
@@ -211,12 +220,12 @@ module HTTPX
|
|
|
211
220
|
if @pipelining
|
|
212
221
|
catch(:called) { disable }
|
|
213
222
|
else
|
|
214
|
-
@requests.
|
|
223
|
+
while (req = @requests.shift)
|
|
215
224
|
next if request && request == req
|
|
216
225
|
|
|
217
226
|
emit(:error, req, ex)
|
|
218
227
|
end
|
|
219
|
-
@pending.
|
|
228
|
+
while (req = @pending.shift)
|
|
220
229
|
next if request && request == req
|
|
221
230
|
|
|
222
231
|
emit(:error, req, ex)
|
|
@@ -280,7 +289,6 @@ module HTTPX
|
|
|
280
289
|
end
|
|
281
290
|
|
|
282
291
|
def disable_pipelining
|
|
283
|
-
return if @requests.empty?
|
|
284
292
|
# do not disable pipelining if already set to 1 request at a time
|
|
285
293
|
return if @max_concurrent_requests == 1
|
|
286
294
|
|
|
@@ -3,8 +3,6 @@
|
|
|
3
3
|
require "securerandom"
|
|
4
4
|
require "http/2"
|
|
5
5
|
|
|
6
|
-
HTTP2::Connection.__send__(:public, :send_buffer) if HTTP2::VERSION < "1.1.1"
|
|
7
|
-
|
|
8
6
|
module HTTPX
|
|
9
7
|
class Connection::HTTP2
|
|
10
8
|
include Callbacks
|
|
@@ -59,29 +57,37 @@ module HTTPX
|
|
|
59
57
|
|
|
60
58
|
return if @buffer.empty?
|
|
61
59
|
|
|
60
|
+
# HTTP/2 GOAWAY frame buffered.
|
|
62
61
|
return :w
|
|
63
62
|
end
|
|
64
63
|
|
|
65
64
|
unless @connection.state == :connected && @handshake_completed
|
|
65
|
+
# HTTP/2 in intermediate state or still completing initialization-
|
|
66
66
|
return @buffer.empty? ? :r : :rw
|
|
67
67
|
end
|
|
68
68
|
|
|
69
69
|
unless @connection.send_buffer.empty?
|
|
70
|
+
# HTTP/2 connection is buffering data chunks and failing to emit DATA frames,
|
|
71
|
+
# most likely because the flow control window is exhausted.
|
|
70
72
|
return :rw unless @buffer.empty?
|
|
71
73
|
|
|
72
74
|
# waiting for WINDOW_UPDATE frames
|
|
73
75
|
return :r
|
|
74
76
|
end
|
|
75
77
|
|
|
78
|
+
# there are pending bufferable requests
|
|
76
79
|
return :w if !@pending.empty? && can_buffer_more_requests?
|
|
77
80
|
|
|
81
|
+
# there are pending frames from the last run
|
|
78
82
|
return :w unless @drains.empty?
|
|
79
83
|
|
|
80
84
|
if @buffer.empty?
|
|
85
|
+
# skip if no more requests or pings to process
|
|
81
86
|
return if @streams.empty? && @pings.empty?
|
|
82
87
|
|
|
83
88
|
:r
|
|
84
89
|
else
|
|
90
|
+
# buffered frames
|
|
85
91
|
:w
|
|
86
92
|
end
|
|
87
93
|
end
|
|
@@ -140,7 +146,7 @@ module HTTPX
|
|
|
140
146
|
settings_ex.set_backtrace(ex.backtrace)
|
|
141
147
|
ex = settings_ex
|
|
142
148
|
end
|
|
143
|
-
@streams.
|
|
149
|
+
while (req, _ = @streams.shift)
|
|
144
150
|
next if request && request == req
|
|
145
151
|
|
|
146
152
|
emit(:error, req, ex)
|
|
@@ -163,6 +169,8 @@ module HTTPX
|
|
|
163
169
|
@pings.any?
|
|
164
170
|
end
|
|
165
171
|
|
|
172
|
+
def reset_requests; end
|
|
173
|
+
|
|
166
174
|
private
|
|
167
175
|
|
|
168
176
|
def can_buffer_more_requests?
|
|
@@ -215,9 +223,7 @@ module HTTPX
|
|
|
215
223
|
def handle_stream(stream, request)
|
|
216
224
|
request.on(:refuse, &method(:on_stream_refuse).curry(3)[stream, request])
|
|
217
225
|
stream.on(:close, &method(:on_stream_close).curry(3)[stream, request])
|
|
218
|
-
stream.on(:half_close)
|
|
219
|
-
log(level: 2) { "#{stream.id}: waiting for response..." }
|
|
220
|
-
end
|
|
226
|
+
stream.on(:half_close) { on_stream_half_close(stream, request) }
|
|
221
227
|
stream.on(:altsvc, &method(:on_altsvc).curry(2)[request.origin])
|
|
222
228
|
stream.on(:headers, &method(:on_stream_headers).curry(3)[stream, request])
|
|
223
229
|
stream.on(:data, &method(:on_stream_data).curry(3)[stream, request])
|
|
@@ -302,7 +308,7 @@ module HTTPX
|
|
|
302
308
|
end
|
|
303
309
|
|
|
304
310
|
log(color: :yellow) do
|
|
305
|
-
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{log_redact_headers(v)}" }.join("\n")
|
|
311
|
+
h.map { |k, v| "#{stream.id}: <- HEADER: #{k}: #{k == ":status" ? v : log_redact_headers(v)}" }.join("\n")
|
|
306
312
|
end
|
|
307
313
|
_, status = h.shift
|
|
308
314
|
headers = request.options.headers_class.new(h)
|
|
@@ -331,6 +337,16 @@ module HTTPX
|
|
|
331
337
|
stream.close
|
|
332
338
|
end
|
|
333
339
|
|
|
340
|
+
def on_stream_half_close(stream, _request)
|
|
341
|
+
unless stream.send_buffer.empty?
|
|
342
|
+
stream.send_buffer.clear
|
|
343
|
+
stream.data("", end_stream: true)
|
|
344
|
+
end
|
|
345
|
+
|
|
346
|
+
# TODO: omit log line if response already here
|
|
347
|
+
log(level: 2) { "#{stream.id}: waiting for response..." }
|
|
348
|
+
end
|
|
349
|
+
|
|
334
350
|
def on_stream_close(stream, request, error)
|
|
335
351
|
return if error == :stream_closed && !@streams.key?(request)
|
|
336
352
|
|
|
@@ -391,10 +407,9 @@ module HTTPX
|
|
|
391
407
|
ex = GoawayError.new(error)
|
|
392
408
|
ex.set_backtrace(caller)
|
|
393
409
|
|
|
394
|
-
|
|
410
|
+
handle_error(ex)
|
|
395
411
|
teardown
|
|
396
412
|
|
|
397
|
-
handle_error(ex)
|
|
398
413
|
end
|
|
399
414
|
end
|
|
400
415
|
return unless is_connection_closed && @streams.empty?
|
|
@@ -404,34 +419,39 @@ module HTTPX
|
|
|
404
419
|
|
|
405
420
|
def on_frame_sent(frame)
|
|
406
421
|
log(level: 2) { "#{frame[:stream]}: frame was sent!" }
|
|
407
|
-
log(level: 2, color: :blue)
|
|
408
|
-
payload =
|
|
409
|
-
case frame[:type]
|
|
410
|
-
when :data
|
|
411
|
-
frame.merge(payload: frame[:payload].bytesize)
|
|
412
|
-
when :headers, :ping
|
|
413
|
-
frame.merge(payload: log_redact_headers(frame[:payload]))
|
|
414
|
-
else
|
|
415
|
-
frame
|
|
416
|
-
end
|
|
417
|
-
"#{frame[:stream]}: #{payload}"
|
|
418
|
-
end
|
|
422
|
+
log(level: 2, color: :blue) { "#{frame[:stream]}: #{frame_with_extra_info(frame)}" }
|
|
419
423
|
end
|
|
420
424
|
|
|
421
425
|
def on_frame_received(frame)
|
|
422
426
|
log(level: 2) { "#{frame[:stream]}: frame was received!" }
|
|
423
|
-
log(level: 2, color: :magenta)
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
427
|
+
log(level: 2, color: :magenta) { "#{frame[:stream]}: #{frame_with_extra_info(frame)}" }
|
|
428
|
+
end
|
|
429
|
+
|
|
430
|
+
def frame_with_extra_info(frame)
|
|
431
|
+
case frame[:type]
|
|
432
|
+
when :data
|
|
433
|
+
frame.merge(payload: frame[:payload].bytesize)
|
|
434
|
+
when :headers, :ping
|
|
435
|
+
frame.merge(payload: log_redact_headers(frame[:payload]))
|
|
436
|
+
when :window_update
|
|
437
|
+
connection_or_stream = if (id = frame[:stream]).zero?
|
|
438
|
+
@connection
|
|
439
|
+
else
|
|
440
|
+
@streams.each_value.find { |s| s.id == id }
|
|
441
|
+
end
|
|
442
|
+
if connection_or_stream
|
|
443
|
+
frame.merge(
|
|
444
|
+
local_window: connection_or_stream.local_window,
|
|
445
|
+
remote_window: connection_or_stream.remote_window,
|
|
446
|
+
buffered_amount: connection_or_stream.buffered_amount,
|
|
447
|
+
stream_state: connection_or_stream.state,
|
|
448
|
+
)
|
|
449
|
+
else
|
|
450
|
+
frame
|
|
451
|
+
end
|
|
452
|
+
else
|
|
453
|
+
frame
|
|
454
|
+
end.merge(connection_state: @connection.state)
|
|
435
455
|
end
|
|
436
456
|
|
|
437
457
|
def on_altsvc(origin, frame)
|