wreq-rb 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Cargo.lock +2688 -0
- data/Cargo.toml +6 -0
- data/README.md +179 -0
- data/ext/wreq_rb/Cargo.toml +39 -0
- data/ext/wreq_rb/extconf.rb +22 -0
- data/ext/wreq_rb/src/client.rs +565 -0
- data/ext/wreq_rb/src/error.rs +25 -0
- data/ext/wreq_rb/src/lib.rs +20 -0
- data/ext/wreq_rb/src/response.rs +132 -0
- data/lib/wreq-rb/version.rb +5 -0
- data/lib/wreq-rb.rb +17 -0
- data/patches/0001-add-transfer-size-tracking.patch +292 -0
- data/vendor/wreq/Cargo.toml +306 -0
- data/vendor/wreq/LICENSE +202 -0
- data/vendor/wreq/README.md +122 -0
- data/vendor/wreq/examples/cert_store.rs +77 -0
- data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
- data/vendor/wreq/examples/emulation.rs +118 -0
- data/vendor/wreq/examples/form.rs +14 -0
- data/vendor/wreq/examples/http1_websocket.rs +37 -0
- data/vendor/wreq/examples/http2_websocket.rs +45 -0
- data/vendor/wreq/examples/json_dynamic.rs +41 -0
- data/vendor/wreq/examples/json_typed.rs +47 -0
- data/vendor/wreq/examples/keylog.rs +16 -0
- data/vendor/wreq/examples/request_with_emulation.rs +115 -0
- data/vendor/wreq/examples/request_with_interface.rs +37 -0
- data/vendor/wreq/examples/request_with_local_address.rs +16 -0
- data/vendor/wreq/examples/request_with_proxy.rs +13 -0
- data/vendor/wreq/examples/request_with_redirect.rs +22 -0
- data/vendor/wreq/examples/request_with_version.rs +15 -0
- data/vendor/wreq/examples/tor_socks.rs +24 -0
- data/vendor/wreq/examples/unix_socket.rs +33 -0
- data/vendor/wreq/src/client/body.rs +304 -0
- data/vendor/wreq/src/client/conn/conn.rs +231 -0
- data/vendor/wreq/src/client/conn/connector.rs +549 -0
- data/vendor/wreq/src/client/conn/http.rs +1023 -0
- data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
- data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
- data/vendor/wreq/src/client/conn/proxy.rs +39 -0
- data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
- data/vendor/wreq/src/client/conn/uds.rs +44 -0
- data/vendor/wreq/src/client/conn/verbose.rs +149 -0
- data/vendor/wreq/src/client/conn.rs +323 -0
- data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
- data/vendor/wreq/src/client/core/body/length.rs +118 -0
- data/vendor/wreq/src/client/core/body.rs +34 -0
- data/vendor/wreq/src/client/core/common/buf.rs +149 -0
- data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
- data/vendor/wreq/src/client/core/common/watch.rs +76 -0
- data/vendor/wreq/src/client/core/common.rs +3 -0
- data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
- data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
- data/vendor/wreq/src/client/core/conn.rs +11 -0
- data/vendor/wreq/src/client/core/dispatch.rs +299 -0
- data/vendor/wreq/src/client/core/error.rs +435 -0
- data/vendor/wreq/src/client/core/ext.rs +201 -0
- data/vendor/wreq/src/client/core/http1.rs +178 -0
- data/vendor/wreq/src/client/core/http2.rs +483 -0
- data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
- data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
- data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
- data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
- data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
- data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
- data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
- data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
- data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
- data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
- data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
- data/vendor/wreq/src/client/core/proto.rs +58 -0
- data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
- data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
- data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
- data/vendor/wreq/src/client/core/rt.rs +25 -0
- data/vendor/wreq/src/client/core/upgrade.rs +267 -0
- data/vendor/wreq/src/client/core.rs +16 -0
- data/vendor/wreq/src/client/emulation.rs +161 -0
- data/vendor/wreq/src/client/http/client/error.rs +142 -0
- data/vendor/wreq/src/client/http/client/exec.rs +29 -0
- data/vendor/wreq/src/client/http/client/extra.rs +77 -0
- data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
- data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
- data/vendor/wreq/src/client/http/client/util.rs +104 -0
- data/vendor/wreq/src/client/http/client.rs +1003 -0
- data/vendor/wreq/src/client/http/future.rs +99 -0
- data/vendor/wreq/src/client/http.rs +1629 -0
- data/vendor/wreq/src/client/layer/config/options.rs +156 -0
- data/vendor/wreq/src/client/layer/config.rs +116 -0
- data/vendor/wreq/src/client/layer/cookie.rs +161 -0
- data/vendor/wreq/src/client/layer/decoder.rs +139 -0
- data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
- data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
- data/vendor/wreq/src/client/layer/redirect.rs +145 -0
- data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
- data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
- data/vendor/wreq/src/client/layer/retry.rs +151 -0
- data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
- data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
- data/vendor/wreq/src/client/layer/timeout.rs +177 -0
- data/vendor/wreq/src/client/layer.rs +15 -0
- data/vendor/wreq/src/client/multipart.rs +717 -0
- data/vendor/wreq/src/client/request.rs +818 -0
- data/vendor/wreq/src/client/response.rs +534 -0
- data/vendor/wreq/src/client/ws/json.rs +99 -0
- data/vendor/wreq/src/client/ws/message.rs +453 -0
- data/vendor/wreq/src/client/ws.rs +714 -0
- data/vendor/wreq/src/client.rs +27 -0
- data/vendor/wreq/src/config.rs +140 -0
- data/vendor/wreq/src/cookie.rs +579 -0
- data/vendor/wreq/src/dns/gai.rs +249 -0
- data/vendor/wreq/src/dns/hickory.rs +78 -0
- data/vendor/wreq/src/dns/resolve.rs +180 -0
- data/vendor/wreq/src/dns.rs +69 -0
- data/vendor/wreq/src/error.rs +502 -0
- data/vendor/wreq/src/ext.rs +398 -0
- data/vendor/wreq/src/hash.rs +143 -0
- data/vendor/wreq/src/header.rs +506 -0
- data/vendor/wreq/src/into_uri.rs +187 -0
- data/vendor/wreq/src/lib.rs +586 -0
- data/vendor/wreq/src/proxy/mac.rs +82 -0
- data/vendor/wreq/src/proxy/matcher.rs +806 -0
- data/vendor/wreq/src/proxy/uds.rs +66 -0
- data/vendor/wreq/src/proxy/win.rs +31 -0
- data/vendor/wreq/src/proxy.rs +569 -0
- data/vendor/wreq/src/redirect.rs +575 -0
- data/vendor/wreq/src/retry.rs +198 -0
- data/vendor/wreq/src/sync.rs +129 -0
- data/vendor/wreq/src/tls/conn/cache.rs +123 -0
- data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
- data/vendor/wreq/src/tls/conn/ext.rs +82 -0
- data/vendor/wreq/src/tls/conn/macros.rs +34 -0
- data/vendor/wreq/src/tls/conn/service.rs +138 -0
- data/vendor/wreq/src/tls/conn.rs +681 -0
- data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
- data/vendor/wreq/src/tls/keylog.rs +99 -0
- data/vendor/wreq/src/tls/options.rs +464 -0
- data/vendor/wreq/src/tls/x509/identity.rs +122 -0
- data/vendor/wreq/src/tls/x509/parser.rs +71 -0
- data/vendor/wreq/src/tls/x509/store.rs +228 -0
- data/vendor/wreq/src/tls/x509.rs +68 -0
- data/vendor/wreq/src/tls.rs +154 -0
- data/vendor/wreq/src/trace.rs +55 -0
- data/vendor/wreq/src/util.rs +122 -0
- data/vendor/wreq/tests/badssl.rs +228 -0
- data/vendor/wreq/tests/brotli.rs +350 -0
- data/vendor/wreq/tests/client.rs +1098 -0
- data/vendor/wreq/tests/connector_layers.rs +227 -0
- data/vendor/wreq/tests/cookie.rs +306 -0
- data/vendor/wreq/tests/deflate.rs +347 -0
- data/vendor/wreq/tests/emulation.rs +260 -0
- data/vendor/wreq/tests/gzip.rs +347 -0
- data/vendor/wreq/tests/layers.rs +261 -0
- data/vendor/wreq/tests/multipart.rs +165 -0
- data/vendor/wreq/tests/proxy.rs +438 -0
- data/vendor/wreq/tests/redirect.rs +629 -0
- data/vendor/wreq/tests/retry.rs +135 -0
- data/vendor/wreq/tests/support/delay_server.rs +117 -0
- data/vendor/wreq/tests/support/error.rs +16 -0
- data/vendor/wreq/tests/support/layer.rs +183 -0
- data/vendor/wreq/tests/support/mod.rs +9 -0
- data/vendor/wreq/tests/support/server.rs +232 -0
- data/vendor/wreq/tests/timeouts.rs +281 -0
- data/vendor/wreq/tests/unix_socket.rs +135 -0
- data/vendor/wreq/tests/upgrade.rs +98 -0
- data/vendor/wreq/tests/zstd.rs +559 -0
- metadata +225 -0
|
@@ -0,0 +1,650 @@
|
|
|
1
|
+
use std::{
|
|
2
|
+
convert::Infallible,
|
|
3
|
+
future::Future,
|
|
4
|
+
marker::PhantomData,
|
|
5
|
+
pin::Pin,
|
|
6
|
+
task::{Context, Poll, ready},
|
|
7
|
+
};
|
|
8
|
+
|
|
9
|
+
use bytes::Bytes;
|
|
10
|
+
use futures_channel::{
|
|
11
|
+
mpsc,
|
|
12
|
+
mpsc::{Receiver, Sender},
|
|
13
|
+
oneshot,
|
|
14
|
+
};
|
|
15
|
+
use futures_util::{
|
|
16
|
+
future::{Either, FusedFuture},
|
|
17
|
+
stream::{FusedStream, Stream},
|
|
18
|
+
};
|
|
19
|
+
use http::{Method, Request, Response, StatusCode};
|
|
20
|
+
use http_body::Body;
|
|
21
|
+
use http2::{
|
|
22
|
+
SendStream,
|
|
23
|
+
client::{Builder, Connection, ResponseFuture, SendRequest},
|
|
24
|
+
};
|
|
25
|
+
use pin_project_lite::pin_project;
|
|
26
|
+
use tokio::io::{AsyncRead, AsyncWrite};
|
|
27
|
+
|
|
28
|
+
use super::{
|
|
29
|
+
H2Upgraded, PipeToSendStream, SendBuf, ping,
|
|
30
|
+
ping::{Ponger, Recorder},
|
|
31
|
+
};
|
|
32
|
+
use crate::{
|
|
33
|
+
client::core::{
|
|
34
|
+
self, Error,
|
|
35
|
+
body::{self, Incoming as IncomingBody},
|
|
36
|
+
dispatch::{self, Callback, SendWhen, TrySendError},
|
|
37
|
+
error::BoxError,
|
|
38
|
+
proto::{Dispatched, headers},
|
|
39
|
+
rt::{Time, bounds::Http2ClientConnExec},
|
|
40
|
+
upgrade::{self, Upgraded},
|
|
41
|
+
},
|
|
42
|
+
config::RequestConfig,
|
|
43
|
+
header::OrigHeaderMap,
|
|
44
|
+
};
|
|
45
|
+
|
|
46
|
+
/// Channel on which the dispatch task receives requests paired with the
/// callback used to deliver the eventual response (or error) to the caller.
type ClientRx<B> = dispatch::Receiver<Request<B>, Response<IncomingBody>>;

// An mpsc channel is used to help notify the `Connection` task when *all*
// other handles to it have been dropped, so that it can shutdown.
type ConnDropRef = mpsc::Sender<Infallible>;

// A oneshot channel watches the `Connection` task, and when it completes,
// the "dispatch" task will be notified and can shutdown sooner.
type ConnEof = oneshot::Receiver<Infallible>;
|
|
55
|
+
|
|
56
|
+
/// Performs the HTTP/2 client handshake over `io` and spawns the
/// connection-driving task onto `exec`.
///
/// Returns a [`ClientTask`] that pulls requests from `req_rx` and dispatches
/// them over the negotiated connection. If keep-alive pings are enabled in
/// `ping_config`, the spawned connection future is paired with a `Ponger`
/// that drives the ping/pong state machine.
pub(crate) async fn handshake<T, B, E>(
    io: T,
    req_rx: ClientRx<B>,
    builder: Builder,
    ping_config: ping::Config,
    mut exec: E,
    timer: Time,
) -> core::Result<ClientTask<B, E, T>>
where
    T: AsyncRead + AsyncWrite + Unpin,
    B: Body + 'static,
    B::Data: Send + 'static,
    E: Http2ClientConnExec<B, T> + Unpin,
    B::Error: Into<BoxError>,
{
    let (h2_tx, mut conn) = builder
        .handshake::<_, SendBuf<B::Data>>(io)
        .await
        .map_err(Error::new_h2)?;

    // An mpsc channel is used entirely to detect when the
    // 'Client' has been dropped. This is to get around a bug
    // in h2 where dropping all SendRequests won't notify a
    // parked Connection.
    let (conn_drop_ref, conn_drop_rx) = mpsc::channel(1);
    let (cancel_tx, conn_eof) = oneshot::channel();

    // When pings are enabled, wrap the connection so the Ponger is driven
    // alongside it; otherwise use the raw connection with a no-op recorder.
    let (conn, ping) = if ping_config.is_enabled() {
        let pp = conn.ping_pong().expect("conn.ping_pong");
        let (recorder, ponger) = ping::channel(pp, ping_config, timer);

        let conn: Conn<_, B> = Conn::new(ponger, conn);
        (Either::Left(conn), recorder)
    } else {
        (Either::Right(conn), ping::disabled())
    };
    let conn: ConnMapErr<T, B> = ConnMapErr {
        conn,
        is_terminated: false,
    };

    // Spawn the connection driver; the dispatch task below only observes its
    // completion through `conn_eof`.
    exec.execute_h2_future(H2ClientFuture::Task {
        task: ConnTask::new(conn, conn_drop_rx, cancel_tx),
    });

    Ok(ClientTask {
        ping,
        conn_drop_ref,
        conn_eof,
        executor: exec,
        h2_tx,
        req_rx,
        fut_ctx: None,
        marker: PhantomData,
    })
}
|
|
112
|
+
|
|
113
|
+
pin_project! {
    /// An h2 `Connection` paired with the keep-alive `Ponger`.
    ///
    /// Polling this future drives both the connection I/O and the ping/pong
    /// keep-alive state machine (see the `Future` impl below).
    struct Conn<T, B>
    where
        B: Body,
    {
        #[pin]
        ponger: Ponger,
        #[pin]
        conn: Connection<T, SendBuf<<B as Body>::Data>>,
    }
}
|
|
124
|
+
|
|
125
|
+
impl<T, B> Conn<T, B>
|
|
126
|
+
where
|
|
127
|
+
B: Body,
|
|
128
|
+
T: AsyncRead + AsyncWrite + Unpin,
|
|
129
|
+
{
|
|
130
|
+
fn new(ponger: Ponger, conn: Connection<T, SendBuf<<B as Body>::Data>>) -> Self {
|
|
131
|
+
Conn { ponger, conn }
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
impl<T, B> Future for Conn<T, B>
where
    B: Body,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<(), http2::Error>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Drive the keep-alive state first so that window updates and
        // timeouts are acted on before the connection itself is polled.
        match this.ponger.poll(cx) {
            Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
                // Adaptive flow control: apply the newly computed window size
                // to the connection and to future streams.
                this.conn.set_target_window_size(wnd);
                this.conn.set_initial_window_size(wnd)?;
            }
            Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
                // A keep-alive timeout is surfaced as a clean shutdown of
                // this connection future.
                debug!("connection keep-alive timed out");
                return Poll::Ready(Ok(()));
            }
            Poll::Pending => {}
        }

        Pin::new(&mut this.conn).poll(cx)
    }
}
|
|
159
|
+
|
|
160
|
+
pin_project! {
    /// Adapts the connection future (ping-wrapped or raw) to an erased
    /// `Result<(), ()>` output, remembering completion so that it can honor
    /// the `FusedFuture` contract and never be polled to completion twice.
    struct ConnMapErr<T, B>
    where
        B: Body,
        // NOTE: bounds are written one-per-line because pin_project! does
        // not accept `+`-joined bounds here.
        T: AsyncRead,
        T: AsyncWrite,
        T: Unpin,
    {
        #[pin]
        conn: Either<Conn<T, B>, Connection<T, SendBuf<<B as Body>::Data>>>,
        // Set once `conn` resolves; checked by `FusedFuture::is_terminated`.
        #[pin]
        is_terminated: bool,
    }
}
|
|
174
|
+
|
|
175
|
+
impl<T, B> Future for ConnMapErr<T, B>
where
    B: Body,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<(), ()>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();

        // Fused behavior: once the inner connection has completed, never
        // poll it again — report Pending forever instead.
        if *this.is_terminated {
            return Poll::Pending;
        }
        let polled = this.conn.poll(cx);
        if polled.is_ready() {
            *this.is_terminated = true;
        }
        // The error is only logged; callers only care that the connection
        // has finished, not why.
        polled.map_err(|_e| {
            debug!(error = %_e, "connection error");
        })
    }
}
|
|
197
|
+
|
|
198
|
+
impl<T, B> FusedFuture for ConnMapErr<T, B>
where
    B: Body,
    T: AsyncRead + AsyncWrite + Unpin,
{
    /// Reports whether the underlying connection future already completed
    /// (and therefore must not be polled again).
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
|
|
207
|
+
|
|
208
|
+
pin_project! {
    /// The spawned task that owns and drives the connection future.
    ///
    /// It also watches `drop_rx` to learn when every client handle has been
    /// dropped (so shutdown can begin), and drops `cancel_tx` at that point
    /// to wake the dispatch task early.
    pub struct ConnTask<T, B>
    where
        B: Body,
        // NOTE: bounds one-per-line for pin_project! compatibility.
        T: AsyncRead,
        T: AsyncWrite,
        T: Unpin,
    {
        // Closed when all `SendRequest`/client handles are gone.
        #[pin]
        drop_rx: Receiver<Infallible>,
        // Dropped to signal the dispatch task; `None` after it has fired.
        #[pin]
        cancel_tx: Option<oneshot::Sender<Infallible>>,
        #[pin]
        conn: ConnMapErr<T, B>,
    }
}
|
|
224
|
+
|
|
225
|
+
impl<T, B> ConnTask<T, B>
|
|
226
|
+
where
|
|
227
|
+
B: Body,
|
|
228
|
+
T: AsyncRead + AsyncWrite + Unpin,
|
|
229
|
+
{
|
|
230
|
+
fn new(
|
|
231
|
+
conn: ConnMapErr<T, B>,
|
|
232
|
+
drop_rx: Receiver<Infallible>,
|
|
233
|
+
cancel_tx: oneshot::Sender<Infallible>,
|
|
234
|
+
) -> Self {
|
|
235
|
+
Self {
|
|
236
|
+
drop_rx,
|
|
237
|
+
cancel_tx: Some(cancel_tx),
|
|
238
|
+
conn,
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
impl<T, B> Future for ConnTask<T, B>
where
    B: Body,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Output = ();

    // Resolves only when the connection itself finishes; a closed `drop_rx`
    // merely triggers shutdown signaling and keeps the task alive so the
    // connection can wind down gracefully.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();

        if !this.conn.is_terminated() && Pin::new(&mut this.conn).poll(cx).is_ready() {
            // ok or err, the `conn` has finished.
            return Poll::Ready(());
        }

        if !this.drop_rx.is_terminated() && Pin::new(&mut this.drop_rx).poll_next(cx).is_ready() {
            // mpsc has been dropped, hopefully polling
            // the connection some more should start shutdown
            // and then close.
            trace!("send_request dropped, starting conn shutdown");
            drop(this.cancel_tx.take().expect("ConnTask Future polled twice"));
        }

        Poll::Pending
    }
}
|
|
269
|
+
|
|
270
|
+
pin_project! {
    /// The single future type handed to the executor by the HTTP/2 client.
    ///
    /// Unifies the three kinds of background work so that
    /// `Http2ClientConnExec` only needs to spawn one concrete type:
    /// - `Pipe`: streams a request body into a send stream,
    /// - `Send`: awaits a response (and delivers it via callback),
    /// - `Task`: drives the connection itself.
    #[project = H2ClientFutureProject]
    pub enum H2ClientFuture<B, T>
    where
        B: http_body::Body,
        B: 'static,
        B::Error: Into<BoxError>,
        // NOTE: bounds one-per-line for pin_project! compatibility.
        T: AsyncRead,
        T: AsyncWrite,
        T: Unpin,
    {
        Pipe {
            #[pin]
            pipe: PipeMap<B>,
        },
        Send {
            #[pin]
            send_when: SendWhen<B>,
        },
        Task {
            #[pin]
            task: ConnTask<T, B>,
        },
    }
}
|
|
295
|
+
|
|
296
|
+
impl<B, T> Future for H2ClientFuture<B, T>
where
    B: Body + 'static,
    B::Data: Send,
    B::Error: Into<BoxError>,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Output = ();

    // Pure delegation: each variant's pinned inner future is polled directly.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
        let this = self.project();

        match this {
            H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx),
            H2ClientFutureProject::Send { send_when } => send_when.poll(cx),
            H2ClientFutureProject::Task { task } => task.poll(cx),
        }
    }
}
|
|
315
|
+
|
|
316
|
+
/// Everything needed to finish dispatching one in-flight request: the h2
/// response future, the body send-stream, the request body still to be
/// piped, and the caller's response callback.
///
/// Saved in `ClientTask::fut_ctx` when the new stream is pending open so
/// dispatch can resume on the next poll.
struct FutCtx<B>
where
    B: Body,
{
    // CONNECT requests skip body piping and keep `body_tx` for the upgrade.
    is_connect: bool,
    // True if the request body is already at end-of-stream.
    eos: bool,
    fut: ResponseFuture,
    body_tx: SendStream<SendBuf<B::Data>>,
    body: B,
    cb: Callback<Request<B>, Response<IncomingBody>>,
}
|
|
327
|
+
|
|
328
|
+
// `FutCtx` is only ever moved by value (stored in an `Option` and `take`n),
// never polled through a pin, so it is unconditionally `Unpin` even when
// `B` is not.
impl<B: Body> Unpin for FutCtx<B> {}
|
|
329
|
+
|
|
330
|
+
/// The dispatch half of an HTTP/2 client connection.
///
/// Created by [`handshake`]; its `Future` impl pulls requests off `req_rx`
/// and sends them over `h2_tx` until the channel or connection closes.
pub(crate) struct ClientTask<B, E, T>
where
    B: Body,
    E: Unpin,
{
    // Records request/response activity for keep-alive bookkeeping.
    ping: ping::Recorder,
    // Keeps the connection task's drop-detection channel open; cloned into
    // body-piping futures so the connection outlives in-flight bodies.
    conn_drop_ref: ConnDropRef,
    // Resolves (with Err) when the connection task has closed.
    conn_eof: ConnEof,
    executor: E,
    h2_tx: SendRequest<SendBuf<B::Data>>,
    req_rx: ClientRx<B>,
    // Holds a request whose stream was pending open; resumed on next poll.
    fut_ctx: Option<FutCtx<B>>,
    // Ties the unused `T` (I/O type) parameter to this struct.
    marker: PhantomData<T>,
}
|
|
344
|
+
|
|
345
|
+
pin_project! {
    /// Wraps a body-piping future, holding a connection drop-ref and a ping
    /// recorder alive for as long as the body is still being sent.
    ///
    /// Both `Option`s are `take`n exactly once when the pipe completes.
    pub struct PipeMap<S>
    where
        S: Body,
    {
        #[pin]
        pipe: PipeToSendStream<S>,
        #[pin]
        conn_drop_ref: Option<Sender<Infallible>>,
        #[pin]
        ping: Option<Recorder>,
    }
}
|
|
358
|
+
|
|
359
|
+
impl<B> Future for PipeMap<B>
where
    B: http_body::Body,
    B::Error: Into<BoxError>,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
        let mut this = self.project();

        match Pin::new(&mut this.pipe).poll(cx) {
            Poll::Ready(result) => {
                // Body errors are logged only; the response path reports the
                // user-visible failure.
                if let Err(_e) = result {
                    debug!("client request body error: {}", _e);
                }
                // Release the connection drop-ref and ping recorder now that
                // the body stream is done.
                drop(this.conn_drop_ref.take().expect("Future polled twice"));
                drop(this.ping.take().expect("Future polled twice"));
                return Poll::Ready(());
            }
            Poll::Pending => (),
        };
        Poll::Pending
    }
}
|
|
383
|
+
|
|
384
|
+
impl<B, E, T> ClientTask<B, E, T>
where
    B: Body + 'static + Unpin,
    B::Data: Send,
    E: Http2ClientConnExec<B, T> + Unpin,
    B::Error: Into<BoxError>,
    T: AsyncRead + AsyncWrite + Unpin,
{
    /// Finishes dispatching one request: arranges for the body to be piped
    /// into the h2 send stream (unless CONNECT or already at EOS) and spawns
    /// the response-wait future that fulfills the caller's callback.
    fn poll_pipe(&mut self, f: FutCtx<B>, cx: &mut Context<'_>) {
        let ping = self.ping.clone();

        // For CONNECT the send stream is kept and later handed to the
        // upgrade machinery; otherwise it is consumed by the body pipe.
        let send_stream = if !f.is_connect {
            if !f.eos {
                let mut pipe = PipeToSendStream::new(f.body, f.body_tx);

                // eagerly see if the body pipe is ready and
                // can thus skip allocating in the executor
                match Pin::new(&mut pipe).poll(cx) {
                    Poll::Ready(_) => (),
                    Poll::Pending => {
                        let conn_drop_ref = self.conn_drop_ref.clone();
                        // keep the ping recorder's knowledge of an
                        // "open stream" alive while this body is
                        // still sending...
                        let ping = ping.clone();

                        let pipe = PipeMap {
                            pipe,
                            conn_drop_ref: Some(conn_drop_ref),
                            ping: Some(ping),
                        };
                        // Clear send task
                        self.executor
                            .execute_h2_future(H2ClientFuture::Pipe { pipe });
                    }
                }
            }

            None
        } else {
            Some(f.body_tx)
        };

        // Spawn the future that waits for the response headers and invokes
        // the caller's callback when they arrive.
        self.executor.execute_h2_future(H2ClientFuture::Send {
            send_when: SendWhen {
                when: ResponseFutMap {
                    fut: f.fut,
                    ping: Some(ping),
                    send_stream: Some(send_stream),
                },
                call_back: Some(f.cb),
            },
        });
    }
}
|
|
439
|
+
|
|
440
|
+
pin_project! {
    /// Awaits the h2 response and maps it into the crate's `Response` type,
    /// handling the CONNECT-upgrade case.
    ///
    /// `ping` and `send_stream` are `take`n exactly once when the inner
    /// future resolves (`expect`s guard against polling after completion).
    pub(crate) struct ResponseFutMap<B>
    where
        B: Body,
        B: 'static,
    {
        #[pin]
        fut: ResponseFuture,
        #[pin]
        ping: Option<Recorder>,
        // Outer Option: "taken yet?"; inner Option: Some only for CONNECT,
        // where the stream is handed to the upgrade I/O.
        #[pin]
        send_stream: Option<Option<SendStream<SendBuf<<B as Body>::Data>>>>,
    }
}
|
|
454
|
+
|
|
455
|
+
impl<B> Future for ResponseFutMap<B>
where
    B: Body + 'static,
    B::Data: Send,
{
    type Output = Result<Response<body::Incoming>, (Error, Option<Request<B>>)>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();

        let result = ready!(this.fut.poll(cx));

        let ping = this.ping.take().expect("Future polled twice");
        let send_stream = this.send_stream.take().expect("Future polled twice");

        match result {
            Ok(res) => {
                // record that we got the response headers
                ping.record_non_data();

                let content_length = headers::content_length_parse_all(res.headers());
                // CONNECT path: a 2xx (specifically OK here) with a retained
                // send stream becomes a bidirectional upgraded I/O.
                if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) {
                    if content_length.is_some_and(|len| len != 0) {
                        // A CONNECT response must not carry a body; reset the
                        // stream and surface an internal error.
                        warn!("h2 connect response with non-zero body not supported");

                        send_stream.send_reset(http2::Reason::INTERNAL_ERROR);
                        return Poll::Ready(Err((
                            Error::new_h2(http2::Reason::INTERNAL_ERROR.into()),
                            None::<Request<B>>,
                        )));
                    }
                    let (parts, recv_stream) = res.into_parts();
                    let mut res = Response::from_parts(parts, IncomingBody::empty());

                    // Build the upgraded I/O from the h2 send/recv streams
                    // and expose it via the response's `OnUpgrade` extension.
                    let (pending, on_upgrade) = upgrade::pending();
                    let io = H2Upgraded {
                        ping,
                        send_stream,
                        recv_stream,
                        buf: Bytes::new(),
                    };
                    let upgraded = Upgraded::new(io, Bytes::new());

                    pending.fulfill(upgraded);
                    res.extensions_mut().insert(on_upgrade);

                    Poll::Ready(Ok(res))
                } else {
                    // Normal path: wrap the recv stream as the response body,
                    // keeping per-stream ping accounting attached.
                    let res = res.map(|stream| {
                        let ping = ping.for_stream(&stream);
                        IncomingBody::h2(stream, content_length.into(), ping)
                    });
                    Poll::Ready(Ok(res))
                }
            }
            Err(err) => {
                // A keep-alive timeout takes precedence over the h2 error.
                ping.ensure_not_timed_out().map_err(|e| (e, None))?;

                debug!("client response error: {}", err);
                Poll::Ready(Err((Error::new_h2(err), None::<Request<B>>)))
            }
        }
    }
}
|
|
519
|
+
|
|
520
|
+
impl<B, E, T> Future for ClientTask<B, E, T>
where
    B: Body + 'static + Unpin,
    B::Data: Send,
    B::Error: Into<BoxError>,
    E: Http2ClientConnExec<B, T> + Unpin,
    T: AsyncRead + AsyncWrite + Unpin,
{
    type Output = core::Result<Dispatched>;

    /// The main dispatch loop: waits for stream capacity, resumes any
    /// pending-open request, then accepts and sends new requests until the
    /// request channel or the connection closes.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        loop {
            // Wait until h2 can accept a new stream before doing anything.
            match ready!(self.h2_tx.poll_ready(cx)) {
                Ok(()) => (),
                Err(err) => {
                    self.ping.ensure_not_timed_out()?;
                    // NO_ERROR means the server initiated a graceful GOAWAY.
                    return if err.reason() == Some(::http2::Reason::NO_ERROR) {
                        trace!("connection gracefully shutdown");
                        Poll::Ready(Ok(Dispatched::Shutdown))
                    } else {
                        Poll::Ready(Err(Error::new_h2(err)))
                    };
                }
            };

            // If we were waiting on pending open
            // continue where we left off.
            if let Some(f) = self.fut_ctx.take() {
                self.poll_pipe(f, cx);
                continue;
            }

            match self.req_rx.poll_recv(cx) {
                Poll::Ready(Some((req, cb))) => {
                    // Check that future hasn't been canceled already
                    if cb.is_canceled() {
                        trace!("request callback is canceled");
                        continue;
                    }
                    let (head, body) = req.into_parts();
                    let mut req = ::http::Request::from_parts(head, ());
                    // HTTP/2 forbids connection-level headers.
                    super::strip_connection_headers(req.headers_mut(), true);
                    if let Some(len) = body.size_hint().exact() {
                        if len != 0 || headers::method_has_defined_payload_semantics(req.method()) {
                            headers::set_content_length_if_missing(req.headers_mut(), len);
                        }
                    }

                    // Sort headers if we have the original headers
                    if let Some(orig_headers) =
                        RequestConfig::<OrigHeaderMap>::remove(req.extensions_mut())
                    {
                        orig_headers.sort_headers(req.headers_mut());
                    }

                    let is_connect = req.method() == Method::CONNECT;
                    let eos = body.is_end_stream();

                    // CONNECT requests must not declare a body.
                    if is_connect
                        && headers::content_length_parse_all(req.headers())
                            .is_some_and(|len| len != 0)
                    {
                        debug!("h2 connect request with non-zero body not supported");
                        cb.send(Err(TrySendError {
                            error: Error::new_user_invalid_connect(),
                            message: None,
                        }));
                        continue;
                    }

                    // end_of_stream only when there is no body to pipe and
                    // this is not a CONNECT (whose stream stays open).
                    let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) {
                        Ok(ok) => ok,
                        Err(err) => {
                            debug!("client send request error: {}", err);
                            cb.send(Err(TrySendError {
                                error: Error::new_h2(err),
                                message: None,
                            }));
                            continue;
                        }
                    };

                    let f = FutCtx {
                        is_connect,
                        eos,
                        fut,
                        body_tx,
                        body,
                        cb,
                    };

                    // Check poll_ready() again.
                    // If the call to send_request() resulted in the new stream being pending open
                    // we have to wait for the open to complete before accepting new requests.
                    match self.h2_tx.poll_ready(cx) {
                        Poll::Pending => {
                            // Save Context
                            self.fut_ctx = Some(f);
                            return Poll::Pending;
                        }
                        Poll::Ready(Ok(())) => (),
                        Poll::Ready(Err(err)) => {
                            f.cb.send(Err(TrySendError {
                                error: Error::new_h2(err),
                                message: None,
                            }));
                            continue;
                        }
                    }
                    self.poll_pipe(f, cx);
                    continue;
                }

                Poll::Ready(None) => {
                    // All client handles are gone; shut the dispatcher down.
                    trace!("client::dispatch::Sender dropped");
                    return Poll::Ready(Ok(Dispatched::Shutdown));
                }

                // No new requests: park until either a request arrives or the
                // connection task signals completion via `conn_eof`.
                Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
                    // As of Rust 1.82, this pattern is no longer needed, and emits a warning.
                    // But we cannot remove it as long as MSRV is less than that.
                    Ok(never) => match never {},
                    Err(_conn_is_eof) => {
                        trace!("connection task is closed, closing dispatch task");
                        return Poll::Ready(Ok(Dispatched::Shutdown));
                    }
                },
            }
        }
    }
}
|