wreq-rb 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. checksums.yaml +7 -0
  2. data/Cargo.lock +2688 -0
  3. data/Cargo.toml +6 -0
  4. data/README.md +179 -0
  5. data/ext/wreq_rb/Cargo.toml +39 -0
  6. data/ext/wreq_rb/extconf.rb +22 -0
  7. data/ext/wreq_rb/src/client.rs +565 -0
  8. data/ext/wreq_rb/src/error.rs +25 -0
  9. data/ext/wreq_rb/src/lib.rs +20 -0
  10. data/ext/wreq_rb/src/response.rs +132 -0
  11. data/lib/wreq-rb/version.rb +5 -0
  12. data/lib/wreq-rb.rb +17 -0
  13. data/patches/0001-add-transfer-size-tracking.patch +292 -0
  14. data/vendor/wreq/Cargo.toml +306 -0
  15. data/vendor/wreq/LICENSE +202 -0
  16. data/vendor/wreq/README.md +122 -0
  17. data/vendor/wreq/examples/cert_store.rs +77 -0
  18. data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
  19. data/vendor/wreq/examples/emulation.rs +118 -0
  20. data/vendor/wreq/examples/form.rs +14 -0
  21. data/vendor/wreq/examples/http1_websocket.rs +37 -0
  22. data/vendor/wreq/examples/http2_websocket.rs +45 -0
  23. data/vendor/wreq/examples/json_dynamic.rs +41 -0
  24. data/vendor/wreq/examples/json_typed.rs +47 -0
  25. data/vendor/wreq/examples/keylog.rs +16 -0
  26. data/vendor/wreq/examples/request_with_emulation.rs +115 -0
  27. data/vendor/wreq/examples/request_with_interface.rs +37 -0
  28. data/vendor/wreq/examples/request_with_local_address.rs +16 -0
  29. data/vendor/wreq/examples/request_with_proxy.rs +13 -0
  30. data/vendor/wreq/examples/request_with_redirect.rs +22 -0
  31. data/vendor/wreq/examples/request_with_version.rs +15 -0
  32. data/vendor/wreq/examples/tor_socks.rs +24 -0
  33. data/vendor/wreq/examples/unix_socket.rs +33 -0
  34. data/vendor/wreq/src/client/body.rs +304 -0
  35. data/vendor/wreq/src/client/conn/conn.rs +231 -0
  36. data/vendor/wreq/src/client/conn/connector.rs +549 -0
  37. data/vendor/wreq/src/client/conn/http.rs +1023 -0
  38. data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
  39. data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
  40. data/vendor/wreq/src/client/conn/proxy.rs +39 -0
  41. data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
  42. data/vendor/wreq/src/client/conn/uds.rs +44 -0
  43. data/vendor/wreq/src/client/conn/verbose.rs +149 -0
  44. data/vendor/wreq/src/client/conn.rs +323 -0
  45. data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
  46. data/vendor/wreq/src/client/core/body/length.rs +118 -0
  47. data/vendor/wreq/src/client/core/body.rs +34 -0
  48. data/vendor/wreq/src/client/core/common/buf.rs +149 -0
  49. data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
  50. data/vendor/wreq/src/client/core/common/watch.rs +76 -0
  51. data/vendor/wreq/src/client/core/common.rs +3 -0
  52. data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
  53. data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
  54. data/vendor/wreq/src/client/core/conn.rs +11 -0
  55. data/vendor/wreq/src/client/core/dispatch.rs +299 -0
  56. data/vendor/wreq/src/client/core/error.rs +435 -0
  57. data/vendor/wreq/src/client/core/ext.rs +201 -0
  58. data/vendor/wreq/src/client/core/http1.rs +178 -0
  59. data/vendor/wreq/src/client/core/http2.rs +483 -0
  60. data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
  61. data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
  62. data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
  63. data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
  64. data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
  65. data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
  66. data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
  67. data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
  68. data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
  69. data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
  70. data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
  71. data/vendor/wreq/src/client/core/proto.rs +58 -0
  72. data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
  73. data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
  74. data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
  75. data/vendor/wreq/src/client/core/rt.rs +25 -0
  76. data/vendor/wreq/src/client/core/upgrade.rs +267 -0
  77. data/vendor/wreq/src/client/core.rs +16 -0
  78. data/vendor/wreq/src/client/emulation.rs +161 -0
  79. data/vendor/wreq/src/client/http/client/error.rs +142 -0
  80. data/vendor/wreq/src/client/http/client/exec.rs +29 -0
  81. data/vendor/wreq/src/client/http/client/extra.rs +77 -0
  82. data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
  83. data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
  84. data/vendor/wreq/src/client/http/client/util.rs +104 -0
  85. data/vendor/wreq/src/client/http/client.rs +1003 -0
  86. data/vendor/wreq/src/client/http/future.rs +99 -0
  87. data/vendor/wreq/src/client/http.rs +1629 -0
  88. data/vendor/wreq/src/client/layer/config/options.rs +156 -0
  89. data/vendor/wreq/src/client/layer/config.rs +116 -0
  90. data/vendor/wreq/src/client/layer/cookie.rs +161 -0
  91. data/vendor/wreq/src/client/layer/decoder.rs +139 -0
  92. data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
  93. data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
  94. data/vendor/wreq/src/client/layer/redirect.rs +145 -0
  95. data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
  96. data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
  97. data/vendor/wreq/src/client/layer/retry.rs +151 -0
  98. data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
  99. data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
  100. data/vendor/wreq/src/client/layer/timeout.rs +177 -0
  101. data/vendor/wreq/src/client/layer.rs +15 -0
  102. data/vendor/wreq/src/client/multipart.rs +717 -0
  103. data/vendor/wreq/src/client/request.rs +818 -0
  104. data/vendor/wreq/src/client/response.rs +534 -0
  105. data/vendor/wreq/src/client/ws/json.rs +99 -0
  106. data/vendor/wreq/src/client/ws/message.rs +453 -0
  107. data/vendor/wreq/src/client/ws.rs +714 -0
  108. data/vendor/wreq/src/client.rs +27 -0
  109. data/vendor/wreq/src/config.rs +140 -0
  110. data/vendor/wreq/src/cookie.rs +579 -0
  111. data/vendor/wreq/src/dns/gai.rs +249 -0
  112. data/vendor/wreq/src/dns/hickory.rs +78 -0
  113. data/vendor/wreq/src/dns/resolve.rs +180 -0
  114. data/vendor/wreq/src/dns.rs +69 -0
  115. data/vendor/wreq/src/error.rs +502 -0
  116. data/vendor/wreq/src/ext.rs +398 -0
  117. data/vendor/wreq/src/hash.rs +143 -0
  118. data/vendor/wreq/src/header.rs +506 -0
  119. data/vendor/wreq/src/into_uri.rs +187 -0
  120. data/vendor/wreq/src/lib.rs +586 -0
  121. data/vendor/wreq/src/proxy/mac.rs +82 -0
  122. data/vendor/wreq/src/proxy/matcher.rs +806 -0
  123. data/vendor/wreq/src/proxy/uds.rs +66 -0
  124. data/vendor/wreq/src/proxy/win.rs +31 -0
  125. data/vendor/wreq/src/proxy.rs +569 -0
  126. data/vendor/wreq/src/redirect.rs +575 -0
  127. data/vendor/wreq/src/retry.rs +198 -0
  128. data/vendor/wreq/src/sync.rs +129 -0
  129. data/vendor/wreq/src/tls/conn/cache.rs +123 -0
  130. data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
  131. data/vendor/wreq/src/tls/conn/ext.rs +82 -0
  132. data/vendor/wreq/src/tls/conn/macros.rs +34 -0
  133. data/vendor/wreq/src/tls/conn/service.rs +138 -0
  134. data/vendor/wreq/src/tls/conn.rs +681 -0
  135. data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
  136. data/vendor/wreq/src/tls/keylog.rs +99 -0
  137. data/vendor/wreq/src/tls/options.rs +464 -0
  138. data/vendor/wreq/src/tls/x509/identity.rs +122 -0
  139. data/vendor/wreq/src/tls/x509/parser.rs +71 -0
  140. data/vendor/wreq/src/tls/x509/store.rs +228 -0
  141. data/vendor/wreq/src/tls/x509.rs +68 -0
  142. data/vendor/wreq/src/tls.rs +154 -0
  143. data/vendor/wreq/src/trace.rs +55 -0
  144. data/vendor/wreq/src/util.rs +122 -0
  145. data/vendor/wreq/tests/badssl.rs +228 -0
  146. data/vendor/wreq/tests/brotli.rs +350 -0
  147. data/vendor/wreq/tests/client.rs +1098 -0
  148. data/vendor/wreq/tests/connector_layers.rs +227 -0
  149. data/vendor/wreq/tests/cookie.rs +306 -0
  150. data/vendor/wreq/tests/deflate.rs +347 -0
  151. data/vendor/wreq/tests/emulation.rs +260 -0
  152. data/vendor/wreq/tests/gzip.rs +347 -0
  153. data/vendor/wreq/tests/layers.rs +261 -0
  154. data/vendor/wreq/tests/multipart.rs +165 -0
  155. data/vendor/wreq/tests/proxy.rs +438 -0
  156. data/vendor/wreq/tests/redirect.rs +629 -0
  157. data/vendor/wreq/tests/retry.rs +135 -0
  158. data/vendor/wreq/tests/support/delay_server.rs +117 -0
  159. data/vendor/wreq/tests/support/error.rs +16 -0
  160. data/vendor/wreq/tests/support/layer.rs +183 -0
  161. data/vendor/wreq/tests/support/mod.rs +9 -0
  162. data/vendor/wreq/tests/support/server.rs +232 -0
  163. data/vendor/wreq/tests/timeouts.rs +281 -0
  164. data/vendor/wreq/tests/unix_socket.rs +135 -0
  165. data/vendor/wreq/tests/upgrade.rs +98 -0
  166. data/vendor/wreq/tests/zstd.rs +559 -0
  167. metadata +225 -0
@@ -0,0 +1,684 @@
1
+ use std::{
2
+ future::Future,
3
+ marker::Unpin,
4
+ pin::Pin,
5
+ task::{Context, Poll, ready},
6
+ };
7
+
8
+ use bytes::{Buf, Bytes};
9
+ use http::Request;
10
+ use http_body::Body;
11
+ use tokio::io::{AsyncRead, AsyncWrite};
12
+
13
+ use super::{Http1Transaction, Wants};
14
+ use crate::client::core::{
15
+ Error, Result,
16
+ body::{self, DecodedLength, Incoming as IncomingBody},
17
+ dispatch::{self, TrySendError},
18
+ error::BoxError,
19
+ proto::{self, BodyLength, Conn, Dispatched, MessageHead, RequestHead},
20
+ upgrade::OnUpgrade,
21
+ };
22
+
23
/// Glue between a [`Conn`] (the raw HTTP/1 protocol state machine over
/// transport `I`) and a [`Dispatch`] implementation that supplies outgoing
/// messages and consumes incoming ones.
pub(crate) struct Dispatcher<D, Bs: Body, I, T> {
    // Low-level HTTP/1 connection state machine.
    conn: Conn<I, Bs::Data, T>,
    // User-facing side: yields messages to write, receives parsed ones.
    dispatch: D,
    // Sender half that forwards incoming body frames to the user;
    // `Some` only while an incoming body is being streamed.
    body_tx: Option<body::Sender>,
    // Outgoing body currently being written, boxed+pinned so it can be
    // polled via `poll_frame`; `None` when there is nothing to send.
    body_rx: Pin<Box<Option<Bs>>>,
    // Set once the dispatcher has decided to shut the connection down.
    is_closing: bool,
}
30
+
31
/// The role-specific half of a dispatcher: produces outgoing messages and
/// accepts incoming ones. Implemented by [`Client`] for the client role.
pub(crate) trait Dispatch {
    /// Head type of outgoing messages (a request head for clients).
    type PollItem;
    /// Body type paired with an outgoing head.
    type PollBody;
    /// Error produced while polling for an outgoing message.
    type PollError;
    /// Head type of incoming messages (a response head for clients).
    type RecvItem;

    /// Polls for the next outgoing (head, body) pair; `Ready(None)` means
    /// there will be no more messages to send.
    #[allow(clippy::type_complexity)]
    fn poll_msg(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<std::result::Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
    /// Delivers a parsed incoming head with its body — or a connection
    /// error — to the user.
    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, IncomingBody)>) -> Result<()>;
    /// Whether the dispatch can accept another incoming message;
    /// `Err(())` signals the receiver is gone and reading should stop.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<std::result::Result<(), ()>>;
    /// Whether `poll_msg` should currently be polled for a new message.
    fn should_poll(&self) -> bool;
}
46
+
47
pin_project_lite::pin_project! {
    /// Client-side [`Dispatch`]: pulls queued requests from `rx` and
    /// completes each request's callback with the parsed response.
    pub(crate) struct Client<B> {
        // Callback for the request currently in flight; taken and
        // completed by `recv_msg` when its response (or an error) arrives.
        callback: Option<dispatch::Callback<Request<B>, http::Response<IncomingBody>>>,
        #[pin]
        rx: ClientRx<B>,
        // Set once the request sender handle has been dropped.
        rx_closed: bool,
    }
}

// Receiving end of the request queue shared with the user-facing sender.
type ClientRx<B> = dispatch::Receiver<Request<B>, http::Response<IncomingBody>>;
57
+
58
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
    D: Dispatch<
            PollItem = MessageHead<T::Outgoing>,
            PollBody = Bs,
            RecvItem = MessageHead<T::Incoming>,
        > + Unpin,
    D::PollError: Into<BoxError>,
    I: AsyncRead + AsyncWrite + Unpin,
    T: Http1Transaction + Unpin,
    Bs: Body + 'static,
    Bs::Error: Into<BoxError>,
{
    /// Creates a dispatcher wiring `dispatch` to the given connection.
    pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
        Dispatcher {
            conn,
            dispatch,
            body_tx: None,
            body_rx: Box::pin(None),
            is_closing: false,
        }
    }

    /// Tears the dispatcher apart, returning the transport, any buffered
    /// read bytes, and the dispatch half.
    pub(crate) fn into_inner(self) -> (I, Bytes, D) {
        let (io, buf) = self.conn.into_inner();
        (io, buf, self.dispatch)
    }

    /// Runs `poll_inner`, converting an error into a clean shutdown when
    /// the error can be handed to the user instead.
    fn poll_catch(
        &mut self,
        cx: &mut Context<'_>,
        should_shutdown: bool,
    ) -> Poll<Result<Dispatched>> {
        Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
            // Be sure to alert a streaming body of the failure.
            if let Some(mut body) = self.body_tx.take() {
                body.send_error(Error::new_body("connection error"));
            }
            // An error means we're shutting down either way.
            // We just try to give the error to the user,
            // and close the connection with an Ok. If we
            // cannot give it to the user, then return the Err.
            self.dispatch.recv_msg(Err(e))?;
            Ok(Dispatched::Shutdown)
        }))
    }

    /// One full turn of the dispatcher: drive read/write/flush, then — once
    /// done — hand off a pending upgrade or shut the transport down.
    fn poll_inner(
        &mut self,
        cx: &mut Context<'_>,
        should_shutdown: bool,
    ) -> Poll<Result<Dispatched>> {
        T::update_date();

        ready!(self.poll_loop(cx))?;

        if self.is_done() {
            if let Some(pending) = self.conn.pending_upgrade() {
                self.conn.take_error()?;
                return Poll::Ready(Ok(Dispatched::Upgrade(pending)));
            } else if should_shutdown {
                ready!(self.conn.poll_shutdown(cx)).map_err(Error::new_shutdown)?;
            }
            self.conn.take_error()?;
            Poll::Ready(Ok(Dispatched::Shutdown))
        } else {
            Poll::Pending
        }
    }

    /// Repeatedly drives read, write, and flush until the connection no
    /// longer wants an immediate re-read, yielding after a bounded number
    /// of iterations so other tasks are not starved.
    fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        // Limit the looping on this connection, in case it is ready far too
        // often, so that other futures don't starve.
        //
        // 16 was chosen arbitrarily, as that is number of pipelined requests
        // benchmarks often use. Perhaps it should be a config option instead.
        for _ in 0..16 {
            let _ = self.poll_read(cx)?;
            let _ = self.poll_write(cx)?;
            let _ = self.poll_flush(cx)?;

            // This could happen if reading paused before blocking on IO,
            // such as getting to the end of a framed message, but then
            // writing/flushing set the state back to Init. In that case,
            // if the read buffer still had bytes, we'd want to try poll_read
            // again, or else we wouldn't ever be woken up again.
            //
            // Using this instead of task::current() and notify() inside
            // the Conn is noticeably faster in pipelined benchmarks.
            if !self.conn.wants_read_again() {
                return Poll::Ready(Ok(()));
            }
        }

        trace!("poll_loop yielding (self = {:p})", self);

        // Now we yield to allow other tasks to run.
        cx.waker().wake_by_ref();
        Poll::Pending
    }

    /// Reads from the connection: either a new message head, body frames
    /// forwarded into `body_tx`, or keep-alive polling between messages.
    fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        loop {
            if self.is_closing {
                return Poll::Ready(Ok(()));
            } else if self.conn.can_read_head() {
                ready!(self.poll_read_head(cx))?;
            } else if let Some(mut body) = self.body_tx.take() {
                // A body is being streamed; note body_tx was *taken* above,
                // so every non-terminal branch must put it back.
                if self.conn.can_read_body() {
                    match body.poll_ready(cx) {
                        Poll::Ready(Ok(())) => (),
                        Poll::Pending => {
                            self.body_tx = Some(body);
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(_canceled)) => {
                            // user doesn't care about the body
                            // so we should stop reading
                            trace!("body receiver dropped before eof, draining or closing");
                            self.conn.poll_drain_or_close_read(cx);
                            continue;
                        }
                    }
                    match self.conn.poll_read_body(cx) {
                        Poll::Ready(Some(Ok(frame))) => {
                            if frame.is_data() {
                                let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                                match body.try_send_data(chunk) {
                                    Ok(()) => {
                                        self.body_tx = Some(body);
                                    }
                                    Err(_canceled) => {
                                        if self.conn.can_read_body() {
                                            trace!("body receiver dropped before eof, closing");
                                            self.conn.close_read();
                                        }
                                    }
                                }
                            } else if frame.is_trailers() {
                                let trailers =
                                    frame.into_trailers().unwrap_or_else(|_| unreachable!());
                                match body.try_send_trailers(trailers) {
                                    Ok(()) => {
                                        self.body_tx = Some(body);
                                    }
                                    Err(_canceled) => {
                                        if self.conn.can_read_body() {
                                            trace!("body receiver dropped before eof, closing");
                                            self.conn.close_read();
                                        }
                                    }
                                }
                            } else {
                                // we should have dropped all unknown frames in poll_read_body
                                error!("unexpected frame");
                            }
                        }
                        Poll::Ready(None) => {
                            // just drop, the body will close automatically
                        }
                        Poll::Pending => {
                            self.body_tx = Some(body);
                            return Poll::Pending;
                        }
                        Poll::Ready(Some(Err(e))) => {
                            body.send_error(Error::new_body(e));
                        }
                    }
                } else {
                    // just drop, the body will close automatically
                }
            } else {
                return self.conn.poll_read_keep_alive(cx);
            }
        }
    }

    /// Reads one message head and hands it (with a channel-backed body when
    /// a body is expected) to the dispatch.
    fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        // can dispatch receive, or does it still care about other incoming message?
        match ready!(self.dispatch.poll_ready(cx)) {
            Ok(()) => (),
            Err(()) => {
                trace!("dispatch no longer receiving messages");
                self.close();
                return Poll::Ready(Ok(()));
            }
        }

        // dispatch is ready for a message, try to read one
        match ready!(self.conn.poll_read_head(cx)) {
            Some(Ok((mut head, body_len, wants))) => {
                let body = match body_len {
                    DecodedLength::ZERO => IncomingBody::empty(),
                    other => {
                        let (tx, rx) =
                            IncomingBody::new_channel(other, wants.contains(Wants::EXPECT));
                        self.body_tx = Some(tx);
                        rx
                    }
                };
                if wants.contains(Wants::UPGRADE) {
                    // Stash the upgrade future in the message extensions so
                    // the user can await it after consuming the response.
                    let upgrade = self.conn.on_upgrade();
                    debug_assert!(!upgrade.is_none(), "empty upgrade");
                    debug_assert!(
                        head.extensions.get::<OnUpgrade>().is_none(),
                        "OnUpgrade already set"
                    );
                    head.extensions.insert(upgrade);
                }
                self.dispatch.recv_msg(Ok((head, body)))?;
                Poll::Ready(Ok(()))
            }
            Some(Err(err)) => {
                debug!("read_head error: {}", err);
                self.dispatch.recv_msg(Err(err))?;
                // if here, the dispatcher gave the user the error
                // somewhere else. we still need to shutdown, but
                // not as a second error.
                self.close();
                Poll::Ready(Ok(()))
            }
            None => {
                // read eof, the write side will have been closed too unless
                // allow_read_close was set to true, in which case just do
                // nothing...
                debug_assert!(self.conn.is_read_closed());
                if self.conn.is_write_closed() {
                    self.close();
                }
                Poll::Ready(Ok(()))
            }
        }
    }

    /// Writes to the connection: pulls a new message from the dispatch when
    /// possible, otherwise streams frames from the pending outgoing body.
    fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        loop {
            if self.is_closing {
                return Poll::Ready(Ok(()));
            } else if self.body_rx.is_none()
                && self.conn.can_write_head()
                && self.dispatch.should_poll()
            {
                if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
                    let (head, body) = msg.map_err(Error::new_user_service)?;

                    // Decide the framing: no body, known length, or unknown
                    // (chunked) — based on the body's size hint.
                    let body_type = if body.is_end_stream() {
                        self.body_rx.set(None);
                        None
                    } else {
                        let btype = body
                            .size_hint()
                            .exact()
                            .map(BodyLength::Known)
                            .or(Some(BodyLength::Unknown));
                        self.body_rx.set(Some(body));
                        btype
                    };
                    self.conn.write_head(head, body_type);
                } else {
                    // Dispatch has no more messages; wind the connection down.
                    self.close();
                    return Poll::Ready(Ok(()));
                }
            } else if !self.conn.can_buffer_body() {
                ready!(self.poll_flush(cx))?;
            } else {
                // A new scope is needed :(
                if let (Some(mut body), clear_body) =
                    OptGuard::new(self.body_rx.as_mut()).guard_mut()
                {
                    debug_assert!(!*clear_body, "opt guard defaults to keeping body");
                    if !self.conn.can_write_body() {
                        trace!(
                            "no more write body allowed, user body is_end_stream = {}",
                            body.is_end_stream(),
                        );
                        *clear_body = true;
                        continue;
                    }

                    let item = ready!(body.as_mut().poll_frame(cx));
                    if let Some(item) = item {
                        let frame = item.map_err(|e| {
                            *clear_body = true;
                            Error::new_user_body(e)
                        })?;

                        if frame.is_data() {
                            let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                            let eos = body.is_end_stream();
                            if eos {
                                *clear_body = true;
                                if chunk.remaining() == 0 {
                                    trace!("discarding empty chunk");
                                    self.conn.end_body()?;
                                } else {
                                    self.conn.write_body_and_end(chunk);
                                }
                            } else {
                                if chunk.remaining() == 0 {
                                    trace!("discarding empty chunk");
                                    continue;
                                }
                                self.conn.write_body(chunk);
                            }
                        } else if frame.is_trailers() {
                            *clear_body = true;
                            self.conn.write_trailers(
                                frame.into_trailers().unwrap_or_else(|_| unreachable!()),
                            );
                        } else {
                            trace!("discarding unknown frame");
                            continue;
                        }
                    } else {
                        // Body stream finished; terminate the message.
                        *clear_body = true;
                        self.conn.end_body()?;
                    }
                } else {
                    // If there's no body_rx, end the body
                    if self.conn.can_write_body() {
                        self.conn.end_body()?;
                    } else {
                        return Poll::Pending;
                    }
                }
            }
        }
    }

    /// Flushes buffered output, mapping I/O failures to body-write errors.
    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.conn.poll_flush(cx).map_err(|err| {
            debug!("error writing: {}", err);
            Error::new_body_write(err)
        })
    }

    /// Marks the dispatcher as closing and closes both connection halves.
    fn close(&mut self) {
        self.is_closing = true;
        self.conn.close_read();
        self.conn.close_write();
    }

    /// Whether the dispatcher has finished: explicitly closing, or both the
    /// read and write sides have nothing left to do.
    fn is_done(&self) -> bool {
        if self.is_closing {
            return true;
        }

        let read_done = self.conn.is_read_closed();

        if !T::should_read_first() && read_done {
            // a client that cannot read may as well be done.
            true
        } else {
            let write_done = self.conn.is_write_closed()
                || (!self.dispatch.should_poll() && self.body_rx.is_none());
            read_done && write_done
        }
    }
}
418
+
419
impl<D, Bs, I, T> Future for Dispatcher<D, Bs, I, T>
where
    D: Dispatch<
            PollItem = MessageHead<T::Outgoing>,
            PollBody = Bs,
            RecvItem = MessageHead<T::Incoming>,
        > + Unpin,
    D::PollError: Into<BoxError>,
    I: AsyncRead + AsyncWrite + Unpin,
    T: Http1Transaction + Unpin,
    Bs: Body + 'static,
    Bs::Error: Into<BoxError>,
{
    type Output = Result<Dispatched>;

    /// Drives the connection to completion, requesting a clean transport
    /// shutdown (`should_shutdown = true`) once the dispatcher is done.
    #[inline]
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.poll_catch(cx, true)
    }
}
439
+
440
// ===== impl OptGuard =====

/// A drop guard around a pinned `Option<T>`: hands out a mutable borrow of
/// the inner value together with a flag; if the flag is set when the guard
/// drops, the `Option` is reset to `None`.
struct OptGuard<'a, T> {
    slot: Pin<&'a mut Option<T>>,
    clear_on_drop: bool,
}

impl<'a, T> OptGuard<'a, T> {
    /// Wraps the pinned option; the clear-on-drop flag starts unset.
    fn new(pin: Pin<&'a mut Option<T>>) -> Self {
        OptGuard {
            slot: pin,
            clear_on_drop: false,
        }
    }

    /// Borrows the pinned inner value (if present) along with a mutable
    /// reference to the clear-on-drop flag.
    fn guard_mut(&mut self) -> (Option<Pin<&mut T>>, &mut bool) {
        let inner = self.slot.as_mut().as_pin_mut();
        (inner, &mut self.clear_on_drop)
    }
}

impl<T> Drop for OptGuard<'_, T> {
    fn drop(&mut self) {
        if self.clear_on_drop {
            self.slot.set(None);
        }
    }
}
463
+
464
+ // ===== impl Client =====
465
+
466
+ use std::convert::Infallible;
467
+
468
+ impl<B> Client<B> {
469
+ pub(crate) fn new(rx: ClientRx<B>) -> Client<B> {
470
+ Client {
471
+ callback: None,
472
+ rx,
473
+ rx_closed: false,
474
+ }
475
+ }
476
+ }
477
+
478
impl<B> Dispatch for Client<B>
where
    B: Body,
{
    type PollItem = RequestHead;
    type PollBody = B;
    type PollError = Infallible;
    type RecvItem = proto::ResponseHead;

    /// Pulls the next queued request, splitting it into a `RequestHead` and
    /// body and stashing its completion callback for `recv_msg`.
    fn poll_msg(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<std::result::Result<(Self::PollItem, Self::PollBody), Infallible>>> {
        let mut this = self.as_mut();
        debug_assert!(!this.rx_closed);
        match this.rx.poll_recv(cx) {
            Poll::Ready(Some((req, mut cb))) => {
                // check that future hasn't been canceled already
                match cb.poll_canceled(cx) {
                    Poll::Ready(()) => {
                        trace!("request canceled");
                        Poll::Ready(None)
                    }
                    Poll::Pending => {
                        let (parts, body) = req.into_parts();
                        let head = RequestHead {
                            version: parts.version,
                            subject: proto::RequestLine(parts.method, parts.uri),
                            headers: parts.headers,
                            extensions: parts.extensions,
                        };
                        // Remember where to deliver the matching response.
                        this.callback = Some(cb);
                        Poll::Ready(Some(Ok((head, body))))
                    }
                }
            }
            Poll::Ready(None) => {
                // user has dropped sender handle
                trace!("client tx closed");
                this.rx_closed = true;
                Poll::Ready(None)
            }
            Poll::Pending => Poll::Pending,
        }
    }

    /// Completes the in-flight request's callback with the response, or
    /// routes a connection error to it (or to a queued request) instead.
    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, IncomingBody)>) -> Result<()> {
        match msg {
            Ok((msg, body)) => {
                if let Some(cb) = self.callback.take() {
                    let res = msg.into_response(body);
                    cb.send(Ok(res));
                    Ok(())
                } else {
                    // Getting here is likely a bug! An error should have happened
                    // in Conn::require_empty_read() before ever parsing a
                    // full message!
                    Err(Error::new_unexpected_message())
                }
            }
            Err(err) => {
                if let Some(cb) = self.callback.take() {
                    // A request was in flight: give it the error.
                    cb.send(Err(TrySendError {
                        error: err,
                        message: None,
                    }));
                    Ok(())
                } else if !self.rx_closed {
                    self.rx.close();
                    if let Some((req, cb)) = self.rx.try_recv() {
                        trace!("canceling queued request with connection error: {}", err);
                        // in this case, the message was never even started, so it's safe to tell
                        // the user that the request was completely canceled
                        cb.send(Err(TrySendError {
                            error: Error::new_canceled().with(err),
                            message: Some(req),
                        }));
                        Ok(())
                    } else {
                        Err(err)
                    }
                } else {
                    Err(err)
                }
            }
        }
    }

    /// Ready to receive a response only while a callback is pending and its
    /// receiver is still alive; `Err(())` otherwise.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<std::result::Result<(), ()>> {
        match self.callback {
            Some(ref mut cb) => match cb.poll_canceled(cx) {
                Poll::Ready(()) => {
                    trace!("callback receiver has dropped");
                    Poll::Ready(Err(()))
                }
                Poll::Pending => Poll::Ready(Ok(())),
            },
            None => Poll::Ready(Err(())),
        }
    }

    /// A new request may be polled only when no callback is outstanding
    /// (HTTP/1 allows one in-flight request per connection here).
    fn should_poll(&self) -> bool {
        self.callback.is_none()
    }
}
583
+
584
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::{proto::h1::ClientTransaction, *};

    // A response arriving before the request was written must cancel the
    // request and return the original message to the caller.
    #[test]
    fn client_read_bytes_before_writing_request() {
        let _ = pretty_env_logger::try_init();

        tokio_test::task::spawn(()).enter(|cx, _| {
            let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle();

            // Block at 0 for now, but we will release this response before
            // the request is ready to write later...
            let (mut tx, rx) = dispatch::channel();
            let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
            let mut dispatcher = Dispatcher::new(Client::new(rx), conn);

            // First poll is needed to allow tx to send...
            assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());

            // Unblock our IO, which has a response before we've sent request!
            handle.read(b"HTTP/1.1 200 OK\r\n\r\n");

            let mut res_rx = tx
                .try_send(http::Request::new(IncomingBody::empty()))
                .unwrap();

            tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));
            let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))
                .expect_err("callback should send error");

            match (err.error.is_canceled(), err.message.as_ref()) {
                (true, Some(_)) => (),
                _ => panic!("expected Canceled, got {err:?}"),
            }
        });
    }

    // While a request body is still flushing, the sender must not report
    // readiness for the next request.
    #[tokio::test]
    async fn client_flushing_is_not_ready_for_next_request() {
        let _ = pretty_env_logger::try_init();

        let (io, _handle) = tokio_test::io::Builder::new()
            .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n")
            .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            .wait(std::time::Duration::from_secs(2))
            .build_with_handle();

        let (mut tx, rx) = dispatch::channel();
        let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
        conn.set_write_strategy_queue();

        let dispatcher = Dispatcher::new(Client::new(rx), conn);
        let _dispatcher = tokio::spawn(dispatcher);

        let body = {
            let (mut tx, body) = IncomingBody::new_channel(DecodedLength::new(4), false);
            tx.try_send_data("reee".into()).unwrap();
            body
        };

        let req = http::Request::builder().method("POST").body(body).unwrap();

        let res = tx.try_send(req).unwrap().await.expect("response");
        drop(res);

        assert!(!tx.is_ready());
    }

    // Empty data chunks must be discarded rather than written to the
    // connection (see the empty-chunk checks in poll_write).
    #[tokio::test]
    async fn body_empty_chunks_ignored() {
        let _ = pretty_env_logger::try_init();

        let io = tokio_test::io::Builder::new()
            // no reading or writing, just be blocked for the test...
            .wait(Duration::from_secs(5))
            .build();

        let (mut tx, rx) = dispatch::channel();
        let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
        let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));

        // First poll is needed to allow tx to send...
        assert!(dispatcher.poll().is_pending());

        let body = {
            let (mut tx, body) = IncomingBody::channel();
            tx.try_send_data("".into()).unwrap();
            body
        };

        let _res_rx = tx.try_send(http::Request::new(body)).unwrap();

        // Ensure conn.write_body wasn't called with the empty chunk.
        // If it is, it will trigger an assertion.
        assert!(dispatcher.poll().is_pending());
    }
}