wreq-rb 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Cargo.lock +2688 -0
- data/Cargo.toml +6 -0
- data/README.md +179 -0
- data/ext/wreq_rb/Cargo.toml +39 -0
- data/ext/wreq_rb/extconf.rb +22 -0
- data/ext/wreq_rb/src/client.rs +565 -0
- data/ext/wreq_rb/src/error.rs +25 -0
- data/ext/wreq_rb/src/lib.rs +20 -0
- data/ext/wreq_rb/src/response.rs +132 -0
- data/lib/wreq-rb/version.rb +5 -0
- data/lib/wreq-rb.rb +17 -0
- data/patches/0001-add-transfer-size-tracking.patch +292 -0
- data/vendor/wreq/Cargo.toml +306 -0
- data/vendor/wreq/LICENSE +202 -0
- data/vendor/wreq/README.md +122 -0
- data/vendor/wreq/examples/cert_store.rs +77 -0
- data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
- data/vendor/wreq/examples/emulation.rs +118 -0
- data/vendor/wreq/examples/form.rs +14 -0
- data/vendor/wreq/examples/http1_websocket.rs +37 -0
- data/vendor/wreq/examples/http2_websocket.rs +45 -0
- data/vendor/wreq/examples/json_dynamic.rs +41 -0
- data/vendor/wreq/examples/json_typed.rs +47 -0
- data/vendor/wreq/examples/keylog.rs +16 -0
- data/vendor/wreq/examples/request_with_emulation.rs +115 -0
- data/vendor/wreq/examples/request_with_interface.rs +37 -0
- data/vendor/wreq/examples/request_with_local_address.rs +16 -0
- data/vendor/wreq/examples/request_with_proxy.rs +13 -0
- data/vendor/wreq/examples/request_with_redirect.rs +22 -0
- data/vendor/wreq/examples/request_with_version.rs +15 -0
- data/vendor/wreq/examples/tor_socks.rs +24 -0
- data/vendor/wreq/examples/unix_socket.rs +33 -0
- data/vendor/wreq/src/client/body.rs +304 -0
- data/vendor/wreq/src/client/conn/conn.rs +231 -0
- data/vendor/wreq/src/client/conn/connector.rs +549 -0
- data/vendor/wreq/src/client/conn/http.rs +1023 -0
- data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
- data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
- data/vendor/wreq/src/client/conn/proxy.rs +39 -0
- data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
- data/vendor/wreq/src/client/conn/uds.rs +44 -0
- data/vendor/wreq/src/client/conn/verbose.rs +149 -0
- data/vendor/wreq/src/client/conn.rs +323 -0
- data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
- data/vendor/wreq/src/client/core/body/length.rs +118 -0
- data/vendor/wreq/src/client/core/body.rs +34 -0
- data/vendor/wreq/src/client/core/common/buf.rs +149 -0
- data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
- data/vendor/wreq/src/client/core/common/watch.rs +76 -0
- data/vendor/wreq/src/client/core/common.rs +3 -0
- data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
- data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
- data/vendor/wreq/src/client/core/conn.rs +11 -0
- data/vendor/wreq/src/client/core/dispatch.rs +299 -0
- data/vendor/wreq/src/client/core/error.rs +435 -0
- data/vendor/wreq/src/client/core/ext.rs +201 -0
- data/vendor/wreq/src/client/core/http1.rs +178 -0
- data/vendor/wreq/src/client/core/http2.rs +483 -0
- data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
- data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
- data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
- data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
- data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
- data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
- data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
- data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
- data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
- data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
- data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
- data/vendor/wreq/src/client/core/proto.rs +58 -0
- data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
- data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
- data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
- data/vendor/wreq/src/client/core/rt.rs +25 -0
- data/vendor/wreq/src/client/core/upgrade.rs +267 -0
- data/vendor/wreq/src/client/core.rs +16 -0
- data/vendor/wreq/src/client/emulation.rs +161 -0
- data/vendor/wreq/src/client/http/client/error.rs +142 -0
- data/vendor/wreq/src/client/http/client/exec.rs +29 -0
- data/vendor/wreq/src/client/http/client/extra.rs +77 -0
- data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
- data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
- data/vendor/wreq/src/client/http/client/util.rs +104 -0
- data/vendor/wreq/src/client/http/client.rs +1003 -0
- data/vendor/wreq/src/client/http/future.rs +99 -0
- data/vendor/wreq/src/client/http.rs +1629 -0
- data/vendor/wreq/src/client/layer/config/options.rs +156 -0
- data/vendor/wreq/src/client/layer/config.rs +116 -0
- data/vendor/wreq/src/client/layer/cookie.rs +161 -0
- data/vendor/wreq/src/client/layer/decoder.rs +139 -0
- data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
- data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
- data/vendor/wreq/src/client/layer/redirect.rs +145 -0
- data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
- data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
- data/vendor/wreq/src/client/layer/retry.rs +151 -0
- data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
- data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
- data/vendor/wreq/src/client/layer/timeout.rs +177 -0
- data/vendor/wreq/src/client/layer.rs +15 -0
- data/vendor/wreq/src/client/multipart.rs +717 -0
- data/vendor/wreq/src/client/request.rs +818 -0
- data/vendor/wreq/src/client/response.rs +534 -0
- data/vendor/wreq/src/client/ws/json.rs +99 -0
- data/vendor/wreq/src/client/ws/message.rs +453 -0
- data/vendor/wreq/src/client/ws.rs +714 -0
- data/vendor/wreq/src/client.rs +27 -0
- data/vendor/wreq/src/config.rs +140 -0
- data/vendor/wreq/src/cookie.rs +579 -0
- data/vendor/wreq/src/dns/gai.rs +249 -0
- data/vendor/wreq/src/dns/hickory.rs +78 -0
- data/vendor/wreq/src/dns/resolve.rs +180 -0
- data/vendor/wreq/src/dns.rs +69 -0
- data/vendor/wreq/src/error.rs +502 -0
- data/vendor/wreq/src/ext.rs +398 -0
- data/vendor/wreq/src/hash.rs +143 -0
- data/vendor/wreq/src/header.rs +506 -0
- data/vendor/wreq/src/into_uri.rs +187 -0
- data/vendor/wreq/src/lib.rs +586 -0
- data/vendor/wreq/src/proxy/mac.rs +82 -0
- data/vendor/wreq/src/proxy/matcher.rs +806 -0
- data/vendor/wreq/src/proxy/uds.rs +66 -0
- data/vendor/wreq/src/proxy/win.rs +31 -0
- data/vendor/wreq/src/proxy.rs +569 -0
- data/vendor/wreq/src/redirect.rs +575 -0
- data/vendor/wreq/src/retry.rs +198 -0
- data/vendor/wreq/src/sync.rs +129 -0
- data/vendor/wreq/src/tls/conn/cache.rs +123 -0
- data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
- data/vendor/wreq/src/tls/conn/ext.rs +82 -0
- data/vendor/wreq/src/tls/conn/macros.rs +34 -0
- data/vendor/wreq/src/tls/conn/service.rs +138 -0
- data/vendor/wreq/src/tls/conn.rs +681 -0
- data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
- data/vendor/wreq/src/tls/keylog.rs +99 -0
- data/vendor/wreq/src/tls/options.rs +464 -0
- data/vendor/wreq/src/tls/x509/identity.rs +122 -0
- data/vendor/wreq/src/tls/x509/parser.rs +71 -0
- data/vendor/wreq/src/tls/x509/store.rs +228 -0
- data/vendor/wreq/src/tls/x509.rs +68 -0
- data/vendor/wreq/src/tls.rs +154 -0
- data/vendor/wreq/src/trace.rs +55 -0
- data/vendor/wreq/src/util.rs +122 -0
- data/vendor/wreq/tests/badssl.rs +228 -0
- data/vendor/wreq/tests/brotli.rs +350 -0
- data/vendor/wreq/tests/client.rs +1098 -0
- data/vendor/wreq/tests/connector_layers.rs +227 -0
- data/vendor/wreq/tests/cookie.rs +306 -0
- data/vendor/wreq/tests/deflate.rs +347 -0
- data/vendor/wreq/tests/emulation.rs +260 -0
- data/vendor/wreq/tests/gzip.rs +347 -0
- data/vendor/wreq/tests/layers.rs +261 -0
- data/vendor/wreq/tests/multipart.rs +165 -0
- data/vendor/wreq/tests/proxy.rs +438 -0
- data/vendor/wreq/tests/redirect.rs +629 -0
- data/vendor/wreq/tests/retry.rs +135 -0
- data/vendor/wreq/tests/support/delay_server.rs +117 -0
- data/vendor/wreq/tests/support/error.rs +16 -0
- data/vendor/wreq/tests/support/layer.rs +183 -0
- data/vendor/wreq/tests/support/mod.rs +9 -0
- data/vendor/wreq/tests/support/server.rs +232 -0
- data/vendor/wreq/tests/timeouts.rs +281 -0
- data/vendor/wreq/tests/unix_socket.rs +135 -0
- data/vendor/wreq/tests/upgrade.rs +98 -0
- data/vendor/wreq/tests/zstd.rs +559 -0
- metadata +225 -0
|
@@ -0,0 +1,988 @@
|
|
|
1
|
+
use std::{
|
|
2
|
+
fmt, io,
|
|
3
|
+
marker::{PhantomData, Unpin},
|
|
4
|
+
pin::Pin,
|
|
5
|
+
task::{Context, Poll, ready},
|
|
6
|
+
};
|
|
7
|
+
|
|
8
|
+
use bytes::{Buf, Bytes};
|
|
9
|
+
use http::{
|
|
10
|
+
HeaderMap, Method, Version,
|
|
11
|
+
header::{CONNECTION, HeaderValue, TE},
|
|
12
|
+
};
|
|
13
|
+
use http_body::Frame;
|
|
14
|
+
use httparse::ParserConfig;
|
|
15
|
+
use tokio::io::{AsyncRead, AsyncWrite};
|
|
16
|
+
|
|
17
|
+
use super::{
|
|
18
|
+
Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants, io::Buffered,
|
|
19
|
+
};
|
|
20
|
+
use crate::client::core::{
|
|
21
|
+
Error, Result,
|
|
22
|
+
body::DecodedLength,
|
|
23
|
+
proto::{BodyLength, MessageHead, headers},
|
|
24
|
+
upgrade,
|
|
25
|
+
};
|
|
26
|
+
|
|
27
|
+
// The fixed 24-byte HTTP/2 connection preface. If we see this while parsing
// an HTTP/1 head, the peer is speaking HTTP/2 (see `has_h2_prefix`).
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
|
|
28
|
+
|
|
29
|
+
/// This handles a connection, which will have been established over an
/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple
/// `Transaction`s over HTTP.
///
/// The connection will determine when a message begins and ends as well as
/// determine if this connection can be kept alive after the message,
/// or if it is complete.
pub(crate) struct Conn<I, B, T> {
    // Buffered transport; writes are staged as encoded buffers before flush.
    io: Buffered<I, EncodedBuf<B>>,
    // All per-connection protocol state (reading/writing phases, keep-alive,
    // cached headers, pending upgrade, ...).
    state: State,
    // `fn(T)` keeps the transaction role (`Http1Transaction`) as a type
    // parameter without owning a `T`, and without inheriting `T`'s auto traits.
    _marker: PhantomData<fn(T)>,
}
|
|
41
|
+
|
|
42
|
+
impl<I, B, T> Conn<I, B, T>
|
|
43
|
+
where
|
|
44
|
+
I: AsyncRead + AsyncWrite + Unpin,
|
|
45
|
+
B: Buf,
|
|
46
|
+
T: Http1Transaction,
|
|
47
|
+
{
|
|
48
|
+
/// Wraps `io` in a new connection with all protocol state reset to its
/// initial values (no message in flight, keep-alive assumed busy).
pub(crate) fn new(io: I) -> Conn<I, B, T> {
    Conn {
        io: Buffered::new(io),
        state: State {
            allow_half_close: false,
            cached_headers: None,
            error: None,
            // Busy until the first message completes; `try_keep_alive`
            // transitions this later.
            keep_alive: KA::Busy,
            method: None,
            h1_parser_config: ParserConfig::default(),
            h1_max_headers: None,
            h09_responses: false,
            notify_read: false,
            reading: Reading::Init,
            writing: Writing::Init,
            upgrade: None,
            // We assume a modern world where the remote speaks HTTP/1.1.
            // If they tell us otherwise, we'll downgrade in `read_head`.
            version: Version::HTTP_11,
            allow_trailer_fields: false,
        },
        _marker: PhantomData,
    }
}
|
|
72
|
+
|
|
73
|
+
/// Delegates to the io buffer: use the queued write strategy.
pub(crate) fn set_write_strategy_queue(&mut self) {
    self.io.set_write_strategy_queue();
}
|
|
76
|
+
|
|
77
|
+
/// Delegates to the io buffer: cap its buffer size at `max` bytes.
pub(crate) fn set_max_buf_size(&mut self, max: usize) {
    self.io.set_max_buf_size(max);
}
|
|
80
|
+
|
|
81
|
+
/// Delegates to the io buffer: use an exact read buffer size of `sz` bytes.
pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
    self.io.set_read_buf_exact_size(sz);
}
|
|
84
|
+
|
|
85
|
+
/// Delegates to the io buffer: use the flattened write strategy.
pub(crate) fn set_write_strategy_flatten(&mut self) {
    self.io.set_write_strategy_flatten();
}
|
|
88
|
+
|
|
89
|
+
/// Replaces the `httparse` parser configuration used when parsing heads.
pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {
    self.state.h1_parser_config = parser_config;
}
|
|
92
|
+
|
|
93
|
+
/// Allow parsing an HTTP/0.9 response. `poll_read_head` resets this flag
/// back to `false` after the first head is read.
pub(crate) fn set_h09_responses(&mut self) {
    self.state.h09_responses = true;
}
|
|
96
|
+
|
|
97
|
+
/// Sets the maximum number of headers accepted when parsing a head.
pub(crate) fn set_http1_max_headers(&mut self, val: usize) {
    self.state.h1_max_headers = Some(val);
}
|
|
100
|
+
|
|
101
|
+
/// Consumes the connection, returning the transport plus any bytes already
/// read from it but not yet consumed.
pub(crate) fn into_inner(self) -> (I, Bytes) {
    self.io.into_inner()
}
|
|
104
|
+
|
|
105
|
+
/// Takes the pending protocol upgrade out of the state, if one was set,
/// leaving `None` behind.
pub(crate) fn pending_upgrade(&mut self) -> Option<upgrade::Pending> {
    self.state.upgrade.take()
}
|
|
108
|
+
|
|
109
|
+
/// Whether the read half of the connection has been closed.
pub(crate) fn is_read_closed(&self) -> bool {
    self.state.is_read_closed()
}
|
|
112
|
+
|
|
113
|
+
/// Whether the write half of the connection has been closed.
pub(crate) fn is_write_closed(&self) -> bool {
    self.state.is_write_closed()
}
|
|
116
|
+
|
|
117
|
+
pub(crate) fn can_read_head(&self) -> bool {
|
|
118
|
+
if !matches!(self.state.reading, Reading::Init) {
|
|
119
|
+
return false;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
if T::should_read_first() {
|
|
123
|
+
return true;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
!matches!(self.state.writing, Writing::Init)
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
pub(crate) fn can_read_body(&self) -> bool {
|
|
130
|
+
matches!(
|
|
131
|
+
self.state.reading,
|
|
132
|
+
Reading::Body(..) | Reading::Continue(..)
|
|
133
|
+
)
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
fn should_error_on_eof(&self) -> bool {
|
|
137
|
+
// If we're idle, it's probably just the connection closing gracefully.
|
|
138
|
+
T::should_error_on_parse_eof() && !self.state.is_idle()
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
fn has_h2_prefix(&self) -> bool {
|
|
142
|
+
let read_buf = self.io.read_buf();
|
|
143
|
+
read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
#[allow(clippy::type_complexity)]
/// Polls the transport for a complete message head.
///
/// On success, yields the parsed head, the decoded body length, and the
/// `Wants` flags (upgrade / expect-continue), and transitions
/// `state.reading` into `KeepAlive`, `Continue`, or `Body` accordingly.
/// Parse errors are routed through `on_read_head_error`.
pub(super) fn poll_read_head(
    &mut self,
    cx: &mut Context<'_>,
) -> Poll<Option<Result<(MessageHead<T::Incoming>, DecodedLength, Wants)>>> {
    debug_assert!(self.can_read_head());
    trace!("Conn::read_head");

    let msg = match self.io.parse::<T>(
        cx,
        ParseContext {
            cached_headers: &mut self.state.cached_headers,
            req_method: &mut self.state.method,
            h1_parser_config: self.state.h1_parser_config.clone(),
            h1_max_headers: self.state.h1_max_headers,
            h09_responses: self.state.h09_responses,
        },
    ) {
        Poll::Ready(Ok(msg)) => msg,
        Poll::Ready(Err(e)) => return self.on_read_head_error(e),
        Poll::Pending => {
            return Poll::Pending;
        }
    };

    // Note: don't deconstruct `msg` into local variables, it appears
    // the optimizer doesn't remove the extra copies.

    debug!("incoming body is {}", msg.decode);

    // Prevent accepting HTTP/0.9 responses after the initial one, if any.
    self.state.h09_responses = false;

    self.state.busy();
    // Keep-alive is the AND of our state and what the message allows.
    self.state.keep_alive &= msg.keep_alive;
    // Remember the peer's version so later writes can be downgraded
    // (see `enforce_version`).
    self.state.version = msg.head.version;

    let mut wants = if msg.wants_upgrade {
        Wants::UPGRADE
    } else {
        Wants::EMPTY
    };

    if msg.decode == DecodedLength::ZERO {
        if msg.expect_continue {
            debug!("ignoring expect-continue since body is empty");
        }
        self.state.reading = Reading::KeepAlive;
        if !T::should_read_first() {
            self.try_keep_alive(cx);
        }
    } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) {
        // Expect: 100-continue only applies above HTTP/1.0; body decoding
        // is deferred until the continue is handled (see `poll_read_body`).
        let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
        self.state.reading = Reading::Continue(Decoder::new(
            msg.decode,
            self.state.h1_max_headers,
            h1_max_header_size,
        ));
        wants = wants.add(Wants::EXPECT);
    } else {
        let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
        self.state.reading = Reading::Body(Decoder::new(
            msg.decode,
            self.state.h1_max_headers,
            h1_max_header_size,
        ));
    }

    // Trailer fields may only be sent when the peer advertised
    // `TE: trailers`.
    self.state.allow_trailer_fields = msg
        .head
        .headers
        .get(TE)
        .is_some_and(|te_header| te_header == "trailers");

    Poll::Ready(Some(Ok((msg.head, msg.decode, wants))))
}
|
|
222
|
+
|
|
223
|
+
/// Handles an error returned while parsing a message head: either a real
/// parse/mid-message error (propagated via `on_parse_error`) or a graceful
/// EOF (yields `None`).
///
/// Note: `should_error_on_eof` must be consulted BEFORE `close_read`, since
/// it inspects the pre-close state.
fn on_read_head_error<Z>(&mut self, e: Error) -> Poll<Option<Result<Z>>> {
    // If we are currently waiting on a message, then an empty
    // message should be reported as an error. If not, it is just
    // the connection closing gracefully.
    let must_error = self.should_error_on_eof();
    self.close_read();
    self.io.consume_leading_lines();
    // A parse error, or leftover unparsed bytes, means we died mid-message.
    let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty();
    if was_mid_parse || must_error {
        // We check if the buf contains the h2 Preface
        debug!(
            "parse error ({}) with {} bytes",
            e,
            self.io.read_buf().len()
        );
        match self.on_parse_error(e) {
            Ok(()) => Poll::Pending, // XXX: wat?
            Err(e) => Poll::Ready(Some(Err(e))),
        }
    } else {
        debug!("read eof");
        self.close_write();
        Poll::Ready(None)
    }
}
|
|
248
|
+
|
|
249
|
+
/// Polls the body decoder for the next frame (data or trailers).
///
/// Yields `None` when the body is complete; transitions `state.reading`
/// to `KeepAlive` on clean completion or `Closed` on error/unknown frames.
/// In the `Continue` state, first buffers an automatic `100 Continue`
/// response (if nothing was written yet) and then recurses as `Body`.
pub(crate) fn poll_read_body(
    &mut self,
    cx: &mut Context<'_>,
) -> Poll<Option<io::Result<Frame<Bytes>>>> {
    debug_assert!(self.can_read_body());

    let (reading, ret) = match self.state.reading {
        Reading::Body(ref mut decoder) => {
            match ready!(decoder.decode(cx, &mut self.io)) {
                Ok(frame) => {
                    if frame.is_data() {
                        // `is_data()` guarantees `data_ref()` is `Some`.
                        let slice = frame.data_ref().unwrap_or_else(|| unreachable!());
                        let (reading, maybe_frame) = if decoder.is_eof() {
                            debug!("incoming body completed");
                            (
                                Reading::KeepAlive,
                                // A final empty data frame is swallowed;
                                // a non-empty one is still delivered.
                                if !slice.is_empty() {
                                    Some(Ok(frame))
                                } else {
                                    None
                                },
                            )
                        } else if slice.is_empty() {
                            error!("incoming body unexpectedly ended");
                            // This should be unreachable, since all 3 decoders
                            // either set eof=true or return an Err when reading
                            // an empty slice...
                            (Reading::Closed, None)
                        } else {
                            // Mid-body data: deliver without changing state
                            // (skips the state write + try_keep_alive below).
                            return Poll::Ready(Some(Ok(frame)));
                        };
                        (reading, Poll::Ready(maybe_frame))
                    } else if frame.is_trailers() {
                        (Reading::Closed, Poll::Ready(Some(Ok(frame))))
                    } else {
                        trace!("discarding unknown frame");
                        (Reading::Closed, Poll::Ready(None))
                    }
                }
                Err(e) => {
                    debug!("incoming body decode error: {}", e);
                    (Reading::Closed, Poll::Ready(Some(Err(e))))
                }
            }
        }
        Reading::Continue(ref decoder) => {
            // Write the 100 Continue if not already responded...
            if let Writing::Init = self.state.writing {
                trace!("automatically sending 100 Continue");
                let cont = b"HTTP/1.1 100 Continue\r\n\r\n";
                self.io.headers_buf().extend_from_slice(cont);
            }

            // And now recurse once in the Reading::Body state...
            self.state.reading = Reading::Body(decoder.clone());
            return self.poll_read_body(cx);
        }
        _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading),
    };

    self.state.reading = reading;
    self.try_keep_alive(cx);
    ret
}
|
|
313
|
+
|
|
314
|
+
pub(crate) fn wants_read_again(&mut self) -> bool {
|
|
315
|
+
let ret = self.state.notify_read;
|
|
316
|
+
self.state.notify_read = false;
|
|
317
|
+
ret
|
|
318
|
+
}
|
|
319
|
+
|
|
320
|
+
/// Polls the read half while no message is being read, to detect peer
/// EOF or unexpected bytes.
///
/// Dispatches to `mid_message_detect_eof` while a message is in flight on
/// either half, or `require_empty_read` when fully idle; a closed read
/// half just stays `Pending`.
pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
    debug_assert!(!self.can_read_head() && !self.can_read_body());

    if self.is_read_closed() {
        Poll::Pending
    } else if self.is_mid_message() {
        self.mid_message_detect_eof(cx)
    } else {
        self.require_empty_read(cx)
    }
}
|
|
331
|
+
|
|
332
|
+
fn is_mid_message(&self) -> bool {
|
|
333
|
+
!matches!(
|
|
334
|
+
(&self.state.reading, &self.state.writing),
|
|
335
|
+
(&Reading::Init, &Writing::Init)
|
|
336
|
+
)
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
// This will check to make sure the io object read is empty.
//
// This should only be called for Clients wanting to enter the idle
// state.
//
// Returns `Ok(())` on graceful EOF of an idle connection; any buffered or
// newly-read bytes on an idle connection are an `unexpected_message` error.
fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
    debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());
    debug_assert!(!self.is_mid_message());
    debug_assert!(T::is_client());

    // Leftover unparsed bytes while idle are a protocol violation.
    if !self.io.read_buf().is_empty() {
        debug!("received an unexpected {} bytes", self.io.read_buf().len());
        return Poll::Ready(Err(Error::new_unexpected_message()));
    }

    let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?;

    if num_read == 0 {
        let ret = if self.should_error_on_eof() {
            trace!("found unexpected EOF on busy connection: {:?}", self.state);
            Poll::Ready(Err(Error::new_incomplete()))
        } else {
            trace!("found EOF on idle connection, closing");
            Poll::Ready(Ok(()))
        };

        // order is important: should_error needs state BEFORE close_read
        self.state.close_read();
        return ret;
    }

    debug!(
        "received unexpected {} bytes on an idle connection",
        num_read
    );
    Poll::Ready(Err(Error::new_unexpected_message()))
}
|
|
375
|
+
|
|
376
|
+
/// While a message is in flight, probes the transport for an unexpected
/// EOF (peer hangup). An EOF mid-message closes the read half and yields
/// `new_incomplete`; with half-close allowed or bytes still buffered,
/// nothing is probed.
fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
    debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());
    debug_assert!(self.is_mid_message());

    if self.state.allow_half_close || !self.io.read_buf().is_empty() {
        return Poll::Pending;
    }

    let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?;

    if num_read == 0 {
        trace!("found unexpected EOF on busy connection: {:?}", self.state);
        self.state.close_read();
        Poll::Ready(Err(Error::new_incomplete()))
    } else {
        // Bytes arrived; they stay in the read buffer for later parsing.
        Poll::Ready(Ok(()))
    }
}
|
|
394
|
+
|
|
395
|
+
/// Reads directly from the underlying io into the read buffer, returning
/// the byte count. An io error closes the whole connection state before
/// being propagated.
fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
    debug_assert!(!self.state.is_read_closed());

    let result = ready!(self.io.poll_read_from_io(cx));
    #[allow(clippy::manual_inspect)]
    Poll::Ready(result.map_err(|e| {
        trace!(error = %e, "force_io_read; io error");
        // Close both halves before handing the error back.
        self.state.close();
        e
    }))
}
|
|
406
|
+
|
|
407
|
+
/// Possibly sets `notify_read` (consumed by `wants_read_again`) so the
/// dispatcher re-polls the read half, after probing the io for EOF/errors.
/// Only acts when reading is `Init` and no body write is in progress.
fn maybe_notify(&mut self, cx: &mut Context<'_>) {
    // its possible that we returned NotReady from poll() without having
    // exhausted the underlying Io. We would have done this when we
    // determined we couldn't keep reading until we knew how writing
    // would finish.

    match self.state.reading {
        Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => {
            return;
        }
        Reading::Init => (),
    };

    match self.state.writing {
        Writing::Body(..) => return,
        Writing::Init | Writing::KeepAlive | Writing::Closed => (),
    }

    if !self.io.is_read_blocked() {
        if self.io.read_buf().is_empty() {
            // Probe the transport once so an EOF/error is noticed even
            // though no parse is currently driving the read.
            match self.io.poll_read_from_io(cx) {
                Poll::Ready(Ok(n)) => {
                    if n == 0 {
                        trace!("maybe_notify; read eof");
                        if self.state.is_idle() {
                            self.state.close();
                        } else {
                            self.close_read()
                        }
                        return;
                    }
                }
                Poll::Pending => {
                    trace!("maybe_notify; read_from_io blocked");
                    return;
                }
                Poll::Ready(Err(e)) => {
                    trace!("maybe_notify; read_from_io error: {}", e);
                    self.state.close();
                    // Stash the error; notify_read below still lets the
                    // dispatcher poll again and observe the closed state.
                    self.state.error = Some(Error::new_io(e));
                }
            }
        }
        self.state.notify_read = true;
    }
}
|
|
453
|
+
|
|
454
|
+
/// Updates the keep-alive state for role `T`, then possibly flags that
/// the read half should be polled again (see `maybe_notify`).
fn try_keep_alive(&mut self, cx: &mut Context<'_>) {
    self.state.try_keep_alive::<T>();
    self.maybe_notify(cx);
}
|
|
458
|
+
|
|
459
|
+
pub(crate) fn can_write_head(&self) -> bool {
|
|
460
|
+
if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) {
|
|
461
|
+
return false;
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
match self.state.writing {
|
|
465
|
+
Writing::Init => self.io.can_headers_buf(),
|
|
466
|
+
_ => false,
|
|
467
|
+
}
|
|
468
|
+
}
|
|
469
|
+
|
|
470
|
+
pub(crate) fn can_write_body(&self) -> bool {
|
|
471
|
+
match self.state.writing {
|
|
472
|
+
Writing::Body(..) => true,
|
|
473
|
+
Writing::Init | Writing::KeepAlive | Writing::Closed => false,
|
|
474
|
+
}
|
|
475
|
+
}
|
|
476
|
+
|
|
477
|
+
/// Whether the io write buffer can accept more body bytes.
pub(crate) fn can_buffer_body(&self) -> bool {
    self.io.can_buffer()
}
|
|
480
|
+
|
|
481
|
+
pub(crate) fn write_head(&mut self, head: MessageHead<T::Outgoing>, body: Option<BodyLength>) {
|
|
482
|
+
if let Some(encoder) = self.encode_head(head, body) {
|
|
483
|
+
self.state.writing = if !encoder.is_eof() {
|
|
484
|
+
Writing::Body(encoder)
|
|
485
|
+
} else if encoder.is_last() {
|
|
486
|
+
Writing::Closed
|
|
487
|
+
} else {
|
|
488
|
+
Writing::KeepAlive
|
|
489
|
+
};
|
|
490
|
+
}
|
|
491
|
+
}
|
|
492
|
+
|
|
493
|
+
/// Serializes `head` into the io headers buffer, returning the body
/// `Encoder` on success. On encoding failure, stores the error, marks
/// writing `Closed`, and returns `None`.
///
/// On success the (now-drained) header map is cached for reuse by the
/// parser — `encode_headers` is expected to leave it empty.
fn encode_head(
    &mut self,
    mut head: MessageHead<T::Outgoing>,
    body: Option<BodyLength>,
) -> Option<Encoder> {
    debug_assert!(self.can_write_head());

    if !T::should_read_first() {
        self.state.busy();
    }

    // Downgrade/patch the outgoing message for the peer's known version.
    self.enforce_version(&mut head);

    let buf = self.io.headers_buf();
    match super::role::encode_headers::<T>(
        Encode {
            head: &mut head,
            body,
            req_method: &mut self.state.method,
        },
        buf,
    ) {
        Ok(encoder) => {
            debug_assert!(self.state.cached_headers.is_none());
            debug_assert!(head.headers.is_empty());
            // Recycle the empty map's allocation for the next parse.
            self.state.cached_headers = Some(head.headers);

            Some(encoder)
        }
        Err(err) => {
            self.state.error = Some(err);
            self.state.writing = Writing::Closed;
            None
        }
    }
}
|
|
529
|
+
|
|
530
|
+
// Fix keep-alive when Connection: keep-alive header is not present
//
// Only consulted for HTTP/1.0 peers (called from `enforce_version`):
// either disables our keep-alive state or inserts an explicit
// `Connection: keep-alive` header, depending on `head.version`.
fn fix_keep_alive(&mut self, head: &mut MessageHead<T::Outgoing>) {
    let outgoing_is_keep_alive = head
        .headers
        .get(CONNECTION)
        .is_some_and(headers::connection_keep_alive);

    if !outgoing_is_keep_alive {
        match head.version {
            // If response is version 1.0 and keep-alive is not present in the response,
            // disable keep-alive so the server closes the connection
            Version::HTTP_10 => self.state.disable_keep_alive(),
            // If response is version 1.1 and keep-alive is wanted, add
            // Connection: keep-alive header when not present
            Version::HTTP_11 => {
                if self.state.wants_keep_alive() {
                    head.headers
                        .insert(CONNECTION, HeaderValue::from_static("keep-alive"));
                }
            }
            _ => (),
        }
    }
}
|
|
554
|
+
|
|
555
|
+
// If we know the remote speaks an older version, we try to fix up any messages
// to work with our older peer.
//
// `state.version` is the version learned from the peer in `poll_read_head`.
fn enforce_version(&mut self, head: &mut MessageHead<T::Outgoing>) {
    match self.state.version {
        Version::HTTP_10 => {
            // Fixes response or connection when keep-alive header is not present
            self.fix_keep_alive(head);
            // If the remote only knows HTTP/1.0, we should force ourselves
            // to do only speak HTTP/1.0 as well.
            head.version = Version::HTTP_10;
        }
        Version::HTTP_11 => {
            // Keep-alive was explicitly disabled: announce the close.
            if let KA::Disabled = self.state.keep_alive.status() {
                head.headers
                    .insert(CONNECTION, HeaderValue::from_static("close"));
            }
        }
        _ => (),
    }
    // If the remote speaks HTTP/1.1, then it *should* be fine with
    // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let
    // the user's headers be.
}
|
|
578
|
+
|
|
579
|
+
/// Encodes `chunk` into the write buffer. If the encoder reaches EOF as a
/// result, transitions writing to `Closed` (last message) or `KeepAlive`;
/// otherwise the `Body` state is kept (early return).
pub(crate) fn write_body(&mut self, chunk: B) {
    debug_assert!(self.can_write_body() && self.can_buffer_body());
    // empty chunks should be discarded at Dispatcher level
    debug_assert!(chunk.remaining() != 0);

    let state = match self.state.writing {
        Writing::Body(ref mut encoder) => {
            self.io.buffer(encoder.encode(chunk));

            if !encoder.is_eof() {
                // More body to come; stay in Writing::Body.
                return;
            }

            if encoder.is_last() {
                Writing::Closed
            } else {
                Writing::KeepAlive
            }
        }
        _ => unreachable!("write_body invalid state: {:?}", self.state.writing),
    };

    self.state.writing = state;
}
|
|
603
|
+
|
|
604
|
+
/// Encodes and buffers trailer fields, finishing the body write.
///
/// Servers silently drop trailers unless the peer advertised
/// `TE: trailers` (`allow_trailer_fields`, set in `poll_read_head`).
/// If the encoder produces no trailer buffer, the writing state is left
/// unchanged.
pub(crate) fn write_trailers(&mut self, trailers: HeaderMap) {
    if T::is_server() && !self.state.allow_trailer_fields {
        debug!("trailers not allowed to be sent");
        return;
    }
    debug_assert!(self.can_write_body() && self.can_buffer_body());

    match self.state.writing {
        Writing::Body(ref encoder) => {
            if let Some(enc_buf) = encoder.encode_trailers(trailers) {
                self.io.buffer(enc_buf);

                self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
                    Writing::Closed
                } else {
                    Writing::KeepAlive
                };
            }
        }
        _ => unreachable!("write_trailers invalid state: {:?}", self.state.writing),
    }
}
|
|
626
|
+
|
|
627
|
+
/// Encodes `chunk` as the final body data in one step. The encoder
/// reports whether the connection may be kept alive afterwards, which
/// decides the `KeepAlive` vs `Closed` transition.
pub(crate) fn write_body_and_end(&mut self, chunk: B) {
    debug_assert!(self.can_write_body() && self.can_buffer_body());
    // empty chunks should be discarded at Dispatcher level
    debug_assert!(chunk.remaining() != 0);

    let state = match self.state.writing {
        Writing::Body(ref encoder) => {
            let can_keep_alive = encoder.encode_and_end(chunk, self.io.write_buf());
            if can_keep_alive {
                Writing::KeepAlive
            } else {
                Writing::Closed
            }
        }
        _ => unreachable!("write_body invalid state: {:?}", self.state.writing),
    };

    self.state.writing = state;
}
|
|
646
|
+
|
|
647
|
+
/// Signal the end of the outgoing body.
///
/// A no-op (`Ok`) when no body is currently being written. Otherwise asks
/// the encoder to finish: a successful end may yield a final buffer (e.g.
/// a terminating chunk) which is queued for write, and the writing state
/// advances to `Closed` or `KeepAlive`. If the encoder reports the body
/// was not at EOF, the write side is closed and a body-write-aborted
/// error is returned.
pub(crate) fn end_body(&mut self) -> Result<()> {
    debug_assert!(self.can_write_body());

    let encoder = match self.state.writing {
        Writing::Body(ref mut enc) => enc,
        // Not writing a body: nothing to end.
        _ => return Ok(()),
    };

    // end of stream, that means we should try to eof
    match encoder.end() {
        Ok(end) => {
            if let Some(end) = end {
                self.io.buffer(end);
            }

            self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
                Writing::Closed
            } else {
                Writing::KeepAlive
            };

            Ok(())
        }
        Err(not_eof) => {
            // The body ended early (e.g. fewer bytes than the declared
            // length); the message is corrupt, so close the write side.
            self.state.writing = Writing::Closed;
            Err(Error::new_body_write_aborted().with(not_eof))
        }
    }
}
|
|
676
|
+
|
|
677
|
+
// When we get a parse error, depending on what side we are, we might be able
// to write a response before closing the connection.
//
// - Client: there is nothing we can do
// - Server: if Response hasn't been written yet, we can send a 4xx response
fn on_parse_error(&mut self, err: Error) -> Result<()> {
    if let Writing::Init = self.state.writing {
        // Peer spoke HTTP/2 on an HTTP/1 connection: report a version
        // error instead of a generic parse failure.
        if self.has_h2_prefix() {
            return Err(Error::new_version_h2());
        }
        // T::on_error returning Some means this side (server) can still
        // produce an error response head for the peer.
        if let Some(msg) = T::on_error(&err) {
            // Drop the cached headers so as to not trigger a debug
            // assert in `write_head`...
            self.state.cached_headers.take();
            self.write_head(msg, None);
            // Stash the error so `take_error` can surface it later.
            self.state.error = Some(err);
            return Ok(());
        }
    }

    // fallback is pass the error back up
    Err(err)
}
|
|
700
|
+
|
|
701
|
+
/// Flush buffered output to the underlying IO.
///
/// Once the flush completes successfully, keep-alive is re-evaluated via
/// `try_keep_alive` so an idle connection can be recycled.
pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
    ready!(Pin::new(&mut self.io).poll_flush(cx))?;
    self.try_keep_alive(cx);
    trace!("flushed({}): {:?}", T::LOG, self.state);
    Poll::Ready(Ok(()))
}
|
|
707
|
+
|
|
708
|
+
pub(crate) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
709
|
+
match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) {
|
|
710
|
+
Ok(()) => {
|
|
711
|
+
trace!("shut down IO complete");
|
|
712
|
+
Poll::Ready(Ok(()))
|
|
713
|
+
}
|
|
714
|
+
Err(e) => {
|
|
715
|
+
debug!("error shutting down IO: {}", e);
|
|
716
|
+
Poll::Ready(Err(e))
|
|
717
|
+
}
|
|
718
|
+
}
|
|
719
|
+
}
|
|
720
|
+
|
|
721
|
+
/// If the read side can be cheaply drained, do so. Otherwise, close.
pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) {
    if let Reading::Continue(ref decoder) = self.state.reading {
        // skip sending the 100-continue
        // just move forward to a read, in case a tiny body was included
        self.state.reading = Reading::Body(decoder.clone());
    }

    // Best-effort: one read attempt; the result itself is ignored, only
    // the resulting reading state matters below.
    let _ = self.poll_read_body(cx);

    // If still in Reading::Body, just give up
    match self.state.reading {
        Reading::Init | Reading::KeepAlive => {
            trace!("body drained")
        }
        _ => self.close_read(),
    }
}
|
|
739
|
+
|
|
740
|
+
/// Close the read side; delegates to `State::close_read`, which also
/// disables keep-alive.
pub(crate) fn close_read(&mut self) {
    self.state.close_read();
}
|
|
743
|
+
|
|
744
|
+
/// Close the write side; delegates to `State::close_write`, which also
/// disables keep-alive.
pub(crate) fn close_write(&mut self) {
    self.state.close_write();
}
|
|
747
|
+
|
|
748
|
+
/// Surface an error previously stashed on the state (see
/// `on_parse_error`), leaving the slot empty; `Ok(())` when none is set.
pub(crate) fn take_error(&mut self) -> Result<()> {
    match self.state.error.take() {
        Some(err) => Err(err),
        None => Ok(()),
    }
}
|
|
755
|
+
|
|
756
|
+
/// Register a pending HTTP upgrade on the state and return the receiving
/// half for the caller to await.
pub(super) fn on_upgrade(&mut self) -> upgrade::OnUpgrade {
    trace!("{}: prepare possible HTTP upgrade", T::LOG);
    self.state.prepare_upgrade()
}
|
|
760
|
+
}
|
|
761
|
+
|
|
762
|
+
impl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {
    /// Debug output includes only the `state` and `io` fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Conn");
        dbg.field("state", &self.state);
        dbg.field("io", &self.io);
        dbg.finish()
    }
}
|
|
770
|
+
|
|
771
|
+
// B and T are never pinned
// (only the IO half `I` must be Unpin for the whole Conn to be Unpin)
impl<I: Unpin, B, T> Unpin for Conn<I, B, T> {}
|
|
773
|
+
|
|
774
|
+
/// Per-connection HTTP/1 protocol state: read/write phases, keep-alive
/// tracking, and assorted parser options.
struct State {
    // If true, one closed direction does not force-close the other —
    // TODO(review): confirm against the Dispatcher's half-close handling.
    allow_half_close: bool,
    /// Re-usable HeaderMap to reduce allocating new ones.
    cached_headers: Option<HeaderMap>,
    /// If an error occurs when there wasn't a direct way to return it
    /// back to the user, this is set.
    error: Option<Error>,
    /// Current keep-alive status.
    keep_alive: KA,
    /// If mid-message, the HTTP Method that started it.
    ///
    /// This is used to know things such as if the message can include
    /// a body or not.
    method: Option<Method>,
    // Configuration handed to the httparse-based HTTP/1 parser.
    h1_parser_config: ParserConfig,
    // Optional cap on the number of headers accepted per message —
    // presumably enforced by the parser; None means the default limit.
    h1_max_headers: Option<usize>,
    // Whether HTTP/0.9 responses are tolerated (client side) —
    // TODO(review): confirm where this flag is consumed.
    h09_responses: bool,
    /// Set to true when the Dispatcher should poll read operations
    /// again. See the `maybe_notify` method for more.
    notify_read: bool,
    /// State of allowed reads
    reading: Reading,
    /// State of allowed writes
    writing: Writing,
    /// An expected pending HTTP upgrade.
    upgrade: Option<upgrade::Pending>,
    /// Either HTTP/1.0 or 1.1 connection
    version: Version,
    /// Flag to track if trailer fields are allowed to be sent
    allow_trailer_fields: bool,
}
|
|
805
|
+
|
|
806
|
+
/// Phase of the read side of the connection.
#[derive(Debug)]
enum Reading {
    /// Waiting for the next message head.
    Init,
    /// A 100-continue is pending before the body is read; holds the body
    /// decoder to switch into `Body` (see `poll_drain_or_close_read`).
    Continue(Decoder),
    /// Reading a message body with the given decoder.
    Body(Decoder),
    /// Message fully read; connection may be reused.
    KeepAlive,
    /// Read side is closed.
    Closed,
}
|
|
814
|
+
|
|
815
|
+
/// Phase of the write side of the connection.
/// (Debug is hand-implemented below to keep non-Body variants terse.)
enum Writing {
    /// Waiting to write the next message head.
    Init,
    /// Writing a message body with the given encoder.
    Body(Encoder),
    /// Message fully written; connection may be reused.
    KeepAlive,
    /// Write side is closed.
    Closed,
}
|
|
821
|
+
|
|
822
|
+
impl fmt::Debug for State {
|
|
823
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
824
|
+
let mut builder = f.debug_struct("State");
|
|
825
|
+
builder
|
|
826
|
+
.field("reading", &self.reading)
|
|
827
|
+
.field("writing", &self.writing)
|
|
828
|
+
.field("keep_alive", &self.keep_alive);
|
|
829
|
+
|
|
830
|
+
// Only show error field if it's interesting...
|
|
831
|
+
if let Some(ref error) = self.error {
|
|
832
|
+
builder.field("error", error);
|
|
833
|
+
}
|
|
834
|
+
|
|
835
|
+
if self.allow_half_close {
|
|
836
|
+
builder.field("allow_half_close", &true);
|
|
837
|
+
}
|
|
838
|
+
|
|
839
|
+
// Purposefully leaving off other fields..
|
|
840
|
+
|
|
841
|
+
builder.finish()
|
|
842
|
+
}
|
|
843
|
+
}
|
|
844
|
+
|
|
845
|
+
impl fmt::Debug for Writing {
|
|
846
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
847
|
+
match *self {
|
|
848
|
+
Writing::Init => f.write_str("Init"),
|
|
849
|
+
Writing::Body(ref enc) => f.debug_tuple("Body").field(enc).finish(),
|
|
850
|
+
Writing::KeepAlive => f.write_str("KeepAlive"),
|
|
851
|
+
Writing::Closed => f.write_str("Closed"),
|
|
852
|
+
}
|
|
853
|
+
}
|
|
854
|
+
}
|
|
855
|
+
|
|
856
|
+
impl std::ops::BitAndAssign<bool> for KA {
|
|
857
|
+
fn bitand_assign(&mut self, enabled: bool) {
|
|
858
|
+
if !enabled {
|
|
859
|
+
trace!("remote disabling keep-alive");
|
|
860
|
+
*self = KA::Disabled;
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
}
|
|
864
|
+
|
|
865
|
+
/// Keep-alive status of the connection.
#[derive(Clone, Copy, Debug, Default)]
enum KA {
    /// Between messages; the connection may be reused.
    Idle,
    /// Mid-message (the default for a fresh connection).
    #[default]
    Busy,
    /// Keep-alive is off; the connection will not be reused.
    Disabled,
}
|
|
872
|
+
|
|
873
|
+
impl KA {
    /// Mark the connection as idle (reusable).
    fn idle(&mut self) {
        *self = KA::Idle;
    }

    /// Mark the connection as busy (mid-message).
    fn busy(&mut self) {
        *self = KA::Busy;
    }

    /// Permanently disable keep-alive.
    fn disable(&mut self) {
        *self = KA::Disabled;
    }

    /// Current status by value (KA is Copy).
    fn status(&self) -> KA {
        *self
    }
}
|
|
890
|
+
|
|
891
|
+
impl State {
    /// Fully close the connection: both directions and keep-alive.
    fn close(&mut self) {
        trace!("State::close()");
        self.reading = Reading::Closed;
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    /// Close the read side; a half-closed connection can't be reused,
    /// so keep-alive is disabled too.
    fn close_read(&mut self) {
        trace!("State::close_read()");
        self.reading = Reading::Closed;
        self.keep_alive.disable();
    }

    /// Close the write side; a half-closed connection can't be reused,
    /// so keep-alive is disabled too.
    fn close_write(&mut self) {
        trace!("State::close_write()");
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    /// True unless keep-alive has been explicitly disabled.
    fn wants_keep_alive(&self) -> bool {
        !matches!(self.keep_alive.status(), KA::Disabled)
    }

    /// Called after a message completes: if both directions reached
    /// `KeepAlive` and we were `Busy`, transition to idle so the next
    /// message can begin; otherwise (keep-alive not possible, or one side
    /// already closed while the other finished) close the connection.
    fn try_keep_alive<T: Http1Transaction>(&mut self) {
        match (&self.reading, &self.writing) {
            (&Reading::KeepAlive, &Writing::KeepAlive) => {
                if let KA::Busy = self.keep_alive.status() {
                    self.idle::<T>();
                } else {
                    // Status is Idle or Disabled: either way, reuse is
                    // not appropriate here, so close.
                    trace!(
                        "try_keep_alive({}): could keep-alive, but status = {:?}",
                        T::LOG,
                        self.keep_alive
                    );
                    self.close();
                }
            }
            (&Reading::Closed, &Writing::KeepAlive) | (&Reading::KeepAlive, &Writing::Closed) => {
                self.close()
            }
            _ => (),
        }
    }

    /// Turn keep-alive off for this connection.
    fn disable_keep_alive(&mut self) {
        self.keep_alive.disable()
    }

    /// Mark the connection busy — unless keep-alive was disabled, which
    /// is sticky and must not be overwritten.
    fn busy(&mut self) {
        if let KA::Disabled = self.keep_alive.status() {
            return;
        }
        self.keep_alive.busy();
    }

    /// Reset per-message state after a completed exchange so the
    /// connection can carry another message.
    fn idle<T: Http1Transaction>(&mut self) {
        debug_assert!(!self.is_idle(), "State::idle() called while idle");

        self.method = None;
        self.keep_alive.idle();

        // `keep_alive.idle()` has no effect if keep-alive was Disabled;
        // in that case the connection can't be reused, so close it.
        if !self.is_idle() {
            self.close();
            return;
        }

        self.reading = Reading::Init;
        self.writing = Writing::Init;

        // !T::should_read_first() means Client.
        //
        // If Client connection has just gone idle, the Dispatcher
        // should try the poll loop one more time, so as to poll the
        // pending requests stream.
        if !T::should_read_first() {
            self.notify_read = true;
        }
    }

    /// True when keep-alive status is `Idle`.
    fn is_idle(&self) -> bool {
        matches!(self.keep_alive.status(), KA::Idle)
    }

    /// True when the read side is closed.
    fn is_read_closed(&self) -> bool {
        matches!(self.reading, Reading::Closed)
    }

    /// True when the write side is closed.
    fn is_write_closed(&self) -> bool {
        matches!(self.writing, Writing::Closed)
    }

    /// Create a pending-upgrade pair, stash the sending half on the
    /// state, and return the receiving half to the caller.
    fn prepare_upgrade(&mut self) -> upgrade::OnUpgrade {
        let (tx, rx) = upgrade::pending();
        self.upgrade = Some(tx);
        rx
    }
}
|