wreq-rb 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Cargo.lock +2688 -0
- data/Cargo.toml +6 -0
- data/README.md +179 -0
- data/ext/wreq_rb/Cargo.toml +39 -0
- data/ext/wreq_rb/extconf.rb +22 -0
- data/ext/wreq_rb/src/client.rs +565 -0
- data/ext/wreq_rb/src/error.rs +25 -0
- data/ext/wreq_rb/src/lib.rs +20 -0
- data/ext/wreq_rb/src/response.rs +132 -0
- data/lib/wreq-rb/version.rb +5 -0
- data/lib/wreq-rb.rb +17 -0
- data/patches/0001-add-transfer-size-tracking.patch +292 -0
- data/vendor/wreq/Cargo.toml +306 -0
- data/vendor/wreq/LICENSE +202 -0
- data/vendor/wreq/README.md +122 -0
- data/vendor/wreq/examples/cert_store.rs +77 -0
- data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
- data/vendor/wreq/examples/emulation.rs +118 -0
- data/vendor/wreq/examples/form.rs +14 -0
- data/vendor/wreq/examples/http1_websocket.rs +37 -0
- data/vendor/wreq/examples/http2_websocket.rs +45 -0
- data/vendor/wreq/examples/json_dynamic.rs +41 -0
- data/vendor/wreq/examples/json_typed.rs +47 -0
- data/vendor/wreq/examples/keylog.rs +16 -0
- data/vendor/wreq/examples/request_with_emulation.rs +115 -0
- data/vendor/wreq/examples/request_with_interface.rs +37 -0
- data/vendor/wreq/examples/request_with_local_address.rs +16 -0
- data/vendor/wreq/examples/request_with_proxy.rs +13 -0
- data/vendor/wreq/examples/request_with_redirect.rs +22 -0
- data/vendor/wreq/examples/request_with_version.rs +15 -0
- data/vendor/wreq/examples/tor_socks.rs +24 -0
- data/vendor/wreq/examples/unix_socket.rs +33 -0
- data/vendor/wreq/src/client/body.rs +304 -0
- data/vendor/wreq/src/client/conn/conn.rs +231 -0
- data/vendor/wreq/src/client/conn/connector.rs +549 -0
- data/vendor/wreq/src/client/conn/http.rs +1023 -0
- data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
- data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
- data/vendor/wreq/src/client/conn/proxy.rs +39 -0
- data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
- data/vendor/wreq/src/client/conn/uds.rs +44 -0
- data/vendor/wreq/src/client/conn/verbose.rs +149 -0
- data/vendor/wreq/src/client/conn.rs +323 -0
- data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
- data/vendor/wreq/src/client/core/body/length.rs +118 -0
- data/vendor/wreq/src/client/core/body.rs +34 -0
- data/vendor/wreq/src/client/core/common/buf.rs +149 -0
- data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
- data/vendor/wreq/src/client/core/common/watch.rs +76 -0
- data/vendor/wreq/src/client/core/common.rs +3 -0
- data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
- data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
- data/vendor/wreq/src/client/core/conn.rs +11 -0
- data/vendor/wreq/src/client/core/dispatch.rs +299 -0
- data/vendor/wreq/src/client/core/error.rs +435 -0
- data/vendor/wreq/src/client/core/ext.rs +201 -0
- data/vendor/wreq/src/client/core/http1.rs +178 -0
- data/vendor/wreq/src/client/core/http2.rs +483 -0
- data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
- data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
- data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
- data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
- data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
- data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
- data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
- data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
- data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
- data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
- data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
- data/vendor/wreq/src/client/core/proto.rs +58 -0
- data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
- data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
- data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
- data/vendor/wreq/src/client/core/rt.rs +25 -0
- data/vendor/wreq/src/client/core/upgrade.rs +267 -0
- data/vendor/wreq/src/client/core.rs +16 -0
- data/vendor/wreq/src/client/emulation.rs +161 -0
- data/vendor/wreq/src/client/http/client/error.rs +142 -0
- data/vendor/wreq/src/client/http/client/exec.rs +29 -0
- data/vendor/wreq/src/client/http/client/extra.rs +77 -0
- data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
- data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
- data/vendor/wreq/src/client/http/client/util.rs +104 -0
- data/vendor/wreq/src/client/http/client.rs +1003 -0
- data/vendor/wreq/src/client/http/future.rs +99 -0
- data/vendor/wreq/src/client/http.rs +1629 -0
- data/vendor/wreq/src/client/layer/config/options.rs +156 -0
- data/vendor/wreq/src/client/layer/config.rs +116 -0
- data/vendor/wreq/src/client/layer/cookie.rs +161 -0
- data/vendor/wreq/src/client/layer/decoder.rs +139 -0
- data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
- data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
- data/vendor/wreq/src/client/layer/redirect.rs +145 -0
- data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
- data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
- data/vendor/wreq/src/client/layer/retry.rs +151 -0
- data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
- data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
- data/vendor/wreq/src/client/layer/timeout.rs +177 -0
- data/vendor/wreq/src/client/layer.rs +15 -0
- data/vendor/wreq/src/client/multipart.rs +717 -0
- data/vendor/wreq/src/client/request.rs +818 -0
- data/vendor/wreq/src/client/response.rs +534 -0
- data/vendor/wreq/src/client/ws/json.rs +99 -0
- data/vendor/wreq/src/client/ws/message.rs +453 -0
- data/vendor/wreq/src/client/ws.rs +714 -0
- data/vendor/wreq/src/client.rs +27 -0
- data/vendor/wreq/src/config.rs +140 -0
- data/vendor/wreq/src/cookie.rs +579 -0
- data/vendor/wreq/src/dns/gai.rs +249 -0
- data/vendor/wreq/src/dns/hickory.rs +78 -0
- data/vendor/wreq/src/dns/resolve.rs +180 -0
- data/vendor/wreq/src/dns.rs +69 -0
- data/vendor/wreq/src/error.rs +502 -0
- data/vendor/wreq/src/ext.rs +398 -0
- data/vendor/wreq/src/hash.rs +143 -0
- data/vendor/wreq/src/header.rs +506 -0
- data/vendor/wreq/src/into_uri.rs +187 -0
- data/vendor/wreq/src/lib.rs +586 -0
- data/vendor/wreq/src/proxy/mac.rs +82 -0
- data/vendor/wreq/src/proxy/matcher.rs +806 -0
- data/vendor/wreq/src/proxy/uds.rs +66 -0
- data/vendor/wreq/src/proxy/win.rs +31 -0
- data/vendor/wreq/src/proxy.rs +569 -0
- data/vendor/wreq/src/redirect.rs +575 -0
- data/vendor/wreq/src/retry.rs +198 -0
- data/vendor/wreq/src/sync.rs +129 -0
- data/vendor/wreq/src/tls/conn/cache.rs +123 -0
- data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
- data/vendor/wreq/src/tls/conn/ext.rs +82 -0
- data/vendor/wreq/src/tls/conn/macros.rs +34 -0
- data/vendor/wreq/src/tls/conn/service.rs +138 -0
- data/vendor/wreq/src/tls/conn.rs +681 -0
- data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
- data/vendor/wreq/src/tls/keylog.rs +99 -0
- data/vendor/wreq/src/tls/options.rs +464 -0
- data/vendor/wreq/src/tls/x509/identity.rs +122 -0
- data/vendor/wreq/src/tls/x509/parser.rs +71 -0
- data/vendor/wreq/src/tls/x509/store.rs +228 -0
- data/vendor/wreq/src/tls/x509.rs +68 -0
- data/vendor/wreq/src/tls.rs +154 -0
- data/vendor/wreq/src/trace.rs +55 -0
- data/vendor/wreq/src/util.rs +122 -0
- data/vendor/wreq/tests/badssl.rs +228 -0
- data/vendor/wreq/tests/brotli.rs +350 -0
- data/vendor/wreq/tests/client.rs +1098 -0
- data/vendor/wreq/tests/connector_layers.rs +227 -0
- data/vendor/wreq/tests/cookie.rs +306 -0
- data/vendor/wreq/tests/deflate.rs +347 -0
- data/vendor/wreq/tests/emulation.rs +260 -0
- data/vendor/wreq/tests/gzip.rs +347 -0
- data/vendor/wreq/tests/layers.rs +261 -0
- data/vendor/wreq/tests/multipart.rs +165 -0
- data/vendor/wreq/tests/proxy.rs +438 -0
- data/vendor/wreq/tests/redirect.rs +629 -0
- data/vendor/wreq/tests/retry.rs +135 -0
- data/vendor/wreq/tests/support/delay_server.rs +117 -0
- data/vendor/wreq/tests/support/error.rs +16 -0
- data/vendor/wreq/tests/support/layer.rs +183 -0
- data/vendor/wreq/tests/support/mod.rs +9 -0
- data/vendor/wreq/tests/support/server.rs +232 -0
- data/vendor/wreq/tests/timeouts.rs +281 -0
- data/vendor/wreq/tests/unix_socket.rs +135 -0
- data/vendor/wreq/tests/upgrade.rs +98 -0
- data/vendor/wreq/tests/zstd.rs +559 -0
- metadata +225 -0
|
@@ -0,0 +1,879 @@
|
|
|
1
|
+
use std::{
|
|
2
|
+
cmp, fmt,
|
|
3
|
+
io::{self, IoSlice},
|
|
4
|
+
pin::Pin,
|
|
5
|
+
task::{Context, Poll, ready},
|
|
6
|
+
};
|
|
7
|
+
|
|
8
|
+
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
|
9
|
+
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
|
10
|
+
|
|
11
|
+
use super::{Http1Transaction, ParseContext, ParsedMessage};
|
|
12
|
+
use crate::client::core::{self, Error, common::buf::BufList};
|
|
13
|
+
|
|
14
|
+
/// The initial buffer size allocated before trying to read from IO.
pub(crate) const INIT_BUFFER_SIZE: usize = 8192;

/// The minimum value that can be set to max buffer size.
pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;

/// The default maximum read buffer size. If the buffer gets this big and
/// a message is still not complete, a `TooLarge` error is triggered.
// Note: if this changes, update server::conn::Http::max_buf_size docs.
pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;

/// The maximum number of distinct `Buf`s to hold in a list before requiring
/// a flush. Only affects when the buffer strategy is to queue buffers.
///
/// Note that a flush can happen before reaching the maximum. This simply
/// forces a flush if the queue gets this big.
const MAX_BUF_LIST_BUFFERS: usize = 16;
|
|
31
|
+
|
|
32
|
+
/// An IO wrapper that buffers reads and writes for an HTTP/1 connection,
/// pairing a read buffer (with an adaptive sizing strategy) and a write
/// buffer (flattened or queued) around the raw transport `T`.
pub(crate) struct Buffered<T, B> {
    /// When set, `poll_flush` becomes a no-op while unread input remains
    /// in `read_buf` (pipeline optimization).
    flush_pipeline: bool,
    /// The underlying transport.
    io: T,
    /// Number of bytes of a partially-read message head accumulated so far;
    /// `None` once a complete head has been parsed (or nothing is pending).
    partial_len: Option<usize>,
    /// True when the most recent `poll_read` on `io` returned `Pending`.
    read_blocked: bool,
    /// Bytes read from `io` that have not yet been parsed or consumed.
    read_buf: BytesMut,
    /// Controls how much capacity to reserve before each read from `io`.
    read_buf_strategy: ReadStrategy,
    /// Pending outgoing bytes: headers plus (optionally queued) body bufs.
    write_buf: WriteBuf<B>,
}
|
|
41
|
+
|
|
42
|
+
impl<T, B> fmt::Debug for Buffered<T, B>
|
|
43
|
+
where
|
|
44
|
+
B: Buf,
|
|
45
|
+
{
|
|
46
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
47
|
+
f.debug_struct("Buffered")
|
|
48
|
+
.field("read_buf", &self.read_buf)
|
|
49
|
+
.field("write_buf", &self.write_buf)
|
|
50
|
+
.finish()
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
impl<T, B> Buffered<T, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
    B: Buf,
{
    /// Creates a new `Buffered` wrapping `io`.
    ///
    /// Picks the write strategy from the transport: queued buffers when the
    /// transport supports vectored writes, flattening otherwise.
    pub(crate) fn new(io: T) -> Buffered<T, B> {
        let strategy = if io.is_write_vectored() {
            WriteStrategy::Queue
        } else {
            WriteStrategy::Flatten
        };
        let write_buf = WriteBuf::new(strategy);
        Buffered {
            flush_pipeline: false,
            io,
            partial_len: None,
            read_blocked: false,
            // Start with zero capacity; the read strategy reserves on demand.
            read_buf: BytesMut::with_capacity(0),
            read_buf_strategy: ReadStrategy::default(),
            write_buf,
        }
    }

    /// Sets the maximum size for both the read and write buffers.
    ///
    /// # Panics
    ///
    /// Panics if `max` is smaller than `MINIMUM_MAX_BUFFER_SIZE`.
    pub(crate) fn set_max_buf_size(&mut self, max: usize) {
        assert!(
            max >= MINIMUM_MAX_BUFFER_SIZE,
            "The max_buf_size cannot be smaller than {MINIMUM_MAX_BUFFER_SIZE}.",
        );
        self.read_buf_strategy = ReadStrategy::with_max(max);
        self.write_buf.max_buf_size = max;
    }

    /// Switches the read strategy to reserve exactly `sz` bytes per read.
    pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
        self.read_buf_strategy = ReadStrategy::Exact(sz);
    }

    /// Forces the Flatten write strategy (single contiguous buffer).
    pub(crate) fn set_write_strategy_flatten(&mut self) {
        // this should always be called only at construction time,
        // so this assert is here to catch myself
        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
        self.write_buf.set_strategy(WriteStrategy::Flatten);
    }

    /// Forces the Queue write strategy (vectored writes of separate bufs).
    pub(crate) fn set_write_strategy_queue(&mut self) {
        // this should always be called only at construction time,
        // so this assert is here to catch myself
        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
        self.write_buf.set_strategy(WriteStrategy::Queue);
    }

    /// Returns the unparsed bytes currently held in the read buffer.
    pub(crate) fn read_buf(&self) -> &[u8] {
        self.read_buf.as_ref()
    }

    /// Return the "allocated" available space, not the potential space
    /// that could be allocated in the future.
    fn read_buf_remaining_mut(&self) -> usize {
        self.read_buf.capacity() - self.read_buf.len()
    }

    /// Return whether we can append to the headers buffer.
    ///
    /// Reasons we can't:
    /// - The write buf is in queue mode, and some of the past body is still needing to be flushed.
    pub(crate) fn can_headers_buf(&self) -> bool {
        !self.write_buf.queue.has_remaining()
    }

    /// Returns the mutable headers vector to serialize a message head into.
    pub(crate) fn headers_buf(&mut self) -> &mut Vec<u8> {
        let buf = self.write_buf.headers_mut();
        &mut buf.bytes
    }

    /// Direct access to the write buffer.
    pub(super) fn write_buf(&mut self) -> &mut WriteBuf<B> {
        &mut self.write_buf
    }

    /// Buffers `buf` for a later flush, per the active write strategy.
    pub(crate) fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {
        self.write_buf.buffer(buf)
    }

    /// Whether more outgoing data may be buffered right now.
    pub(crate) fn can_buffer(&self) -> bool {
        self.flush_pipeline || self.write_buf.can_buffer()
    }

    /// Skips any leading CR/LF bytes in the read buffer (tolerated between
    /// messages by HTTP/1 parsers).
    pub(crate) fn consume_leading_lines(&mut self) {
        if !self.read_buf.is_empty() {
            let mut i = 0;
            while i < self.read_buf.len() {
                match self.read_buf[i] {
                    b'\r' | b'\n' => i += 1,
                    _ => break,
                }
            }
            self.read_buf.advance(i);
        }
    }

    /// Polls until a complete message head is parsed out of the read buffer,
    /// reading more from `io` as needed.
    ///
    /// Errors with `TooLarge` if the head exceeds the strategy's max size,
    /// and with `Incomplete` on EOF mid-head.
    pub(super) fn parse<S>(
        &mut self,
        cx: &mut Context<'_>,
        parse_ctx: ParseContext<'_>,
    ) -> Poll<core::Result<ParsedMessage<S::Incoming>>>
    where
        S: Http1Transaction,
    {
        loop {
            match super::role::parse_headers::<S>(
                &mut self.read_buf,
                self.partial_len,
                ParseContext {
                    cached_headers: parse_ctx.cached_headers,
                    req_method: parse_ctx.req_method,
                    h1_parser_config: parse_ctx.h1_parser_config.clone(),
                    h1_max_headers: parse_ctx.h1_max_headers,
                    h09_responses: parse_ctx.h09_responses,
                },
            )? {
                Some(msg) => {
                    debug!("parsed {} headers", msg.head.headers.len());
                    self.partial_len = None;
                    return Poll::Ready(Ok(msg));
                }
                None => {
                    let max = self.read_buf_strategy.max();
                    let curr_len = self.read_buf.len();
                    if curr_len >= max {
                        debug!("max_buf_size ({}) reached, closing", max);
                        return Poll::Ready(Err(Error::new_too_large()));
                    }
                    if curr_len > 0 {
                        trace!("partial headers; {} bytes so far", curr_len);
                        self.partial_len = Some(curr_len);
                    } else {
                        // a 1xx interim response gobbled some bytes
                        self.partial_len = None;
                    }
                }
            }
            // Need more bytes; a zero-length read here means EOF mid-head.
            if ready!(self.poll_read_from_io(cx)).map_err(Error::new_io)? == 0 {
                trace!("parse eof");
                return Poll::Ready(Err(Error::new_incomplete()));
            }
        }
    }

    /// Polls one read from `io` into the read buffer, reserving capacity
    /// according to the read strategy and recording the result with it.
    pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        self.read_blocked = false;
        let next = self.read_buf_strategy.next();
        if self.read_buf_remaining_mut() < next {
            self.read_buf.reserve(next);
        }

        // SAFETY: ReadBuf and poll_read promise not to set any uninitialized
        // bytes onto `dst`.
        #[allow(unsafe_code)]
        let dst = unsafe { self.read_buf.chunk_mut().as_uninit_slice_mut() };
        let mut buf = ReadBuf::uninit(dst);
        match Pin::new(&mut self.io).poll_read(cx, &mut buf) {
            Poll::Ready(Ok(_)) => {
                let n = buf.filled().len();
                trace!("received {} bytes", n);
                #[allow(unsafe_code)]
                unsafe {
                    // Safety: we just read that many bytes into the
                    // uninitialized part of the buffer, so this is okay.
                    // @tokio pls give me back `poll_read_buf` thanks
                    self.read_buf.advance_mut(n);
                }
                self.read_buf_strategy.record(n);
                Poll::Ready(Ok(n))
            }
            Poll::Pending => {
                // Remember we're blocked so callers can avoid re-polling.
                self.read_blocked = true;
                Poll::Pending
            }
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
        }
    }

    /// Tears down the wrapper, returning the transport and any bytes that
    /// were read but not yet consumed.
    pub(crate) fn into_inner(self) -> (T, Bytes) {
        (self.io, self.read_buf.freeze())
    }

    /// Mutable access to the underlying transport.
    pub(crate) fn io_mut(&mut self) -> &mut T {
        &mut self.io
    }

    /// Whether the last read attempt returned `Pending`.
    pub(crate) fn is_read_blocked(&self) -> bool {
        self.read_blocked
    }

    /// Polls the write buffer (and the transport) until all buffered bytes
    /// are written and flushed.
    pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        if self.flush_pipeline && !self.read_buf.is_empty() {
            // Pipelining: hold off flushing while unread input remains.
            Poll::Ready(Ok(()))
        } else if self.write_buf.remaining() == 0 {
            // Nothing buffered; just flush the transport itself.
            Pin::new(&mut self.io).poll_flush(cx)
        } else {
            if let WriteStrategy::Flatten = self.write_buf.strategy {
                return self.poll_flush_flattened(cx);
            }

            const MAX_WRITEV_BUFS: usize = 64;
            loop {
                let n = {
                    let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS];
                    let len = self.write_buf.chunks_vectored(&mut iovs);
                    ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))?
                };
                // TODO(eliza): we have to do this manually because
                // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when
                // `poll_write_buf` comes back, the manual advance will need to leave!
                self.write_buf.advance(n);
                debug!("flushed {} bytes", n);
                if self.write_buf.remaining() == 0 {
                    break;
                } else if n == 0 {
                    trace!(
                        "write returned zero, but {} bytes remaining",
                        self.write_buf.remaining()
                    );
                    return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                }
            }
            Pin::new(&mut self.io).poll_flush(cx)
        }
    }

    /// Specialized version of `flush` when strategy is Flatten.
    ///
    /// Since all buffered bytes are flattened into the single headers buffer,
    /// that skips some bookkeeping around using multiple buffers.
    fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        loop {
            let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?;
            debug!("flushed {} bytes", n);
            self.write_buf.headers.advance(n);
            if self.write_buf.headers.remaining() == 0 {
                // Fully drained: recycle the headers allocation.
                self.write_buf.headers.reset();
                break;
            } else if n == 0 {
                trace!(
                    "write returned zero, but {} bytes remaining",
                    self.write_buf.remaining()
                );
                return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
            }
        }
        Pin::new(&mut self.io).poll_flush(cx)
    }

    /// Test helper: awaits `poll_flush` to completion.
    #[cfg(test)]
    fn flush(&mut self) -> impl std::future::Future<Output = io::Result<()>> + '_ {
        std::future::poll_fn(move |cx| self.poll_flush(cx))
    }
}
|
|
310
|
+
|
|
311
|
+
// The `B` is a `Buf`, we never project a pin to it, so `Buffered` is
// `Unpin` whenever the transport `T` is.
impl<T: Unpin, B> Unpin for Buffered<T, B> {}
|
|
313
|
+
|
|
314
|
+
// TODO: This trait is old... at least rename to PollBytes or something...
/// A poll-based source of owned byte chunks, yielding at most `len` bytes
/// per call.
pub(crate) trait MemRead {
    /// Polls for up to `len` bytes. An empty chunk is returned when the
    /// underlying source is exhausted (see the `Buffered` impl below).
    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>>;
}
|
|
318
|
+
|
|
319
|
+
impl<T, B> MemRead for Buffered<T, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
    B: Buf,
{
    /// Serves bytes out of the read buffer first; only when it is empty
    /// does this poll the transport for more.
    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
        if !self.read_buf.is_empty() {
            // Hand out up to `len` already-buffered bytes (zero-copy split).
            let n = std::cmp::min(len, self.read_buf.len());
            Poll::Ready(Ok(self.read_buf.split_to(n).freeze()))
        } else {
            // `n == 0` here means EOF, which yields an empty `Bytes`.
            let n = ready!(self.poll_read_from_io(cx))?;
            Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze()))
        }
    }
}
|
|
334
|
+
|
|
335
|
+
/// Policy for how much read-buffer capacity to reserve before each read.
#[derive(Clone, Copy, Debug)]
enum ReadStrategy {
    /// Grow or shrink the per-read reservation based on observed read sizes.
    Adaptive {
        /// Armed by one under-sized read; a second consecutive under-sized
        /// read actually shrinks `next` (see `record`).
        decrease_now: bool,
        /// Amount of capacity to reserve before the next read.
        next: usize,
        /// Upper bound for `next` and for the buffered message-head size.
        max: usize,
    },
    /// Always reserve exactly this many bytes.
    Exact(usize),
}
|
|
344
|
+
|
|
345
|
+
impl ReadStrategy {
    /// Adaptive strategy starting at `INIT_BUFFER_SIZE`, capped at `max`.
    fn with_max(max: usize) -> ReadStrategy {
        ReadStrategy::Adaptive {
            decrease_now: false,
            next: INIT_BUFFER_SIZE,
            max,
        }
    }

    /// How many bytes to reserve before the next read.
    fn next(&self) -> usize {
        match *self {
            ReadStrategy::Adaptive { next, .. } => next,
            ReadStrategy::Exact(exact) => exact,
        }
    }

    /// Upper bound on the buffered message-head size.
    fn max(&self) -> usize {
        match *self {
            ReadStrategy::Adaptive { max, .. } => max,
            ReadStrategy::Exact(exact) => exact,
        }
    }

    /// Feeds back how many bytes the last read produced, adapting `next`
    /// (no-op for the `Exact` strategy).
    fn record(&mut self, bytes_read: usize) {
        match *self {
            ReadStrategy::Adaptive {
                ref mut decrease_now,
                ref mut next,
                max,
                ..
            } => {
                if bytes_read >= *next {
                    // The read saturated the reservation: double it, capped
                    // at `max`.
                    *next = cmp::min(incr_power_of_two(*next), max);
                    *decrease_now = false;
                } else {
                    let decr_to = prev_power_of_two(*next);
                    if bytes_read < decr_to {
                        if *decrease_now {
                            // Second consecutive small read: shrink, but
                            // never below the initial size.
                            *next = cmp::max(decr_to, INIT_BUFFER_SIZE);
                            *decrease_now = false;
                        } else {
                            // Decreasing is a two "record" process.
                            *decrease_now = true;
                        }
                    } else {
                        // A read within the current range should cancel
                        // a potential decrease, since we just saw proof
                        // that we still need this size.
                        *decrease_now = false;
                    }
                }
            }
            ReadStrategy::Exact(_) => (),
        }
    }
}
|
|
401
|
+
|
|
402
|
+
/// Doubles `n`, clamping at `usize::MAX` instead of overflowing.
fn incr_power_of_two(n: usize) -> usize {
    n.checked_mul(2).unwrap_or(usize::MAX)
}
|
|
405
|
+
|
|
406
|
+
/// Returns the power of two one step below `n`'s highest set bit: for `n`
/// whose most significant bit is `2^k`, this yields `2^(k - 1)`.
///
/// Caller must ensure `n >= 4`; smaller values would underflow the shift.
fn prev_power_of_two(n: usize) -> usize {
    debug_assert!(n >= 4);
    // Highest set bit sits at index `usize::BITS - 1 - leading_zeros`;
    // produce the power of two one position below it.
    1usize << (usize::BITS - 2 - n.leading_zeros())
}
|
|
412
|
+
|
|
413
|
+
impl Default for ReadStrategy {
    /// Adaptive strategy capped at `DEFAULT_MAX_BUFFER_SIZE`.
    fn default() -> ReadStrategy {
        ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE)
    }
}
|
|
418
|
+
|
|
419
|
+
/// A `Buf` adapter over any `[u8]`-like storage, tracking a read position.
#[derive(Clone)]
pub(crate) struct Cursor<T> {
    /// The underlying storage.
    bytes: T,
    /// Index of the next unread byte.
    pos: usize,
}
|
|
424
|
+
|
|
425
|
+
impl<T: AsRef<[u8]>> Cursor<T> {
    /// Creates a cursor positioned at the start of `bytes`.
    #[inline]
    pub(crate) fn new(bytes: T) -> Cursor<T> {
        Cursor { bytes, pos: 0 }
    }
}
|
|
431
|
+
|
|
432
|
+
impl Cursor<Vec<u8>> {
|
|
433
|
+
/// If we've advanced the position a bit in this cursor, and wish to
|
|
434
|
+
/// extend the underlying vector, we may wish to unshift the "read" bytes
|
|
435
|
+
/// off, and move everything else over.
|
|
436
|
+
fn maybe_unshift(&mut self, additional: usize) {
|
|
437
|
+
if self.pos == 0 {
|
|
438
|
+
// nothing to do
|
|
439
|
+
return;
|
|
440
|
+
}
|
|
441
|
+
|
|
442
|
+
if self.bytes.capacity() - self.bytes.len() >= additional {
|
|
443
|
+
// there's room!
|
|
444
|
+
return;
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
self.bytes.drain(0..self.pos);
|
|
448
|
+
self.pos = 0;
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
fn reset(&mut self) {
|
|
452
|
+
self.pos = 0;
|
|
453
|
+
self.bytes.clear();
|
|
454
|
+
}
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
impl<T: AsRef<[u8]>> fmt::Debug for Cursor<T> {
|
|
458
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
459
|
+
f.debug_struct("Cursor")
|
|
460
|
+
.field("pos", &self.pos)
|
|
461
|
+
.field("len", &self.bytes.as_ref().len())
|
|
462
|
+
.finish()
|
|
463
|
+
}
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
impl<T: AsRef<[u8]>> Buf for Cursor<T> {
    /// Bytes left after the current position.
    #[inline]
    fn remaining(&self) -> usize {
        self.chunk().len()
    }

    /// The unread tail of the underlying storage.
    #[inline]
    fn chunk(&self) -> &[u8] {
        &self.bytes.as_ref()[self.pos..]
    }

    /// Moves the read position forward by `cnt` bytes.
    #[inline]
    fn advance(&mut self, cnt: usize) {
        let new_pos = self.pos + cnt;
        debug_assert!(new_pos <= self.bytes.as_ref().len());
        self.pos = new_pos;
    }
}
|
|
483
|
+
|
|
484
|
+
// an internal buffer to collect writes before flushes
pub(super) struct WriteBuf<B> {
    /// Re-usable buffer that holds message headers
    headers: Cursor<Vec<u8>>,
    /// Soft cap on buffered bytes; consulted by `can_buffer`.
    max_buf_size: usize,
    /// Deque of user buffers if strategy is Queue
    queue: BufList<B>,
    /// Whether bufs are flattened into `headers` or queued separately.
    strategy: WriteStrategy,
}
|
|
493
|
+
|
|
494
|
+
impl<B: Buf> WriteBuf<B> {
    /// Creates an empty write buffer for the given strategy, with the
    /// headers vector pre-sized to `INIT_BUFFER_SIZE`.
    fn new(strategy: WriteStrategy) -> WriteBuf<B> {
        WriteBuf {
            headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),
            max_buf_size: DEFAULT_MAX_BUFFER_SIZE,
            queue: BufList::new(),
            strategy,
        }
    }
}
|
|
504
|
+
|
|
505
|
+
impl<B> WriteBuf<B>
where
    B: Buf,
{
    /// Replaces the write strategy (intended for construction time only).
    fn set_strategy(&mut self, strategy: WriteStrategy) {
        self.strategy = strategy;
    }

    /// Buffers `buf` for a later flush.
    ///
    /// Flatten: copies every chunk into the shared headers vector.
    /// Queue: pushes the buffer onto the deque for vectored writes.
    ///
    /// `buf` must be non-empty (debug-asserted).
    pub(super) fn buffer<BB: Buf + Into<B>>(&mut self, mut buf: BB) {
        debug_assert!(buf.has_remaining());
        match self.strategy {
            WriteStrategy::Flatten => {
                let head = self.headers_mut();

                // Reclaim already-flushed prefix space before copying in.
                head.maybe_unshift(buf.remaining());
                trace!(
                    self.len = head.remaining(),
                    buf.len = buf.remaining(),
                    "buffer.flatten"
                );
                //perf: This is a little faster than <Vec as BufMut>>::put,
                //but accomplishes the same result.
                loop {
                    let adv = {
                        let slice = buf.chunk();
                        if slice.is_empty() {
                            return;
                        }
                        head.bytes.extend_from_slice(slice);
                        slice.len()
                    };
                    buf.advance(adv);
                }
            }
            WriteStrategy::Queue => {
                trace!(
                    self.len = self.remaining(),
                    buf.len = buf.remaining(),
                    "buffer.queue"
                );
                self.queue.push(buf.into());
            }
        }
    }

    /// Whether more data may be buffered without exceeding `max_buf_size`
    /// (and, in queue mode, `MAX_BUF_LIST_BUFFERS`).
    fn can_buffer(&self) -> bool {
        match self.strategy {
            WriteStrategy::Flatten => self.remaining() < self.max_buf_size,
            WriteStrategy::Queue => {
                self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
            }
        }
    }

    /// The headers cursor; only valid while no queued body bytes remain
    /// (debug-asserted), since headers must precede body on the wire.
    fn headers_mut(&mut self) -> &mut Cursor<Vec<u8>> {
        debug_assert!(!self.queue.has_remaining());
        &mut self.headers
    }
}
|
|
564
|
+
|
|
565
|
+
impl<B: Buf> fmt::Debug for WriteBuf<B> {
    /// Summarizes total pending bytes and the active strategy.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut state = f.debug_struct("WriteBuf");
        state.field("remaining", &self.remaining());
        state.field("strategy", &self.strategy);
        state.finish()
    }
}
|
|
573
|
+
|
|
574
|
+
impl<B: Buf> Buf for WriteBuf<B> {
    #[inline]
    fn remaining(&self) -> usize {
        self.headers.remaining() + self.queue.remaining()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        // Headers are always yielded before any queued body bytes.
        let headers = self.headers.chunk();
        if !headers.is_empty() {
            headers
        } else {
            self.queue.chunk()
        }
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        let hrem = self.headers.remaining();

        match hrem.cmp(&cnt) {
            // Exactly the headers were consumed: recycle their allocation.
            cmp::Ordering::Equal => self.headers.reset(),
            // Still inside the headers.
            cmp::Ordering::Greater => self.headers.advance(cnt),
            // Consumed past the headers into the queued buffers.
            cmp::Ordering::Less => {
                let qcnt = cnt - hrem;
                self.headers.reset();
                self.queue.advance(qcnt);
            }
        }
    }

    #[inline]
    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
        // Fill from headers first, then queued bufs, keeping wire order.
        let n = self.headers.chunks_vectored(dst);
        self.queue.chunks_vectored(&mut dst[n..]) + n
    }
}
|
|
611
|
+
|
|
612
|
+
/// How `WriteBuf` stores outgoing buffers before a flush.
#[derive(Debug)]
enum WriteStrategy {
    /// Copy every buffer into the single flattened headers vector.
    Flatten,
    /// Keep buffers separate and write them with vectored IO.
    Queue,
}
|
|
617
|
+
|
|
618
|
+
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio_test::io::Builder as Mock;

    use super::*;

    /// `parse` must keep reading from the transport until it would
    /// block, accumulating every partial read into `read_buf` even
    /// though the response head is still incomplete.
    #[tokio::test]
    async fn parse_reads_until_blocked() {
        use crate::client::core::proto::h1::ClientTransaction;

        let _ = pretty_env_logger::try_init();
        let mock = Mock::new()
            // Split over multiple reads will read all of it
            .read(b"HTTP/1.1 200 OK\r\n")
            .read(b"Server: crate::core:\r\n")
            // missing last line ending
            .wait(Duration::from_secs(1))
            .build();

        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);

        // We expect a `parse` to be not ready, and so can't await it directly.
        // Rather, this `poll_fn` will wrap the `Poll` result.
        std::future::poll_fn(|cx| {
            let parse_ctx = ParseContext {
                cached_headers: &mut None,
                req_method: &mut None,
                h1_parser_config: Default::default(),
                h1_max_headers: None,
                h09_responses: false,
            };
            assert!(
                buffered
                    .parse::<ClientTransaction>(cx, parse_ctx)
                    .is_pending()
            );
            Poll::Ready(())
        })
        .await;

        // Both partial reads were buffered despite the incomplete head.
        assert_eq!(
            buffered.read_buf,
            b"HTTP/1.1 200 OK\r\nServer: crate::core:\r\n"[..]
        );
    }

    /// The adaptive read strategy doubles its suggestion whenever a
    /// read fills the previous suggestion, and never exceeds `max()`.
    #[test]
    fn read_strategy_adaptive_increments() {
        let mut strategy = ReadStrategy::default();
        assert_eq!(strategy.next(), 8192);

        // Grows if record == next
        strategy.record(8192);
        assert_eq!(strategy.next(), 16384);

        strategy.record(16384);
        assert_eq!(strategy.next(), 32768);

        // Enormous records still increment at same rate
        strategy.record(usize::MAX);
        assert_eq!(strategy.next(), 65536);

        let max = strategy.max();
        while strategy.next() < max {
            strategy.record(max);
        }

        assert_eq!(strategy.next(), max, "never goes over max");
        strategy.record(max + 1);
        assert_eq!(strategy.next(), max, "never goes over max");
    }

    /// Shrinking requires two consecutive undersized reads; a single
    /// small read — or one that lands back within range — resets the
    /// countdown, and the suggestion never drops below the 8192 floor.
    #[test]
    fn read_strategy_adaptive_decrements() {
        let mut strategy = ReadStrategy::default();
        strategy.record(8192);
        assert_eq!(strategy.next(), 16384);

        strategy.record(1);
        assert_eq!(
            strategy.next(),
            16384,
            "first smaller record doesn't decrement yet"
        );
        strategy.record(8192);
        assert_eq!(strategy.next(), 16384, "record was within range");

        strategy.record(1);
        assert_eq!(
            strategy.next(),
            16384,
            "in-range record should make this the 'first' again"
        );

        strategy.record(1);
        assert_eq!(strategy.next(), 8192, "second smaller record decrements");

        strategy.record(1);
        assert_eq!(strategy.next(), 8192, "first doesn't decrement");
        strategy.record(1);
        assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum");
    }

    /// Reads that fall between the previous and current suggestion
    /// (e.g. 8193 when next() is 16384) leave the strategy unchanged.
    #[test]
    fn read_strategy_adaptive_stays_the_same() {
        let mut strategy = ReadStrategy::default();
        strategy.record(8192);
        assert_eq!(strategy.next(), 16384);

        strategy.record(8193);
        assert_eq!(
            strategy.next(),
            16384,
            "first smaller record doesn't decrement yet"
        );

        strategy.record(8193);
        assert_eq!(
            strategy.next(),
            16384,
            "with current step does not decrement"
        );
    }

    /// Fuzz a spread of `max` caps (including non-powers-of-two and
    /// `usize::MAX`): growing to the cap and then shrinking back down
    /// must always yield power-of-two suggestions.
    #[test]
    fn read_strategy_adaptive_max_fuzz() {
        fn fuzz(max: usize) {
            let mut strategy = ReadStrategy::with_max(max);
            // Grow until the cap is reached...
            while strategy.next() < max {
                strategy.record(usize::MAX);
            }
            // ...then shrink back toward the floor, two undersized
            // records per step (see the decrement test above).
            let mut next = strategy.next();
            while next > 8192 {
                strategy.record(1);
                strategy.record(1);
                next = strategy.next();
                assert!(
                    next.is_power_of_two(),
                    "decrement should be powers of two: {next} (max = {max})",
                );
            }
        }

        // Step max by 1.5x each round to hit non-power-of-two caps.
        let mut max = 8192;
        while max < usize::MAX {
            fuzz(max);
            max = (max / 2).saturating_mul(3);
        }
        fuzz(usize::MAX);
    }

    /// Buffering an empty body chunk is a caller bug and must trip the
    /// `debug_assert` inside `buffer`.
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)] // needs to trigger a debug_assert
    fn write_buf_requires_non_empty_bufs() {
        let mock = Mock::new().build();
        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);

        buffered.buffer(Cursor::new(Vec::new()));
    }

    /// With `WriteStrategy::Flatten` every buffered chunk is copied
    /// into the headers buffer: the queue stays empty and the mock
    /// transport sees one contiguous write.
    #[tokio::test]
    async fn write_buf_flatten() {
        let _ = pretty_env_logger::try_init();

        let mock = Mock::new()
            .write(b"hello world, it's crate::core:!")
            .build();

        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
        buffered.write_buf.set_strategy(WriteStrategy::Flatten);

        buffered.headers_buf().extend(b"hello ");
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"crate::core:!".to_vec()));
        // All chunks were flattened; nothing remains queued.
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);

        buffered.flush().await.expect("flush");
    }

    /// Flattening into a partially-flushed headers buffer: chunks that
    /// fit are appended in place, but a chunk that would exceed the
    /// buffer's capacity must be queued without re-copying old bytes.
    #[test]
    fn write_buf_flatten_partially_flushed() {
        let _ = pretty_env_logger::try_init();

        let b = |s: &str| Cursor::new(s.as_bytes().to_vec());

        let mut write_buf = WriteBuf::<Cursor<Vec<u8>>>::new(WriteStrategy::Flatten);

        write_buf.buffer(b("hello "));
        write_buf.buffer(b("world, "));

        assert_eq!(write_buf.chunk(), b"hello world, ");

        // advance most of the way, but not all
        write_buf.advance(11);

        assert_eq!(write_buf.chunk(), b", ");
        assert_eq!(write_buf.headers.pos, 11);
        assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE);

        // there's still room in the headers buffer, so just push on the end
        write_buf.buffer(b("it's crate::core:!"));

        assert_eq!(write_buf.chunk(), b", it's crate::core:!");
        assert_eq!(write_buf.headers.pos, 11);

        let rem1 = write_buf.remaining();
        let cap = write_buf.headers.bytes.capacity();

        // but when this would go over capacity, don't copy the old bytes
        write_buf.buffer(Cursor::new(vec![b'X'; cap]));
        assert_eq!(write_buf.remaining(), cap + rem1);
        assert_eq!(write_buf.headers.pos, 0);
    }

    /// With `WriteStrategy::Queue` the three body chunks stay queued
    /// alongside the headers, each reaches the transport as its own
    /// write, and flushing drains the queue completely.
    #[tokio::test]
    async fn write_buf_queue_disable_auto() {
        let _ = pretty_env_logger::try_init();

        let mock = Mock::new()
            .write(b"hello ")
            .write(b"world, ")
            .write(b"it's ")
            .write(b"crate::core:!")
            .build();

        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
        buffered.write_buf.set_strategy(WriteStrategy::Queue);

        // we have 4 buffers, and vec IO disabled, but explicitly said
        // don't try to auto detect (via setting strategy above)

        buffered.headers_buf().extend(b"hello ");
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"crate::core:!".to_vec()));
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);

        buffered.flush().await.expect("flush");

        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
    }

    // #[cfg(feature = "nightly")]
    // #[bench]
    // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) {
    //     let s = "Hello, World!";
    //     b.bytes = s.len() as u64;

    //     let mut write_buf = WriteBuf::<bytes::Bytes>::new();
    //     write_buf.set_strategy(WriteStrategy::Flatten);
    //     b.iter(|| {
    //         let chunk = bytes::Bytes::from(s);
    //         write_buf.buffer(chunk);
    //         ::test::black_box(&write_buf);
    //         write_buf.headers.bytes.clear();
    //     })
    // }
}
|