wreq-rb 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167) hide show
  1. checksums.yaml +7 -0
  2. data/Cargo.lock +2688 -0
  3. data/Cargo.toml +6 -0
  4. data/README.md +179 -0
  5. data/ext/wreq_rb/Cargo.toml +39 -0
  6. data/ext/wreq_rb/extconf.rb +22 -0
  7. data/ext/wreq_rb/src/client.rs +565 -0
  8. data/ext/wreq_rb/src/error.rs +25 -0
  9. data/ext/wreq_rb/src/lib.rs +20 -0
  10. data/ext/wreq_rb/src/response.rs +132 -0
  11. data/lib/wreq-rb/version.rb +5 -0
  12. data/lib/wreq-rb.rb +17 -0
  13. data/patches/0001-add-transfer-size-tracking.patch +292 -0
  14. data/vendor/wreq/Cargo.toml +306 -0
  15. data/vendor/wreq/LICENSE +202 -0
  16. data/vendor/wreq/README.md +122 -0
  17. data/vendor/wreq/examples/cert_store.rs +77 -0
  18. data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
  19. data/vendor/wreq/examples/emulation.rs +118 -0
  20. data/vendor/wreq/examples/form.rs +14 -0
  21. data/vendor/wreq/examples/http1_websocket.rs +37 -0
  22. data/vendor/wreq/examples/http2_websocket.rs +45 -0
  23. data/vendor/wreq/examples/json_dynamic.rs +41 -0
  24. data/vendor/wreq/examples/json_typed.rs +47 -0
  25. data/vendor/wreq/examples/keylog.rs +16 -0
  26. data/vendor/wreq/examples/request_with_emulation.rs +115 -0
  27. data/vendor/wreq/examples/request_with_interface.rs +37 -0
  28. data/vendor/wreq/examples/request_with_local_address.rs +16 -0
  29. data/vendor/wreq/examples/request_with_proxy.rs +13 -0
  30. data/vendor/wreq/examples/request_with_redirect.rs +22 -0
  31. data/vendor/wreq/examples/request_with_version.rs +15 -0
  32. data/vendor/wreq/examples/tor_socks.rs +24 -0
  33. data/vendor/wreq/examples/unix_socket.rs +33 -0
  34. data/vendor/wreq/src/client/body.rs +304 -0
  35. data/vendor/wreq/src/client/conn/conn.rs +231 -0
  36. data/vendor/wreq/src/client/conn/connector.rs +549 -0
  37. data/vendor/wreq/src/client/conn/http.rs +1023 -0
  38. data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
  39. data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
  40. data/vendor/wreq/src/client/conn/proxy.rs +39 -0
  41. data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
  42. data/vendor/wreq/src/client/conn/uds.rs +44 -0
  43. data/vendor/wreq/src/client/conn/verbose.rs +149 -0
  44. data/vendor/wreq/src/client/conn.rs +323 -0
  45. data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
  46. data/vendor/wreq/src/client/core/body/length.rs +118 -0
  47. data/vendor/wreq/src/client/core/body.rs +34 -0
  48. data/vendor/wreq/src/client/core/common/buf.rs +149 -0
  49. data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
  50. data/vendor/wreq/src/client/core/common/watch.rs +76 -0
  51. data/vendor/wreq/src/client/core/common.rs +3 -0
  52. data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
  53. data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
  54. data/vendor/wreq/src/client/core/conn.rs +11 -0
  55. data/vendor/wreq/src/client/core/dispatch.rs +299 -0
  56. data/vendor/wreq/src/client/core/error.rs +435 -0
  57. data/vendor/wreq/src/client/core/ext.rs +201 -0
  58. data/vendor/wreq/src/client/core/http1.rs +178 -0
  59. data/vendor/wreq/src/client/core/http2.rs +483 -0
  60. data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
  61. data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
  62. data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
  63. data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
  64. data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
  65. data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
  66. data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
  67. data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
  68. data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
  69. data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
  70. data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
  71. data/vendor/wreq/src/client/core/proto.rs +58 -0
  72. data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
  73. data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
  74. data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
  75. data/vendor/wreq/src/client/core/rt.rs +25 -0
  76. data/vendor/wreq/src/client/core/upgrade.rs +267 -0
  77. data/vendor/wreq/src/client/core.rs +16 -0
  78. data/vendor/wreq/src/client/emulation.rs +161 -0
  79. data/vendor/wreq/src/client/http/client/error.rs +142 -0
  80. data/vendor/wreq/src/client/http/client/exec.rs +29 -0
  81. data/vendor/wreq/src/client/http/client/extra.rs +77 -0
  82. data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
  83. data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
  84. data/vendor/wreq/src/client/http/client/util.rs +104 -0
  85. data/vendor/wreq/src/client/http/client.rs +1003 -0
  86. data/vendor/wreq/src/client/http/future.rs +99 -0
  87. data/vendor/wreq/src/client/http.rs +1629 -0
  88. data/vendor/wreq/src/client/layer/config/options.rs +156 -0
  89. data/vendor/wreq/src/client/layer/config.rs +116 -0
  90. data/vendor/wreq/src/client/layer/cookie.rs +161 -0
  91. data/vendor/wreq/src/client/layer/decoder.rs +139 -0
  92. data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
  93. data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
  94. data/vendor/wreq/src/client/layer/redirect.rs +145 -0
  95. data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
  96. data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
  97. data/vendor/wreq/src/client/layer/retry.rs +151 -0
  98. data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
  99. data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
  100. data/vendor/wreq/src/client/layer/timeout.rs +177 -0
  101. data/vendor/wreq/src/client/layer.rs +15 -0
  102. data/vendor/wreq/src/client/multipart.rs +717 -0
  103. data/vendor/wreq/src/client/request.rs +818 -0
  104. data/vendor/wreq/src/client/response.rs +534 -0
  105. data/vendor/wreq/src/client/ws/json.rs +99 -0
  106. data/vendor/wreq/src/client/ws/message.rs +453 -0
  107. data/vendor/wreq/src/client/ws.rs +714 -0
  108. data/vendor/wreq/src/client.rs +27 -0
  109. data/vendor/wreq/src/config.rs +140 -0
  110. data/vendor/wreq/src/cookie.rs +579 -0
  111. data/vendor/wreq/src/dns/gai.rs +249 -0
  112. data/vendor/wreq/src/dns/hickory.rs +78 -0
  113. data/vendor/wreq/src/dns/resolve.rs +180 -0
  114. data/vendor/wreq/src/dns.rs +69 -0
  115. data/vendor/wreq/src/error.rs +502 -0
  116. data/vendor/wreq/src/ext.rs +398 -0
  117. data/vendor/wreq/src/hash.rs +143 -0
  118. data/vendor/wreq/src/header.rs +506 -0
  119. data/vendor/wreq/src/into_uri.rs +187 -0
  120. data/vendor/wreq/src/lib.rs +586 -0
  121. data/vendor/wreq/src/proxy/mac.rs +82 -0
  122. data/vendor/wreq/src/proxy/matcher.rs +806 -0
  123. data/vendor/wreq/src/proxy/uds.rs +66 -0
  124. data/vendor/wreq/src/proxy/win.rs +31 -0
  125. data/vendor/wreq/src/proxy.rs +569 -0
  126. data/vendor/wreq/src/redirect.rs +575 -0
  127. data/vendor/wreq/src/retry.rs +198 -0
  128. data/vendor/wreq/src/sync.rs +129 -0
  129. data/vendor/wreq/src/tls/conn/cache.rs +123 -0
  130. data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
  131. data/vendor/wreq/src/tls/conn/ext.rs +82 -0
  132. data/vendor/wreq/src/tls/conn/macros.rs +34 -0
  133. data/vendor/wreq/src/tls/conn/service.rs +138 -0
  134. data/vendor/wreq/src/tls/conn.rs +681 -0
  135. data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
  136. data/vendor/wreq/src/tls/keylog.rs +99 -0
  137. data/vendor/wreq/src/tls/options.rs +464 -0
  138. data/vendor/wreq/src/tls/x509/identity.rs +122 -0
  139. data/vendor/wreq/src/tls/x509/parser.rs +71 -0
  140. data/vendor/wreq/src/tls/x509/store.rs +228 -0
  141. data/vendor/wreq/src/tls/x509.rs +68 -0
  142. data/vendor/wreq/src/tls.rs +154 -0
  143. data/vendor/wreq/src/trace.rs +55 -0
  144. data/vendor/wreq/src/util.rs +122 -0
  145. data/vendor/wreq/tests/badssl.rs +228 -0
  146. data/vendor/wreq/tests/brotli.rs +350 -0
  147. data/vendor/wreq/tests/client.rs +1098 -0
  148. data/vendor/wreq/tests/connector_layers.rs +227 -0
  149. data/vendor/wreq/tests/cookie.rs +306 -0
  150. data/vendor/wreq/tests/deflate.rs +347 -0
  151. data/vendor/wreq/tests/emulation.rs +260 -0
  152. data/vendor/wreq/tests/gzip.rs +347 -0
  153. data/vendor/wreq/tests/layers.rs +261 -0
  154. data/vendor/wreq/tests/multipart.rs +165 -0
  155. data/vendor/wreq/tests/proxy.rs +438 -0
  156. data/vendor/wreq/tests/redirect.rs +629 -0
  157. data/vendor/wreq/tests/retry.rs +135 -0
  158. data/vendor/wreq/tests/support/delay_server.rs +117 -0
  159. data/vendor/wreq/tests/support/error.rs +16 -0
  160. data/vendor/wreq/tests/support/layer.rs +183 -0
  161. data/vendor/wreq/tests/support/mod.rs +9 -0
  162. data/vendor/wreq/tests/support/server.rs +232 -0
  163. data/vendor/wreq/tests/timeouts.rs +281 -0
  164. data/vendor/wreq/tests/unix_socket.rs +135 -0
  165. data/vendor/wreq/tests/upgrade.rs +98 -0
  166. data/vendor/wreq/tests/zstd.rs +559 -0
  167. metadata +225 -0
@@ -0,0 +1,1170 @@
1
+ use std::{
2
+ error::Error as StdError,
3
+ fmt, io,
4
+ task::{Context, Poll, ready},
5
+ };
6
+
7
+ use bytes::{BufMut, Bytes, BytesMut};
8
+ use http::{HeaderMap, HeaderName, HeaderValue};
9
+ use http_body::Frame;
10
+
11
+ use self::Kind::{Chunked, Eof, Length};
12
+ use super::{DecodedLength, io::MemRead, role::DEFAULT_MAX_HEADERS};
13
+
14
/// Maximum amount of bytes allowed in chunked extensions.
///
/// This limit is currently applied for the entire body, not per chunk.
const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16;

/// Maximum number of bytes allowed for all trailer fields.
///
/// TODO: remove this when we land h1_max_header_size support
const TRAILER_LIMIT: usize = 1024 * 16;
23
+
24
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Clone, PartialEq)]
pub(crate) struct Decoder {
    // Which framing strategy (fixed length, chunked, or read-to-EOF)
    // this decoder uses; see `Kind`.
    kind: Kind,
}
32
+
33
#[derive(Debug, Clone, PartialEq)]
enum Kind {
    /// A Reader used when a Content-Length header is passed with a positive integer.
    Length(u64),
    /// A Reader used when Transfer-Encoding is `chunked`.
    Chunked {
        // Current position in the chunked framing state machine.
        state: ChunkedState,
        // Bytes still unread in the current chunk's payload.
        chunk_len: u64,
        // Bytes of chunk extensions consumed so far, across the whole body
        // (bounded by `CHUNKED_EXTENSIONS_LIMIT`).
        extensions_cnt: u64,
        // Raw trailer bytes accumulated after the final (zero-size) chunk;
        // `None` until a trailer byte is seen.
        trailers_buf: Option<BytesMut>,
        // Number of trailer lines seen so far.
        trailers_cnt: usize,
        // Optional cap on trailer line count; `None` falls back to
        // `DEFAULT_MAX_HEADERS` at decode time.
        h1_max_headers: Option<usize>,
        // Optional cap on total trailer bytes; `None` falls back to
        // `TRAILER_LIMIT` at decode time.
        h1_max_header_size: Option<usize>,
    },
    /// A Reader used for responses that don't indicate a length or chunked.
    ///
    /// The bool tracks when EOF is seen on the transport.
    ///
    /// Note: This should only used for `Response`s. It is illegal for a
    /// `Request` to be made with both `Content-Length` and
    /// `Transfer-Encoding: chunked` missing, as explained from the spec:
    ///
    /// > If a Transfer-Encoding header field is present in a response and
    /// > the chunked transfer coding is not the final encoding, the
    /// > message body length is determined by reading the connection until
    /// > it is closed by the server. If a Transfer-Encoding header field
    /// > is present in a request and the chunked transfer coding is not
    /// > the final encoding, the message body length cannot be determined
    /// > reliably; the server MUST respond with the 400 (Bad Request)
    /// > status code and then close the connection.
    Eof(bool),
}
65
+
66
// States of the chunked-transfer-coding parser. Each variant names the
// byte(s) expected next on the wire; `ChunkedState::step` dispatches on
// the current variant and returns the successor.
#[derive(Debug, PartialEq, Clone, Copy)]
enum ChunkedState {
    // Expecting the mandatory first hex digit of a chunk-size line.
    Start,
    // Reading additional hex digits of the chunk size.
    Size,
    // Skipping linear whitespace (SP / HTAB) after the size.
    SizeLws,
    // Skipping a chunk extension up to the terminating CR.
    Extension,
    // Expecting the LF that ends the chunk-size line.
    SizeLf,
    // Reading the chunk payload itself.
    Body,
    // Expecting the CR that follows a chunk payload.
    BodyCr,
    // Expecting the LF that follows a chunk payload.
    BodyLf,
    // Accumulating bytes of a trailer field line.
    Trailer,
    // Expecting the LF that ends a trailer line.
    TrailerLf,
    // Expecting either the CR of the final CRLF, or the first byte of a
    // trailer line.
    EndCr,
    // Expecting the LF of the final, message-ending CRLF.
    EndLf,
    // Terminal state: the chunked body is fully decoded.
    End,
}
82
+
83
// Bundles the mutable bookkeeping passed into `ChunkedState::step`,
// avoiding a long positional argument list at each call site.
struct StepArgs<'a> {
    // Remaining byte count of the chunk currently being read/parsed.
    chunk_size: &'a mut u64,
    // Out-param: set to the decoded payload slice when a body read occurs.
    chunk_buf: &'a mut Option<Bytes>,
    // Running count of extension bytes consumed (checked against
    // `CHUNKED_EXTENSIONS_LIMIT`).
    extensions_cnt: &'a mut u64,
    // Accumulator for raw trailer bytes, created lazily on first trailer byte.
    trailers_buf: &'a mut Option<BytesMut>,
    // Running count of trailer lines seen.
    trailers_cnt: &'a mut usize,
    // Maximum number of trailer lines allowed.
    max_headers_cnt: usize,
    // Maximum total bytes of trailer data allowed.
    max_headers_bytes: usize,
}
92
+
93
+ // ===== impl Decoder =====
94
+
95
impl Decoder {
    /// Creates a decoder for a body whose size is fixed at `x` bytes
    /// (i.e. framed by a `Content-Length` header).
    pub(crate) fn length(x: u64) -> Decoder {
        Decoder {
            kind: Kind::Length(x),
        }
    }

    /// Creates a decoder for a `Transfer-Encoding: chunked` body.
    ///
    /// `h1_max_headers` / `h1_max_header_size` bound the count and total
    /// byte size of trailer fields; `None` falls back to
    /// `DEFAULT_MAX_HEADERS` / `TRAILER_LIMIT` at decode time.
    pub(crate) fn chunked(
        h1_max_headers: Option<usize>,
        h1_max_header_size: Option<usize>,
    ) -> Decoder {
        Decoder {
            kind: Kind::Chunked {
                state: ChunkedState::new(),
                chunk_len: 0,
                extensions_cnt: 0,
                trailers_buf: None,
                trailers_cnt: 0,
                h1_max_headers,
                h1_max_header_size,
            },
        }
    }

    /// Creates a decoder for a close-delimited body: read until transport EOF.
    pub(crate) fn eof() -> Decoder {
        Decoder {
            kind: Kind::Eof(false),
        }
    }

    /// Selects the appropriate decoder from an already-parsed `DecodedLength`.
    pub(super) fn new(
        len: DecodedLength,
        h1_max_headers: Option<usize>,
        h1_max_header_size: Option<usize>,
    ) -> Self {
        match len {
            DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size),
            DecodedLength::CLOSE_DELIMITED => Decoder::eof(),
            length => Decoder::length(length.danger_len()),
        }
    }

    /// Returns `true` once the body has been fully decoded:
    /// zero bytes remain, the chunked machine reached `End`, or EOF was seen.
    pub(crate) fn is_eof(&self) -> bool {
        matches!(
            self.kind,
            Length(0)
                | Chunked {
                    state: ChunkedState::End,
                    ..
                }
                | Eof(true)
        )
    }

    /// Polls the next body frame out of `body`.
    ///
    /// Yields data frames while the body continues; an empty data frame
    /// signals end-of-body for `Length` and `Eof` kinds, while `Chunked`
    /// may instead finish with a trailers frame.
    pub(crate) fn decode<R: MemRead>(
        &mut self,
        cx: &mut Context<'_>,
        body: &mut R,
    ) -> Poll<Result<Frame<Bytes>, io::Error>> {
        trace!("decode; state={:?}", self.kind);
        match self.kind {
            Length(ref mut remaining) => {
                if *remaining == 0 {
                    // Body already fully consumed; keep yielding the
                    // empty-frame EOF sentinel.
                    Poll::Ready(Ok(Frame::data(Bytes::new())))
                } else {
                    let to_read = *remaining as usize;
                    let buf = ready!(body.read_mem(cx, to_read))?;
                    let num = buf.as_ref().len() as u64;
                    if num > *remaining {
                        *remaining = 0;
                    } else if num == 0 {
                        // Transport EOF before Content-Length bytes arrived.
                        return Poll::Ready(Err(io::Error::new(
                            io::ErrorKind::UnexpectedEof,
                            IncompleteBody,
                        )));
                    } else {
                        *remaining -= num;
                    }
                    Poll::Ready(Ok(Frame::data(buf)))
                }
            }
            Chunked {
                ref mut state,
                ref mut chunk_len,
                ref mut extensions_cnt,
                ref mut trailers_buf,
                ref mut trailers_cnt,
                ref h1_max_headers,
                ref h1_max_header_size,
            } => {
                let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS);
                let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT);
                loop {
                    let mut buf = None;
                    // advances the chunked state
                    *state = ready!(state.step(
                        cx,
                        body,
                        StepArgs {
                            chunk_size: chunk_len,
                            extensions_cnt,
                            chunk_buf: &mut buf,
                            trailers_buf,
                            trailers_cnt,
                            max_headers_cnt: h1_max_headers,
                            max_headers_bytes: h1_max_header_size,
                        }
                    ))?;
                    if *state == ChunkedState::End {
                        trace!("end of chunked");

                        if trailers_buf.is_some() {
                            trace!("found possible trailers");

                            // decoder enforces that trailers count will not exceed h1_max_headers
                            if *trailers_cnt >= h1_max_headers {
                                return Poll::Ready(Err(io::Error::new(
                                    io::ErrorKind::InvalidData,
                                    "chunk trailers count overflow",
                                )));
                            }
                            match decode_trailers(
                                &mut trailers_buf.take().expect("Trailer is None"),
                                *trailers_cnt,
                            ) {
                                Ok(headers) => {
                                    return Poll::Ready(Ok(Frame::trailers(headers)));
                                }
                                Err(e) => {
                                    return Poll::Ready(Err(e));
                                }
                            }
                        }

                        // No trailers: end-of-body sentinel.
                        return Poll::Ready(Ok(Frame::data(Bytes::new())));
                    }
                    // A step that produced payload bytes returns them
                    // immediately; otherwise loop and keep stepping.
                    if let Some(buf) = buf {
                        return Poll::Ready(Ok(Frame::data(buf)));
                    }
                }
            }
            Eof(ref mut is_eof) => {
                if *is_eof {
                    Poll::Ready(Ok(Frame::data(Bytes::new())))
                } else {
                    // 8192 chosen because its about 2 packets, there probably
                    // won't be that much available, so don't have MemReaders
                    // allocate buffers to big
                    body.read_mem(cx, 8192).map_ok(|slice| {
                        *is_eof = slice.is_empty();
                        Frame::data(slice)
                    })
                }
            }
        }
    }

    /// Test helper: drives `decode` as an async future.
    #[cfg(test)]
    async fn decode_fut<R: MemRead>(&mut self, body: &mut R) -> Result<Frame<Bytes>, io::Error> {
        std::future::poll_fn(move |cx| self.decode(cx, body)).await
    }
}
257
+
258
+ impl fmt::Debug for Decoder {
259
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
260
+ fmt::Debug::fmt(&self.kind, f)
261
+ }
262
+ }
263
+
264
// Reads exactly one byte from `$rdr`, suspending (via `ready!`) until the
// reader has data. A zero-length read means the transport hit EOF in the
// middle of a framing line, which returns an `UnexpectedEof` error from the
// *enclosing* function.
macro_rules! byte (
    ($rdr:ident, $cx:expr) => ({
        let buf = ready!($rdr.read_mem($cx, 1))?;
        if !buf.is_empty() {
            buf[0]
        } else {
            return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof,
                "unexpected EOF during chunk size line")));
        }
    })
);
275
+
276
// Unwraps the `Option` produced by checked arithmetic on the chunk size,
// converting `None` (overflow) into an `InvalidData` error returned from
// the enclosing function.
macro_rules! or_overflow {
    ($e:expr) => (
        match $e {
            Some(val) => val,
            None => return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid chunk size: overflow",
            ))),
        }
    )
}
287
+
288
// Appends one byte to the trailer buffer, then bails out of the enclosing
// function with `InvalidData` once the accumulated trailer bytes reach
// `$limit` (the h1 max-header-size budget).
macro_rules! put_u8 {
    ($trailers_buf:expr, $byte:expr, $limit:expr) => {
        $trailers_buf.put_u8($byte);

        if $trailers_buf.len() >= $limit {
            return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "chunk trailers bytes over limit",
            )));
        }
    };
}
300
+
301
+ // ===== impl ChunkedState =====
302
+
303
impl ChunkedState {
    /// Initial state: expect the first hex digit of a chunk-size line.
    fn new() -> ChunkedState {
        ChunkedState::Start
    }

    /// Advances the state machine by consuming bytes from `body`.
    ///
    /// Returns the successor state; side outputs (decoded payload, trailer
    /// bytes, counters) are written through the `StepArgs` references.
    #[allow(clippy::too_many_arguments)]
    fn step<R: MemRead>(
        &self,
        cx: &mut Context<'_>,
        body: &mut R,
        step: StepArgs<'_>,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        use self::ChunkedState::*;
        match *self {
            Start => ChunkedState::read_start(cx, body, step.chunk_size),
            Size => ChunkedState::read_size(cx, body, step.chunk_size),
            SizeLws => ChunkedState::read_size_lws(cx, body),
            Extension => ChunkedState::read_extension(cx, body, step.extensions_cnt),
            SizeLf => ChunkedState::read_size_lf(cx, body, *step.chunk_size),
            Body => ChunkedState::read_body(cx, body, step.chunk_size, step.chunk_buf),
            BodyCr => ChunkedState::read_body_cr(cx, body),
            BodyLf => ChunkedState::read_body_lf(cx, body),
            Trailer => {
                ChunkedState::read_trailer(cx, body, step.trailers_buf, step.max_headers_bytes)
            }
            TrailerLf => ChunkedState::read_trailer_lf(
                cx,
                body,
                step.trailers_buf,
                step.trailers_cnt,
                step.max_headers_cnt,
                step.max_headers_bytes,
            ),
            EndCr => ChunkedState::read_end_cr(cx, body, step.trailers_buf, step.max_headers_bytes),
            EndLf => ChunkedState::read_end_lf(cx, body, step.trailers_buf, step.max_headers_bytes),
            // End is terminal: stepping it is a no-op.
            End => Poll::Ready(Ok(ChunkedState::End)),
        }
    }

    /// Reads the mandatory *first* hex digit of a chunk-size line.
    ///
    /// Unlike `read_size`, anything other than a hex digit is an error
    /// here, so an empty size line ("\r\n") is rejected.
    fn read_start<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        size: &mut u64,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("Read chunk start");

        let radix = 16;
        match byte!(rdr, cx) {
            b @ b'0'..=b'9' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b - b'0') as u64));
            }
            b @ b'a'..=b'f' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));
            }
            b @ b'A'..=b'F' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));
            }
            _ => {
                return Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Invalid chunk size line: missing size digit",
                )));
            }
        }

        Poll::Ready(Ok(ChunkedState::Size))
    }

    /// Reads subsequent hex digits of the chunk size, or the character
    /// that terminates the size part (LWS, `;` extension, or CR).
    fn read_size<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        size: &mut u64,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("Read chunk hex size");

        let radix = 16;
        match byte!(rdr, cx) {
            b @ b'0'..=b'9' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b - b'0') as u64));
            }
            b @ b'a'..=b'f' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));
            }
            b @ b'A'..=b'F' => {
                *size = or_overflow!(size.checked_mul(radix));
                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));
            }
            b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
            b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
            b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
            _ => {
                return Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Invalid chunk size line: Invalid Size",
                )));
            }
        }
        Poll::Ready(Ok(ChunkedState::Size))
    }

    /// Skips linear whitespace after the size; once LWS starts, further
    /// digits are no longer accepted.
    fn read_size_lws<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("read_size_lws");
        match byte!(rdr, cx) {
            // LWS can follow the chunk size, but no more digits can come
            b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
            b';' => Poll::Ready(Ok(ChunkedState::Extension)),
            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk size linear white space",
            ))),
        }
    }

    /// Consumes (and discards) chunk-extension bytes up to the CR, counting
    /// them against `CHUNKED_EXTENSIONS_LIMIT`.
    fn read_extension<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        extensions_cnt: &mut u64,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("read_extension");
        // We don't care about extensions really at all. Just ignore them.
        // They "end" at the next CRLF.
        //
        // However, some implementations may not check for the CR, so to save
        // them from themselves, we reject extensions containing plain LF as
        // well.
        match byte!(rdr, cx) {
            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
            b'\n' => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid chunk extension contains newline",
            ))),
            _ => {
                *extensions_cnt += 1;
                if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT {
                    Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "chunk extensions over limit",
                    )))
                } else {
                    Poll::Ready(Ok(ChunkedState::Extension))
                }
            } // no supported extensions
        }
    }

    /// Consumes the LF that ends the size line. A size of zero means the
    /// last-chunk marker was read, so control passes to the end/trailer states.
    fn read_size_lf<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        size: u64,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("Chunk size is {:?}", size);
        match byte!(rdr, cx) {
            b'\n' => {
                if size == 0 {
                    Poll::Ready(Ok(ChunkedState::EndCr))
                } else {
                    debug!("incoming chunked header: {0:#X} ({0} bytes)", size);
                    Poll::Ready(Ok(ChunkedState::Body))
                }
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk size LF",
            ))),
        }
    }

    /// Reads chunk payload bytes into `buf`, decrementing `rem`. Stays in
    /// `Body` until the whole chunk is consumed, then expects the chunk CRLF.
    fn read_body<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        rem: &mut u64,
        buf: &mut Option<Bytes>,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("Chunked read, remaining={:?}", rem);

        // cap remaining bytes at the max capacity of usize
        let rem_cap = match *rem {
            r if r > usize::MAX as u64 => usize::MAX,
            r => r as usize,
        };

        let to_read = rem_cap;
        let slice = ready!(rdr.read_mem(cx, to_read))?;
        let count = slice.len();

        if count == 0 {
            // Transport EOF mid-chunk: the body is incomplete.
            *rem = 0;
            return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                IncompleteBody,
            )));
        }
        *buf = Some(slice);
        *rem -= count as u64;

        if *rem > 0 {
            Poll::Ready(Ok(ChunkedState::Body))
        } else {
            Poll::Ready(Ok(ChunkedState::BodyCr))
        }
    }

    /// Consumes the CR immediately after a chunk payload.
    fn read_body_cr<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        match byte!(rdr, cx) {
            b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk body CR",
            ))),
        }
    }

    /// Consumes the LF after a chunk payload; the next chunk-size line follows.
    fn read_body_lf<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        match byte!(rdr, cx) {
            b'\n' => Poll::Ready(Ok(ChunkedState::Start)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk body LF",
            ))),
        }
    }

    /// Accumulates one byte of a trailer line into `trailers_buf`
    /// (which `read_end_cr` has already created), enforcing the byte limit.
    fn read_trailer<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option<BytesMut>,
        h1_max_header_size: usize,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        trace!("read_trailer");
        let byte = byte!(rdr, cx);

        put_u8!(
            trailers_buf.as_mut().expect("trailers_buf is None"),
            byte,
            h1_max_header_size
        );

        match byte {
            b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
            _ => Poll::Ready(Ok(ChunkedState::Trailer)),
        }
    }

    /// Consumes the LF that ends a trailer line, counting the line against
    /// `h1_max_headers` and appending the LF so the buffered trailers stay
    /// parseable as complete header lines.
    fn read_trailer_lf<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option<BytesMut>,
        trailers_cnt: &mut usize,
        h1_max_headers: usize,
        h1_max_header_size: usize,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\n' => {
                // Enforce the line-count cap before admitting this trailer.
                if *trailers_cnt >= h1_max_headers {
                    return Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "chunk trailers count overflow",
                    )));
                }
                *trailers_cnt += 1;

                put_u8!(
                    trailers_buf.as_mut().expect("trailers_buf is None"),
                    byte,
                    h1_max_header_size
                );

                Poll::Ready(Ok(ChunkedState::EndCr))
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid trailer end LF",
            ))),
        }
    }

    /// After the last chunk (or a trailer line): a CR means the final CRLF
    /// is arriving; any other byte starts a (new) trailer line, lazily
    /// allocating `trailers_buf` on the first trailer byte.
    fn read_end_cr<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option<BytesMut>,
        h1_max_header_size: usize,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\r' => {
                if let Some(trailers_buf) = trailers_buf {
                    put_u8!(trailers_buf, byte, h1_max_header_size);
                }
                Poll::Ready(Ok(ChunkedState::EndLf))
            }
            byte => {
                match trailers_buf {
                    None => {
                        // 64 will fit a single Expires header without reallocating
                        let mut buf = BytesMut::with_capacity(64);
                        buf.put_u8(byte);
                        *trailers_buf = Some(buf);
                    }
                    Some(trailers_buf) => {
                        put_u8!(trailers_buf, byte, h1_max_header_size);
                    }
                }

                Poll::Ready(Ok(ChunkedState::Trailer))
            }
        }
    }

    /// Consumes the LF of the final CRLF, completing the chunked body.
    fn read_end_lf<R: MemRead>(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option<BytesMut>,
        h1_max_header_size: usize,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\n' => {
                if let Some(trailers_buf) = trailers_buf {
                    put_u8!(trailers_buf, byte, h1_max_header_size);
                }
                Poll::Ready(Ok(ChunkedState::End))
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk end LF",
            ))),
        }
    }
}
642
+
643
+ // TODO: disallow Transfer-Encoding, Content-Length, Trailer, etc in trailers ??
644
+ fn decode_trailers(buf: &mut BytesMut, count: usize) -> Result<HeaderMap, io::Error> {
645
+ let mut trailers = HeaderMap::new();
646
+ let mut headers = vec![httparse::EMPTY_HEADER; count];
647
+ let res = httparse::parse_headers(buf, &mut headers);
648
+ match res {
649
+ Ok(httparse::Status::Complete((_, headers))) => {
650
+ for header in headers.iter() {
651
+ use std::convert::TryFrom;
652
+ let name = match HeaderName::try_from(header.name) {
653
+ Ok(name) => name,
654
+ Err(_) => {
655
+ return Err(io::Error::new(
656
+ io::ErrorKind::InvalidInput,
657
+ format!("Invalid header name: {:?}", &header),
658
+ ));
659
+ }
660
+ };
661
+
662
+ let value = match HeaderValue::from_bytes(header.value) {
663
+ Ok(value) => value,
664
+ Err(_) => {
665
+ return Err(io::Error::new(
666
+ io::ErrorKind::InvalidInput,
667
+ format!("Invalid header value: {:?}", &header),
668
+ ));
669
+ }
670
+ };
671
+
672
+ trailers.insert(name, value);
673
+ }
674
+
675
+ Ok(trailers)
676
+ }
677
+ Ok(httparse::Status::Partial) => Err(io::Error::new(
678
+ io::ErrorKind::InvalidInput,
679
+ "Partial header",
680
+ )),
681
+ Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
682
+ }
683
+ }
684
+
685
// Error payload used when the transport reaches EOF before the framed
// body length (Content-Length or chunked) was fully read.
#[derive(Debug)]
struct IncompleteBody;
687
+
688
+ // === impl IncompleteBody ===
689
+
690
+ impl fmt::Display for IncompleteBody {
691
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
692
+ write!(f, "end of file before message length reached")
693
+ }
694
+ }
695
+
696
+ impl StdError for IncompleteBody {}
697
+
698
+ #[cfg(test)]
699
+ mod tests {
700
+ use std::{pin::Pin, time::Duration};
701
+
702
+ use tokio::io::{AsyncRead, ReadBuf};
703
+
704
+ use super::*;
705
+
706
+ impl MemRead for &[u8] {
707
+ fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
708
+ let n = std::cmp::min(len, self.len());
709
+ if n > 0 {
710
+ let (a, b) = self.split_at(n);
711
+ let buf = Bytes::copy_from_slice(a);
712
+ *self = b;
713
+ Poll::Ready(Ok(buf))
714
+ } else {
715
+ Poll::Ready(Ok(Bytes::new()))
716
+ }
717
+ }
718
+ }
719
+
720
+ impl MemRead for &mut (dyn AsyncRead + Unpin) {
721
+ fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
722
+ let mut v = vec![0; len];
723
+ let mut buf = ReadBuf::new(&mut v);
724
+ ready!(Pin::new(self).poll_read(cx, &mut buf)?);
725
+ Poll::Ready(Ok(Bytes::copy_from_slice(buf.filled())))
726
+ }
727
+ }
728
+
729
+ impl MemRead for Bytes {
730
+ fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
731
+ let n = std::cmp::min(len, self.len());
732
+ let ret = self.split_to(n);
733
+ Poll::Ready(Ok(ret))
734
+ }
735
+ }
736
+
737
+ #[tokio::test]
738
+ async fn test_read_chunk_size() {
739
+ use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof};
740
+
741
+ async fn read(s: &str) -> u64 {
742
+ let mut state = ChunkedState::new();
743
+ let rdr = &mut s.as_bytes();
744
+ let mut size = 0;
745
+ let mut ext_cnt = 0;
746
+ let mut trailers_cnt = 0;
747
+ loop {
748
+ let result = std::future::poll_fn(|cx| {
749
+ state.step(
750
+ cx,
751
+ rdr,
752
+ StepArgs {
753
+ chunk_size: &mut size,
754
+ extensions_cnt: &mut ext_cnt,
755
+ chunk_buf: &mut None,
756
+ trailers_buf: &mut None,
757
+ trailers_cnt: &mut trailers_cnt,
758
+ max_headers_cnt: DEFAULT_MAX_HEADERS,
759
+ max_headers_bytes: TRAILER_LIMIT,
760
+ },
761
+ )
762
+ })
763
+ .await;
764
+ let desc = format!("read_size failed for {s:?}");
765
+ state = result.expect(&desc);
766
+ if state == ChunkedState::Body || state == ChunkedState::EndCr {
767
+ break;
768
+ }
769
+ }
770
+ size
771
+ }
772
+
773
+ async fn read_err(s: &str, expected_err: io::ErrorKind) {
774
+ let mut state = ChunkedState::new();
775
+ let rdr = &mut s.as_bytes();
776
+ let mut size = 0;
777
+ let mut ext_cnt = 0;
778
+ let mut trailers_cnt = 0;
779
+ loop {
780
+ let result = std::future::poll_fn(|cx| {
781
+ state.step(
782
+ cx,
783
+ rdr,
784
+ StepArgs {
785
+ chunk_size: &mut size,
786
+ extensions_cnt: &mut ext_cnt,
787
+ chunk_buf: &mut None,
788
+ trailers_buf: &mut None,
789
+ trailers_cnt: &mut trailers_cnt,
790
+ max_headers_cnt: DEFAULT_MAX_HEADERS,
791
+ max_headers_bytes: TRAILER_LIMIT,
792
+ },
793
+ )
794
+ })
795
+ .await;
796
+ state = match result {
797
+ Ok(s) => s,
798
+ Err(e) => {
799
+ assert!(
800
+ expected_err == e.kind(),
801
+ "Reading {:?}, expected {:?}, but got {:?}",
802
+ s,
803
+ expected_err,
804
+ e.kind()
805
+ );
806
+ return;
807
+ }
808
+ };
809
+ if state == ChunkedState::Body || state == ChunkedState::End {
810
+ panic!("Was Ok. Expected Err for {s:?}");
811
+ }
812
+ }
813
+ }
814
+
815
+ assert_eq!(1, read("1\r\n").await);
816
+ assert_eq!(1, read("01\r\n").await);
817
+ assert_eq!(0, read("0\r\n").await);
818
+ assert_eq!(0, read("00\r\n").await);
819
+ assert_eq!(10, read("A\r\n").await);
820
+ assert_eq!(10, read("a\r\n").await);
821
+ assert_eq!(255, read("Ff\r\n").await);
822
+ assert_eq!(255, read("Ff \r\n").await);
823
+ // Missing LF or CRLF
824
+ read_err("F\rF", InvalidInput).await;
825
+ read_err("F", UnexpectedEof).await;
826
+ // Missing digit
827
+ read_err("\r\n\r\n", InvalidInput).await;
828
+ read_err("\r\n", InvalidInput).await;
829
+ // Invalid hex digit
830
+ read_err("X\r\n", InvalidInput).await;
831
+ read_err("1X\r\n", InvalidInput).await;
832
+ read_err("-\r\n", InvalidInput).await;
833
+ read_err("-1\r\n", InvalidInput).await;
834
+ // Acceptable (if not fully valid) extensions do not influence the size
835
+ assert_eq!(1, read("1;extension\r\n").await);
836
+ assert_eq!(10, read("a;ext name=value\r\n").await);
837
+ assert_eq!(1, read("1;extension;extension2\r\n").await);
838
+ assert_eq!(1, read("1;;; ;\r\n").await);
839
+ assert_eq!(2, read("2; extension...\r\n").await);
840
+ assert_eq!(3, read("3 ; extension=123\r\n").await);
841
+ assert_eq!(3, read("3 ;\r\n").await);
842
+ assert_eq!(3, read("3 ; \r\n").await);
843
+ // Invalid extensions cause an error
844
+ read_err("1 invalid extension\r\n", InvalidInput).await;
845
+ read_err("1 A\r\n", InvalidInput).await;
846
+ read_err("1;no CRLF", UnexpectedEof).await;
847
+ read_err("1;reject\nnewlines\r\n", InvalidData).await;
848
+ // Overflow
849
+ read_err("f0000000000000003\r\n", InvalidData).await;
850
+ }
851
+
852
+ #[tokio::test]
853
+ async fn test_read_sized_early_eof() {
854
+ let mut bytes = &b"foo bar"[..];
855
+ let mut decoder = Decoder::length(10);
856
+ assert_eq!(
857
+ decoder
858
+ .decode_fut(&mut bytes)
859
+ .await
860
+ .unwrap()
861
+ .data_ref()
862
+ .unwrap()
863
+ .len(),
864
+ 7
865
+ );
866
+ let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
867
+ assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
868
+ }
869
+
870
+ #[tokio::test]
871
+ async fn test_read_chunked_early_eof() {
872
+ let mut bytes = &b"\
873
+ 9\r\n\
874
+ foo bar\
875
+ "[..];
876
+ let mut decoder = Decoder::chunked(None, None);
877
+ assert_eq!(
878
+ decoder
879
+ .decode_fut(&mut bytes)
880
+ .await
881
+ .unwrap()
882
+ .data_ref()
883
+ .unwrap()
884
+ .len(),
885
+ 7
886
+ );
887
+ let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
888
+ assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
889
+ }
890
+
891
+ #[tokio::test]
892
+ async fn test_read_chunked_single_read() {
893
+ let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
894
+ let buf = Decoder::chunked(None, None)
895
+ .decode_fut(&mut mock_buf)
896
+ .await
897
+ .expect("decode")
898
+ .into_data()
899
+ .expect("unknown frame type");
900
+ assert_eq!(16, buf.len());
901
+ let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
902
+ assert_eq!("1234567890abcdef", &result);
903
+ }
904
+
905
+ #[tokio::test]
906
+ async fn test_read_chunked_with_missing_zero_digit() {
907
+ // After reading a valid chunk, the ending is missing a zero.
908
+ let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..];
909
+ let mut decoder = Decoder::chunked(None, None);
910
+ let buf = decoder
911
+ .decode_fut(&mut mock_buf)
912
+ .await
913
+ .expect("decode")
914
+ .into_data()
915
+ .expect("unknown frame type");
916
+ assert_eq!("Z", buf);
917
+
918
+ let err = decoder
919
+ .decode_fut(&mut mock_buf)
920
+ .await
921
+ .expect_err("decode 2");
922
+ assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
923
+ }
924
+
925
+ #[tokio::test]
926
+ async fn test_read_chunked_extensions_over_limit() {
927
+ // construct a chunked body where each individual chunked extension
928
+ // is totally fine, but combined is over the limit.
929
+ let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3;
930
+ let mut scratch = vec![];
931
+ for _ in 0..2 {
932
+ scratch.extend(b"1;");
933
+ scratch.extend(b"x".repeat(per_chunk as usize));
934
+ scratch.extend(b"\r\nA\r\n");
935
+ }
936
+ scratch.extend(b"0\r\n\r\n");
937
+ let mut mock_buf = Bytes::from(scratch);
938
+
939
+ let mut decoder = Decoder::chunked(None, None);
940
+ let buf1 = decoder
941
+ .decode_fut(&mut mock_buf)
942
+ .await
943
+ .expect("decode1")
944
+ .into_data()
945
+ .expect("unknown frame type");
946
+ assert_eq!(&buf1[..], b"A");
947
+
948
+ let err = decoder
949
+ .decode_fut(&mut mock_buf)
950
+ .await
951
+ .expect_err("decode2");
952
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
953
+ assert_eq!(err.to_string(), "chunk extensions over limit");
954
+ }
955
+
956
+ #[tokio::test]
957
+ async fn test_read_chunked_trailer_with_missing_lf() {
958
+ let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
959
+ let mut decoder = Decoder::chunked(None, None);
960
+ decoder.decode_fut(&mut mock_buf).await.expect("decode");
961
+ let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
962
+ assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
963
+ }
964
+
965
+ #[tokio::test]
966
+ async fn test_read_chunked_after_eof() {
967
+ let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
968
+ let mut decoder = Decoder::chunked(None, None);
969
+
970
+ // normal read
971
+ let buf = decoder
972
+ .decode_fut(&mut mock_buf)
973
+ .await
974
+ .unwrap()
975
+ .into_data()
976
+ .expect("unknown frame type");
977
+ assert_eq!(16, buf.len());
978
+ let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
979
+ assert_eq!("1234567890abcdef", &result);
980
+
981
+ // eof read
982
+ let buf = decoder
983
+ .decode_fut(&mut mock_buf)
984
+ .await
985
+ .expect("decode")
986
+ .into_data()
987
+ .expect("unknown frame type");
988
+ assert_eq!(0, buf.len());
989
+
990
+ // ensure read after eof also returns eof
991
+ let buf = decoder
992
+ .decode_fut(&mut mock_buf)
993
+ .await
994
+ .expect("decode")
995
+ .into_data()
996
+ .expect("unknown frame type");
997
+ assert_eq!(0, buf.len());
998
+ }
999
+
1000
+ // perform an async read using a custom buffer size and causing a blocking
1001
+ // read at the specified byte
1002
+ async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String {
1003
+ let mut outs = Vec::new();
1004
+
1005
+ let mut ins = if block_at == 0 {
1006
+ tokio_test::io::Builder::new()
1007
+ .wait(Duration::from_millis(10))
1008
+ .read(content)
1009
+ .build()
1010
+ } else {
1011
+ tokio_test::io::Builder::new()
1012
+ .read(&content[..block_at])
1013
+ .wait(Duration::from_millis(10))
1014
+ .read(&content[block_at..])
1015
+ .build()
1016
+ };
1017
+
1018
+ let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin);
1019
+
1020
+ loop {
1021
+ let buf = decoder
1022
+ .decode_fut(&mut ins)
1023
+ .await
1024
+ .expect("unexpected decode error")
1025
+ .into_data()
1026
+ .expect("unexpected frame type");
1027
+ if buf.is_empty() {
1028
+ break; // eof
1029
+ }
1030
+ outs.extend(buf.as_ref());
1031
+ }
1032
+
1033
+ String::from_utf8(outs).expect("decode String")
1034
+ }
1035
+
1036
+ // iterate over the different ways that this async read could go.
1037
+ // tests blocking a read at each byte along the content - The shotgun approach
1038
+ async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {
1039
+ let content_len = content.len();
1040
+ for block_at in 0..content_len {
1041
+ let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await;
1042
+ assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at);
1043
+ }
1044
+ }
1045
+
1046
+ #[tokio::test]
1047
+ async fn test_read_length_async() {
1048
+ let content = "foobar";
1049
+ all_async_cases(content, content, Decoder::length(content.len() as u64)).await;
1050
+ }
1051
+
1052
+ #[tokio::test]
1053
+ async fn test_read_chunked_async() {
1054
+ let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
1055
+ let expected = "foobar";
1056
+ all_async_cases(content, expected, Decoder::chunked(None, None)).await;
1057
+ }
1058
+
1059
+ #[tokio::test]
1060
+ async fn test_read_eof_async() {
1061
+ let content = "foobar";
1062
+ all_async_cases(content, content, Decoder::eof()).await;
1063
+ }
1064
+
1065
+ #[test]
1066
+ fn test_decode_trailers() {
1067
+ let mut buf = BytesMut::new();
1068
+ buf.extend_from_slice(
1069
+ b"Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\nX-Stream-Error: failed to decode\r\n\r\n",
1070
+ );
1071
+ let headers = decode_trailers(&mut buf, 2).expect("decode_trailers");
1072
+ assert_eq!(headers.len(), 2);
1073
+ assert_eq!(
1074
+ headers.get("Expires").unwrap(),
1075
+ "Wed, 21 Oct 2015 07:28:00 GMT"
1076
+ );
1077
+ assert_eq!(headers.get("X-Stream-Error").unwrap(), "failed to decode");
1078
+ }
1079
+
1080
+ #[tokio::test]
1081
+ async fn test_trailer_max_headers_enforced() {
1082
+ let h1_max_headers = 10;
1083
+ let mut scratch = vec![];
1084
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
1085
+ for i in 0..h1_max_headers {
1086
+ scratch.extend(format!("trailer{i}: {i}\r\n").as_bytes());
1087
+ }
1088
+ scratch.extend(b"\r\n");
1089
+ let mut mock_buf = Bytes::from(scratch);
1090
+
1091
+ let mut decoder = Decoder::chunked(Some(h1_max_headers), None);
1092
+
1093
+ // ready chunked body
1094
+ let buf = decoder
1095
+ .decode_fut(&mut mock_buf)
1096
+ .await
1097
+ .unwrap()
1098
+ .into_data()
1099
+ .expect("unknown frame type");
1100
+ assert_eq!(16, buf.len());
1101
+
1102
+ // eof read
1103
+ let err = decoder
1104
+ .decode_fut(&mut mock_buf)
1105
+ .await
1106
+ .expect_err("trailer fields over limit");
1107
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
1108
+ }
1109
+
1110
+ #[tokio::test]
1111
+ async fn test_trailer_max_header_size_huge_trailer() {
1112
+ let max_header_size = 1024;
1113
+ let mut scratch = vec![];
1114
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
1115
+ scratch.extend(format!("huge_trailer: {}\r\n", "x".repeat(max_header_size)).as_bytes());
1116
+ scratch.extend(b"\r\n");
1117
+ let mut mock_buf = Bytes::from(scratch);
1118
+
1119
+ let mut decoder = Decoder::chunked(None, Some(max_header_size));
1120
+
1121
+ // ready chunked body
1122
+ let buf = decoder
1123
+ .decode_fut(&mut mock_buf)
1124
+ .await
1125
+ .unwrap()
1126
+ .into_data()
1127
+ .expect("unknown frame type");
1128
+ assert_eq!(16, buf.len());
1129
+
1130
+ // eof read
1131
+ let err = decoder
1132
+ .decode_fut(&mut mock_buf)
1133
+ .await
1134
+ .expect_err("trailers over limit");
1135
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
1136
+ }
1137
+
1138
+ #[tokio::test]
1139
+ async fn test_trailer_max_header_size_many_small_trailers() {
1140
+ let max_headers = 10;
1141
+ let header_size = 64;
1142
+ let mut scratch = vec![];
1143
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
1144
+
1145
+ for i in 0..max_headers {
1146
+ scratch.extend(format!("trailer{}: {}\r\n", i, "x".repeat(header_size)).as_bytes());
1147
+ }
1148
+
1149
+ scratch.extend(b"\r\n");
1150
+ let mut mock_buf = Bytes::from(scratch);
1151
+
1152
+ let mut decoder = Decoder::chunked(None, Some(max_headers * header_size));
1153
+
1154
+ // ready chunked body
1155
+ let buf = decoder
1156
+ .decode_fut(&mut mock_buf)
1157
+ .await
1158
+ .unwrap()
1159
+ .into_data()
1160
+ .expect("unknown frame type");
1161
+ assert_eq!(16, buf.len());
1162
+
1163
+ // eof read
1164
+ let err = decoder
1165
+ .decode_fut(&mut mock_buf)
1166
+ .await
1167
+ .expect_err("trailers over limit");
1168
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
1169
+ }
1170
+ }