wreq-rb 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Cargo.lock +2688 -0
- data/Cargo.toml +6 -0
- data/README.md +179 -0
- data/ext/wreq_rb/Cargo.toml +39 -0
- data/ext/wreq_rb/extconf.rb +22 -0
- data/ext/wreq_rb/src/client.rs +565 -0
- data/ext/wreq_rb/src/error.rs +25 -0
- data/ext/wreq_rb/src/lib.rs +20 -0
- data/ext/wreq_rb/src/response.rs +132 -0
- data/lib/wreq-rb/version.rb +5 -0
- data/lib/wreq-rb.rb +17 -0
- data/patches/0001-add-transfer-size-tracking.patch +292 -0
- data/vendor/wreq/Cargo.toml +306 -0
- data/vendor/wreq/LICENSE +202 -0
- data/vendor/wreq/README.md +122 -0
- data/vendor/wreq/examples/cert_store.rs +77 -0
- data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
- data/vendor/wreq/examples/emulation.rs +118 -0
- data/vendor/wreq/examples/form.rs +14 -0
- data/vendor/wreq/examples/http1_websocket.rs +37 -0
- data/vendor/wreq/examples/http2_websocket.rs +45 -0
- data/vendor/wreq/examples/json_dynamic.rs +41 -0
- data/vendor/wreq/examples/json_typed.rs +47 -0
- data/vendor/wreq/examples/keylog.rs +16 -0
- data/vendor/wreq/examples/request_with_emulation.rs +115 -0
- data/vendor/wreq/examples/request_with_interface.rs +37 -0
- data/vendor/wreq/examples/request_with_local_address.rs +16 -0
- data/vendor/wreq/examples/request_with_proxy.rs +13 -0
- data/vendor/wreq/examples/request_with_redirect.rs +22 -0
- data/vendor/wreq/examples/request_with_version.rs +15 -0
- data/vendor/wreq/examples/tor_socks.rs +24 -0
- data/vendor/wreq/examples/unix_socket.rs +33 -0
- data/vendor/wreq/src/client/body.rs +304 -0
- data/vendor/wreq/src/client/conn/conn.rs +231 -0
- data/vendor/wreq/src/client/conn/connector.rs +549 -0
- data/vendor/wreq/src/client/conn/http.rs +1023 -0
- data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
- data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
- data/vendor/wreq/src/client/conn/proxy.rs +39 -0
- data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
- data/vendor/wreq/src/client/conn/uds.rs +44 -0
- data/vendor/wreq/src/client/conn/verbose.rs +149 -0
- data/vendor/wreq/src/client/conn.rs +323 -0
- data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
- data/vendor/wreq/src/client/core/body/length.rs +118 -0
- data/vendor/wreq/src/client/core/body.rs +34 -0
- data/vendor/wreq/src/client/core/common/buf.rs +149 -0
- data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
- data/vendor/wreq/src/client/core/common/watch.rs +76 -0
- data/vendor/wreq/src/client/core/common.rs +3 -0
- data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
- data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
- data/vendor/wreq/src/client/core/conn.rs +11 -0
- data/vendor/wreq/src/client/core/dispatch.rs +299 -0
- data/vendor/wreq/src/client/core/error.rs +435 -0
- data/vendor/wreq/src/client/core/ext.rs +201 -0
- data/vendor/wreq/src/client/core/http1.rs +178 -0
- data/vendor/wreq/src/client/core/http2.rs +483 -0
- data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
- data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
- data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
- data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
- data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
- data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
- data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
- data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
- data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
- data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
- data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
- data/vendor/wreq/src/client/core/proto.rs +58 -0
- data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
- data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
- data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
- data/vendor/wreq/src/client/core/rt.rs +25 -0
- data/vendor/wreq/src/client/core/upgrade.rs +267 -0
- data/vendor/wreq/src/client/core.rs +16 -0
- data/vendor/wreq/src/client/emulation.rs +161 -0
- data/vendor/wreq/src/client/http/client/error.rs +142 -0
- data/vendor/wreq/src/client/http/client/exec.rs +29 -0
- data/vendor/wreq/src/client/http/client/extra.rs +77 -0
- data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
- data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
- data/vendor/wreq/src/client/http/client/util.rs +104 -0
- data/vendor/wreq/src/client/http/client.rs +1003 -0
- data/vendor/wreq/src/client/http/future.rs +99 -0
- data/vendor/wreq/src/client/http.rs +1629 -0
- data/vendor/wreq/src/client/layer/config/options.rs +156 -0
- data/vendor/wreq/src/client/layer/config.rs +116 -0
- data/vendor/wreq/src/client/layer/cookie.rs +161 -0
- data/vendor/wreq/src/client/layer/decoder.rs +139 -0
- data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
- data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
- data/vendor/wreq/src/client/layer/redirect.rs +145 -0
- data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
- data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
- data/vendor/wreq/src/client/layer/retry.rs +151 -0
- data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
- data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
- data/vendor/wreq/src/client/layer/timeout.rs +177 -0
- data/vendor/wreq/src/client/layer.rs +15 -0
- data/vendor/wreq/src/client/multipart.rs +717 -0
- data/vendor/wreq/src/client/request.rs +818 -0
- data/vendor/wreq/src/client/response.rs +534 -0
- data/vendor/wreq/src/client/ws/json.rs +99 -0
- data/vendor/wreq/src/client/ws/message.rs +453 -0
- data/vendor/wreq/src/client/ws.rs +714 -0
- data/vendor/wreq/src/client.rs +27 -0
- data/vendor/wreq/src/config.rs +140 -0
- data/vendor/wreq/src/cookie.rs +579 -0
- data/vendor/wreq/src/dns/gai.rs +249 -0
- data/vendor/wreq/src/dns/hickory.rs +78 -0
- data/vendor/wreq/src/dns/resolve.rs +180 -0
- data/vendor/wreq/src/dns.rs +69 -0
- data/vendor/wreq/src/error.rs +502 -0
- data/vendor/wreq/src/ext.rs +398 -0
- data/vendor/wreq/src/hash.rs +143 -0
- data/vendor/wreq/src/header.rs +506 -0
- data/vendor/wreq/src/into_uri.rs +187 -0
- data/vendor/wreq/src/lib.rs +586 -0
- data/vendor/wreq/src/proxy/mac.rs +82 -0
- data/vendor/wreq/src/proxy/matcher.rs +806 -0
- data/vendor/wreq/src/proxy/uds.rs +66 -0
- data/vendor/wreq/src/proxy/win.rs +31 -0
- data/vendor/wreq/src/proxy.rs +569 -0
- data/vendor/wreq/src/redirect.rs +575 -0
- data/vendor/wreq/src/retry.rs +198 -0
- data/vendor/wreq/src/sync.rs +129 -0
- data/vendor/wreq/src/tls/conn/cache.rs +123 -0
- data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
- data/vendor/wreq/src/tls/conn/ext.rs +82 -0
- data/vendor/wreq/src/tls/conn/macros.rs +34 -0
- data/vendor/wreq/src/tls/conn/service.rs +138 -0
- data/vendor/wreq/src/tls/conn.rs +681 -0
- data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
- data/vendor/wreq/src/tls/keylog.rs +99 -0
- data/vendor/wreq/src/tls/options.rs +464 -0
- data/vendor/wreq/src/tls/x509/identity.rs +122 -0
- data/vendor/wreq/src/tls/x509/parser.rs +71 -0
- data/vendor/wreq/src/tls/x509/store.rs +228 -0
- data/vendor/wreq/src/tls/x509.rs +68 -0
- data/vendor/wreq/src/tls.rs +154 -0
- data/vendor/wreq/src/trace.rs +55 -0
- data/vendor/wreq/src/util.rs +122 -0
- data/vendor/wreq/tests/badssl.rs +228 -0
- data/vendor/wreq/tests/brotli.rs +350 -0
- data/vendor/wreq/tests/client.rs +1098 -0
- data/vendor/wreq/tests/connector_layers.rs +227 -0
- data/vendor/wreq/tests/cookie.rs +306 -0
- data/vendor/wreq/tests/deflate.rs +347 -0
- data/vendor/wreq/tests/emulation.rs +260 -0
- data/vendor/wreq/tests/gzip.rs +347 -0
- data/vendor/wreq/tests/layers.rs +261 -0
- data/vendor/wreq/tests/multipart.rs +165 -0
- data/vendor/wreq/tests/proxy.rs +438 -0
- data/vendor/wreq/tests/redirect.rs +629 -0
- data/vendor/wreq/tests/retry.rs +135 -0
- data/vendor/wreq/tests/support/delay_server.rs +117 -0
- data/vendor/wreq/tests/support/error.rs +16 -0
- data/vendor/wreq/tests/support/layer.rs +183 -0
- data/vendor/wreq/tests/support/mod.rs +9 -0
- data/vendor/wreq/tests/support/server.rs +232 -0
- data/vendor/wreq/tests/timeouts.rs +281 -0
- data/vendor/wreq/tests/unix_socket.rs +135 -0
- data/vendor/wreq/tests/upgrade.rs +98 -0
- data/vendor/wreq/tests/zstd.rs +559 -0
- metadata +225 -0
|
@@ -0,0 +1,539 @@
|
|
|
1
|
+
//! HTTP2 Ping usage
|
|
2
|
+
//!
|
|
3
|
+
//! core uses HTTP2 pings for two purposes:
|
|
4
|
+
//!
|
|
5
|
+
//! 1. Adaptive flow control using BDP
|
|
6
|
+
//! 2. Connection keep-alive
|
|
7
|
+
//!
|
|
8
|
+
//! Both cases are optional.
|
|
9
|
+
//!
|
|
10
|
+
//! # BDP Algorithm
|
|
11
|
+
//!
|
|
12
|
+
//! 1. When receiving a DATA frame, if a BDP ping isn't outstanding: 1a. Record current time. 1b.
|
|
13
|
+
//! Send a BDP ping.
|
|
14
|
+
//! 2. Increment the number of received bytes.
|
|
15
|
+
//! 3. When the BDP ping ack is received: 3a. Record duration from sent time. 3b. Merge RTT with a
|
|
16
|
+
//! running average. 3c. Calculate bdp as bytes/rtt. 3d. If bdp is over 2/3 max, set new max to
|
|
17
|
+
//! bdp and update windows.
|
|
18
|
+
|
|
19
|
+
use std::{
|
|
20
|
+
fmt,
|
|
21
|
+
future::Future,
|
|
22
|
+
pin::Pin,
|
|
23
|
+
sync::Arc,
|
|
24
|
+
task::{self, Poll},
|
|
25
|
+
time::{Duration, Instant},
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
use http2::{Ping, PingPong};
|
|
29
|
+
|
|
30
|
+
use crate::{
|
|
31
|
+
client::core::{
|
|
32
|
+
self, Error,
|
|
33
|
+
rt::{Sleep, Time},
|
|
34
|
+
},
|
|
35
|
+
sync::Mutex,
|
|
36
|
+
};
|
|
37
|
+
|
|
38
|
+
type WindowSize = u32;
|
|
39
|
+
|
|
40
|
+
/// Creates a `Recorder` with no shared state: every call on it is a no-op.
///
/// Used when neither BDP sampling nor keep-alive is configured for the
/// connection (see `Config::is_enabled`).
pub(super) fn disabled() -> Recorder {
    Recorder { shared: None }
}
|
|
43
|
+
|
|
44
|
+
/// Wires up the ping machinery for one HTTP/2 connection.
///
/// Returns the paired halves of the shared state:
/// - a `Recorder`, cloned into each stream to report received frames, and
/// - a `Ponger`, polled by the connection task to observe pong acks,
///   produce window-size updates, and drive keep-alive.
///
/// Requires at least one of BDP or keep-alive to be enabled in `config`.
pub(super) fn channel(ping_pong: PingPong, config: Config, timer: Time) -> (Recorder, Ponger) {
    debug_assert!(
        config.is_enabled(),
        "ping channel requires bdp or keep-alive config",
    );

    // BDP estimator starts from the configured initial window with no
    // bandwidth/rtt samples yet; first ping is delayed 100ms.
    let bdp = config.bdp_initial_window.map(|wnd| Bdp {
        bdp: wnd,
        max_bandwidth: 0.0,
        rtt: 0.0,
        ping_delay: Duration::from_millis(100),
        stable_count: 0,
    });

    let now = timer.now();

    // Byte counting and the "next BDP ping" deadline exist only when BDP
    // sampling is enabled.
    let (bytes, next_bdp_at) = if bdp.is_some() {
        (Some(0), Some(now))
    } else {
        (None, None)
    };

    let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {
        interval,
        timeout: config.keep_alive_timeout,
        while_idle: config.keep_alive_while_idle,
        sleep: timer.sleep(interval),
        state: KeepAliveState::Init,
        timer: timer.clone(),
    });

    // `last_read_at` is the keep-alive "enabled" marker in Shared; it is
    // only initialized when keep-alive is configured.
    let last_read_at = keep_alive.as_ref().map(|_| now);

    let shared = Arc::new(Mutex::new(Shared {
        bytes,
        last_read_at,
        is_keep_alive_timed_out: false,
        ping_pong,
        ping_sent_at: None,
        next_bdp_at,
        timer,
    }));

    (
        Recorder {
            shared: Some(shared.clone()),
        },
        Ponger {
            bdp,
            keep_alive,
            shared,
        },
    )
}
|
|
98
|
+
|
|
99
|
+
/// Tunables for HTTP/2 ping behavior, supplied by the connection builder.
#[derive(Debug, Clone)]
pub(crate) struct Config {
    /// If `Some`, adaptive (BDP-based) flow control is enabled, starting
    /// from this initial window size.
    bdp_initial_window: Option<WindowSize>,
    /// If no frames are received in this amount of time, a PING frame is sent.
    keep_alive_interval: Option<Duration>,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    keep_alive_timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    keep_alive_while_idle: bool,
}

/// Stream/connection-side handle used to report received frames.
///
/// Cloned per stream. A `None` shared state (see `disabled()`) makes every
/// method a no-op.
#[derive(Clone)]
pub(crate) struct Recorder {
    shared: Option<Arc<Mutex<Shared>>>,
}

/// Task-side half: polled by the connection to observe pong acks, emit
/// window updates, and drive the keep-alive state machine.
pub(super) struct Ponger {
    bdp: Option<Bdp>,
    keep_alive: Option<KeepAlive>,
    shared: Arc<Mutex<Shared>>,
}

/// State shared between all `Recorder` clones and the `Ponger`.
struct Shared {
    ping_pong: PingPong,
    /// When the in-flight ping was sent; `None` when no ping is outstanding.
    ping_sent_at: Option<Instant>,

    // bdp
    /// If `Some`, bdp is enabled, and this tracks how many bytes have been
    /// read during the current sample.
    bytes: Option<usize>,
    /// We delay a variable amount of time between BDP pings. This allows us
    /// to send less pings as the bandwidth stabilizes.
    next_bdp_at: Option<Instant>,

    // keep-alive
    /// If `Some`, keep-alive is enabled, and the Instant is how long ago
    /// the connection read the last frame.
    last_read_at: Option<Instant>,

    /// Set once the keep-alive timeout fires; checked by
    /// `Recorder::ensure_not_timed_out`.
    is_keep_alive_timed_out: bool,
    timer: Time,
}

/// Rolling bandwidth-delay-product estimate used for adaptive flow control.
struct Bdp {
    /// Current BDP in bytes
    bdp: u32,
    /// Largest bandwidth we've seen so far.
    max_bandwidth: f64,
    /// Round trip time in seconds
    rtt: f64,
    /// Delay the next ping by this amount.
    ///
    /// This will change depending on how stable the current bandwidth is.
    ping_delay: Duration,
    /// The count of ping round trips where BDP has stayed the same.
    stable_count: u32,
}

/// Keep-alive ping state machine, driven from `Ponger::poll`.
struct KeepAlive {
    /// If no frames are received in this amount of time, a PING frame is sent.
    interval: Duration,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    while_idle: bool,
    /// Current phase; see `KeepAliveState`.
    state: KeepAliveState,
    /// Timer future, re-armed for either the interval or the timeout.
    sleep: Pin<Box<dyn Sleep>>,
    timer: Time,
}

/// Phases of the keep-alive state machine.
enum KeepAliveState {
    /// Nothing scheduled yet; eligible to arm the next interval sleep.
    Init,
    /// Sleeping until the contained deadline, after which a ping may be sent.
    Scheduled(Instant),
    /// A keep-alive ping is in flight; waiting for the pong or the timeout.
    PingSent,
}

/// Events surfaced by `Ponger::poll`.
pub(super) enum Ponged {
    /// BDP estimate grew; enlarge flow-control windows to this size.
    SizeUpdate(WindowSize),
    /// Keep-alive timeout elapsed without a pong; the connection is dead.
    KeepAliveTimedOut,
}

/// Error marker produced when the keep-alive timeout fires.
#[derive(Debug)]
pub(super) struct KeepAliveTimedOut;
|
|
184
|
+
|
|
185
|
+
// ===== impl Config =====
|
|
186
|
+
|
|
187
|
+
impl Config {
|
|
188
|
+
/// Creates a new `Config` with the specified parameters.
|
|
189
|
+
pub(crate) fn new(
|
|
190
|
+
adaptive_window: bool,
|
|
191
|
+
initial_window_size: u32,
|
|
192
|
+
keep_alive_interval: Option<Duration>,
|
|
193
|
+
keep_alive_timeout: Duration,
|
|
194
|
+
keep_alive_while_idle: bool,
|
|
195
|
+
) -> Self {
|
|
196
|
+
Config {
|
|
197
|
+
bdp_initial_window: if adaptive_window {
|
|
198
|
+
Some(initial_window_size)
|
|
199
|
+
} else {
|
|
200
|
+
None
|
|
201
|
+
},
|
|
202
|
+
keep_alive_interval,
|
|
203
|
+
keep_alive_timeout,
|
|
204
|
+
keep_alive_while_idle,
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
pub(super) fn is_enabled(&self) -> bool {
|
|
209
|
+
self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
|
|
210
|
+
}
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
// ===== impl Recorder =====
|
|
214
|
+
|
|
215
|
+
impl Recorder {
|
|
216
|
+
pub(crate) fn record_data(&self, len: usize) {
|
|
217
|
+
let shared = if let Some(ref shared) = self.shared {
|
|
218
|
+
shared
|
|
219
|
+
} else {
|
|
220
|
+
return;
|
|
221
|
+
};
|
|
222
|
+
|
|
223
|
+
let mut locked = shared.lock();
|
|
224
|
+
|
|
225
|
+
locked.update_last_read_at();
|
|
226
|
+
|
|
227
|
+
// are we ready to send another bdp ping?
|
|
228
|
+
// if not, we don't need to record bytes either
|
|
229
|
+
|
|
230
|
+
if let Some(ref next_bdp_at) = locked.next_bdp_at {
|
|
231
|
+
if Instant::now() < *next_bdp_at {
|
|
232
|
+
return;
|
|
233
|
+
} else {
|
|
234
|
+
locked.next_bdp_at = None;
|
|
235
|
+
}
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
if let Some(ref mut bytes) = locked.bytes {
|
|
239
|
+
*bytes += len;
|
|
240
|
+
} else {
|
|
241
|
+
// no need to send bdp ping if bdp is disabled
|
|
242
|
+
return;
|
|
243
|
+
}
|
|
244
|
+
|
|
245
|
+
if !locked.is_ping_sent() {
|
|
246
|
+
locked.send_ping();
|
|
247
|
+
}
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
pub(crate) fn record_non_data(&self) {
|
|
251
|
+
let shared = if let Some(ref shared) = self.shared {
|
|
252
|
+
shared
|
|
253
|
+
} else {
|
|
254
|
+
return;
|
|
255
|
+
};
|
|
256
|
+
|
|
257
|
+
let mut locked = shared.lock();
|
|
258
|
+
|
|
259
|
+
locked.update_last_read_at();
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
/// If the incoming stream is already closed, convert self into
|
|
263
|
+
/// a disabled reporter.
|
|
264
|
+
pub(super) fn for_stream(self, stream: &http2::RecvStream) -> Self {
|
|
265
|
+
if stream.is_end_stream() {
|
|
266
|
+
disabled()
|
|
267
|
+
} else {
|
|
268
|
+
self
|
|
269
|
+
}
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
pub(super) fn ensure_not_timed_out(&self) -> core::Result<()> {
|
|
273
|
+
if let Some(ref shared) = self.shared {
|
|
274
|
+
let locked = shared.lock();
|
|
275
|
+
if locked.is_keep_alive_timed_out {
|
|
276
|
+
return Err(KeepAliveTimedOut.crate_error());
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// else
|
|
281
|
+
Ok(())
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
// ===== impl Ponger =====
|
|
286
|
+
|
|
287
|
+
impl Ponger {
    /// Polls the ping subsystem on behalf of the connection task.
    ///
    /// Returns `Ready(SizeUpdate)` when the BDP estimate grows,
    /// `Ready(KeepAliveTimedOut)` when a keep-alive pong never arrived, and
    /// `Pending` otherwise. Keep-alive scheduling/pinging happens as a side
    /// effect of each poll.
    pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<Ponged> {
        let mut locked = self.shared.lock();
        // hoping this is fine to move within the lock
        let now = locked.timer.now();

        let is_idle = self.is_idle();

        // Drive keep-alive first: possibly arm the interval sleep, possibly
        // send a keep-alive ping.
        if let Some(ref mut ka) = self.keep_alive {
            ka.maybe_schedule(is_idle, &locked);
            ka.maybe_ping(cx, is_idle, &mut locked);
        }

        if !locked.is_ping_sent() {
            // XXX: this doesn't register a waker...?
            return Poll::Pending;
        }

        match locked.ping_pong.poll_pong(cx) {
            Poll::Ready(Ok(_pong)) => {
                let start = locked
                    .ping_sent_at
                    .expect("pong received implies ping_sent_at");
                locked.ping_sent_at = None;
                let rtt = now - start;
                trace!("recv pong");

                // A pong counts as a read: refresh keep-alive and reschedule.
                if let Some(ref mut ka) = self.keep_alive {
                    locked.update_last_read_at();
                    ka.maybe_schedule(is_idle, &locked);
                    ka.maybe_ping(cx, is_idle, &mut locked);
                }

                // Fold this sample into the BDP estimate and schedule the
                // next BDP ping after the (adaptive) delay.
                if let Some(ref mut bdp) = self.bdp {
                    let bytes = locked.bytes.expect("bdp enabled implies bytes");
                    locked.bytes = Some(0); // reset
                    trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);

                    let update = bdp.calculate(bytes, rtt);
                    locked.next_bdp_at = Some(now + bdp.ping_delay);
                    if let Some(update) = update {
                        return Poll::Ready(Ponged::SizeUpdate(update));
                    }
                }
            }
            Poll::Ready(Err(_e)) => {
                // Pong channel errored (e.g. connection gone); fall through
                // to Pending below.
                debug!("pong error: {}", _e);
            }
            Poll::Pending => {
                // No pong yet: check whether the keep-alive timeout fired.
                // On timeout, keep-alive is dropped and the flag is latched
                // so `Recorder::ensure_not_timed_out` reports the failure.
                if let Some(ref mut ka) = self.keep_alive {
                    if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
                        self.keep_alive = None;
                        locked.is_keep_alive_timed_out = true;
                        return Poll::Ready(Ponged::KeepAliveTimedOut);
                    }
                }
            }
        }

        // XXX: this doesn't register a waker...?
        Poll::Pending
    }

    /// Heuristic idleness check: `shared` is held by this `Ponger` and the
    /// connection-level `Recorder`; any additional strong counts belong to
    /// per-stream `Recorder` clones, so a count <= 2 means no active streams.
    fn is_idle(&self) -> bool {
        Arc::strong_count(&self.shared) <= 2
    }
}
|
|
354
|
+
|
|
355
|
+
// ===== impl Shared =====
|
|
356
|
+
|
|
357
|
+
impl Shared {
|
|
358
|
+
fn send_ping(&mut self) {
|
|
359
|
+
match self.ping_pong.send_ping(Ping::opaque()) {
|
|
360
|
+
Ok(()) => {
|
|
361
|
+
self.ping_sent_at = Some(self.timer.now());
|
|
362
|
+
trace!("sent ping");
|
|
363
|
+
}
|
|
364
|
+
Err(_err) => {
|
|
365
|
+
debug!("error sending ping: {}", _err);
|
|
366
|
+
}
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
fn is_ping_sent(&self) -> bool {
|
|
371
|
+
self.ping_sent_at.is_some()
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
fn update_last_read_at(&mut self) {
|
|
375
|
+
if self.last_read_at.is_some() {
|
|
376
|
+
self.last_read_at = Some(self.timer.now());
|
|
377
|
+
}
|
|
378
|
+
}
|
|
379
|
+
|
|
380
|
+
fn last_read_at(&self) -> Instant {
|
|
381
|
+
self.last_read_at.expect("keep_alive expects last_read_at")
|
|
382
|
+
}
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
// ===== impl Bdp =====
|
|
386
|
+
|
|
387
|
+
/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;

impl Bdp {
    /// Folds one ping sample into the estimate: `bytes` received during the
    /// round trip and the measured `rtt`.
    ///
    /// Returns `Some(new_window)` when the BDP grew and the flow-control
    /// windows should be enlarged, `None` otherwise.
    fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
        // No need to do any math if we're at the limit.
        if self.bdp as usize == BDP_LIMIT {
            self.stabilize_delay();
            return None;
        }

        // average the rtt
        let rtt = seconds(rtt);
        if self.rtt == 0.0 {
            // First sample means rtt is first rtt.
            self.rtt = rtt;
        } else {
            // Weigh this rtt as 1/8 for a moving average.
            self.rtt += (rtt - self.rtt) * 0.125;
        }

        // calculate the current bandwidth
        // (the 1.5 factor discounts the rtt; inherited constant — matches
        // the original implementation)
        let bw = (bytes as f64) / (self.rtt * 1.5);
        trace!("current bandwidth = {:.1}B/s", bw);

        if bw < self.max_bandwidth {
            // not a faster bandwidth, so don't update
            self.stabilize_delay();
            return None;
        } else {
            self.max_bandwidth = bw;
        }

        // if the current `bytes` sample is at least 2/3 the previous
        // bdp, increase to double the current sample.
        if bytes >= self.bdp as usize * 2 / 3 {
            self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;
            trace!("BDP increased to {}", self.bdp);

            // Growth resets the stability tracking and pings more often.
            self.stable_count = 0;
            self.ping_delay /= 2;
            Some(self.bdp)
        } else {
            self.stabilize_delay();
            None
        }
    }

    /// Backs off the ping rate while the estimate is stable: after two
    /// consecutive no-growth samples, quadruple the delay (bounded by the
    /// 10-second check).
    fn stabilize_delay(&mut self) {
        if self.ping_delay < Duration::from_secs(10) {
            self.stable_count += 1;

            if self.stable_count >= 2 {
                self.ping_delay *= 4;
                self.stable_count = 0;
            }
        }
    }
}
|
|
446
|
+
|
|
447
|
+
/// Converts a `Duration` to fractional seconds.
///
/// Delegates to the standard library's `Duration::as_secs_f64`, which
/// computes the same `secs + subsec_nanos / 1e9` value as the previous
/// hand-rolled arithmetic.
fn seconds(dur: Duration) -> f64 {
    dur.as_secs_f64()
}
|
|
452
|
+
|
|
453
|
+
// ===== impl KeepAlive =====
|
|
454
|
+
|
|
455
|
+
impl KeepAlive {
    /// Arms the interval sleep when appropriate.
    ///
    /// From `Init`: schedules unless the connection is idle and
    /// `while_idle` is off. From `PingSent`: reschedules only once the
    /// outstanding ping has been acked (no ping in flight in `shared`).
    /// `Scheduled` is left untouched.
    fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) {
        match self.state {
            KeepAliveState::Init => {
                if !self.while_idle && is_idle {
                    return;
                }

                self.schedule(shared);
            }
            KeepAliveState::PingSent => {
                if shared.is_ping_sent() {
                    return;
                }
                self.schedule(shared);
            }
            KeepAliveState::Scheduled(..) => (),
        }
    }

    /// Arms the sleep for `last_read_at + interval` and records that
    /// deadline in the state.
    fn schedule(&mut self, shared: &Shared) {
        let interval = shared.last_read_at() + self.interval;
        self.state = KeepAliveState::Scheduled(interval);
        self.timer.reset(&mut self.sleep, interval);
    }

    /// Once the scheduled interval elapses, sends a keep-alive ping and
    /// re-arms the sleep as the pong timeout.
    fn maybe_ping(&mut self, cx: &mut task::Context<'_>, is_idle: bool, shared: &mut Shared) {
        match self.state {
            KeepAliveState::Scheduled(at) => {
                // Still sleeping; polling registers the waker.
                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                    return;
                }
                // check if we've received a frame while we were scheduled
                if shared.last_read_at() + self.interval > at {
                    self.state = KeepAliveState::Init;
                    cx.waker().wake_by_ref(); // schedule us again
                    return;
                }
                if !self.while_idle && is_idle {
                    trace!("keep-alive no need to ping when idle and while_idle=false");
                    return;
                }
                trace!("keep-alive interval ({:?}) reached", self.interval);
                shared.send_ping();
                self.state = KeepAliveState::PingSent;
                // Reuse the same sleep as the pong timeout.
                let timeout = self.timer.now() + self.timeout;
                self.timer.reset(&mut self.sleep, timeout);
            }
            KeepAliveState::Init | KeepAliveState::PingSent => (),
        }
    }

    /// After a keep-alive ping was sent, reports `Err(KeepAliveTimedOut)`
    /// once the timeout sleep completes without a pong having arrived.
    fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {
        match self.state {
            KeepAliveState::PingSent => {
                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                    return Ok(());
                }
                trace!("keep-alive timeout ({:?}) reached", self.timeout);
                Err(KeepAliveTimedOut)
            }
            KeepAliveState::Init | KeepAliveState::Scheduled(..) => Ok(()),
        }
    }
}
|
|
520
|
+
|
|
521
|
+
// ===== impl KeepAliveTimedOut =====
|
|
522
|
+
|
|
523
|
+
impl KeepAliveTimedOut {
    /// Wraps this marker into the crate-level HTTP/2 `Error`.
    pub(super) fn crate_error(self) -> Error {
        Error::new(crate::client::core::error::Kind::Http2).with(self)
    }
}

impl fmt::Display for KeepAliveTimedOut {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("keep-alive timed out")
    }
}

impl std::error::Error for KeepAliveTimedOut {
    /// Chains to the generic `TimedOut` error so callers can downcast the
    /// cause to a timeout.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&crate::client::core::error::TimedOut)
    }
}
|