wreq-rb 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Cargo.lock +2688 -0
- data/Cargo.toml +6 -0
- data/README.md +179 -0
- data/ext/wreq_rb/Cargo.toml +39 -0
- data/ext/wreq_rb/extconf.rb +22 -0
- data/ext/wreq_rb/src/client.rs +565 -0
- data/ext/wreq_rb/src/error.rs +25 -0
- data/ext/wreq_rb/src/lib.rs +20 -0
- data/ext/wreq_rb/src/response.rs +132 -0
- data/lib/wreq-rb/version.rb +5 -0
- data/lib/wreq-rb.rb +17 -0
- data/patches/0001-add-transfer-size-tracking.patch +292 -0
- data/vendor/wreq/Cargo.toml +306 -0
- data/vendor/wreq/LICENSE +202 -0
- data/vendor/wreq/README.md +122 -0
- data/vendor/wreq/examples/cert_store.rs +77 -0
- data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
- data/vendor/wreq/examples/emulation.rs +118 -0
- data/vendor/wreq/examples/form.rs +14 -0
- data/vendor/wreq/examples/http1_websocket.rs +37 -0
- data/vendor/wreq/examples/http2_websocket.rs +45 -0
- data/vendor/wreq/examples/json_dynamic.rs +41 -0
- data/vendor/wreq/examples/json_typed.rs +47 -0
- data/vendor/wreq/examples/keylog.rs +16 -0
- data/vendor/wreq/examples/request_with_emulation.rs +115 -0
- data/vendor/wreq/examples/request_with_interface.rs +37 -0
- data/vendor/wreq/examples/request_with_local_address.rs +16 -0
- data/vendor/wreq/examples/request_with_proxy.rs +13 -0
- data/vendor/wreq/examples/request_with_redirect.rs +22 -0
- data/vendor/wreq/examples/request_with_version.rs +15 -0
- data/vendor/wreq/examples/tor_socks.rs +24 -0
- data/vendor/wreq/examples/unix_socket.rs +33 -0
- data/vendor/wreq/src/client/body.rs +304 -0
- data/vendor/wreq/src/client/conn/conn.rs +231 -0
- data/vendor/wreq/src/client/conn/connector.rs +549 -0
- data/vendor/wreq/src/client/conn/http.rs +1023 -0
- data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
- data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
- data/vendor/wreq/src/client/conn/proxy.rs +39 -0
- data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
- data/vendor/wreq/src/client/conn/uds.rs +44 -0
- data/vendor/wreq/src/client/conn/verbose.rs +149 -0
- data/vendor/wreq/src/client/conn.rs +323 -0
- data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
- data/vendor/wreq/src/client/core/body/length.rs +118 -0
- data/vendor/wreq/src/client/core/body.rs +34 -0
- data/vendor/wreq/src/client/core/common/buf.rs +149 -0
- data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
- data/vendor/wreq/src/client/core/common/watch.rs +76 -0
- data/vendor/wreq/src/client/core/common.rs +3 -0
- data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
- data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
- data/vendor/wreq/src/client/core/conn.rs +11 -0
- data/vendor/wreq/src/client/core/dispatch.rs +299 -0
- data/vendor/wreq/src/client/core/error.rs +435 -0
- data/vendor/wreq/src/client/core/ext.rs +201 -0
- data/vendor/wreq/src/client/core/http1.rs +178 -0
- data/vendor/wreq/src/client/core/http2.rs +483 -0
- data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
- data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
- data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
- data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
- data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
- data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
- data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
- data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
- data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
- data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
- data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
- data/vendor/wreq/src/client/core/proto.rs +58 -0
- data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
- data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
- data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
- data/vendor/wreq/src/client/core/rt.rs +25 -0
- data/vendor/wreq/src/client/core/upgrade.rs +267 -0
- data/vendor/wreq/src/client/core.rs +16 -0
- data/vendor/wreq/src/client/emulation.rs +161 -0
- data/vendor/wreq/src/client/http/client/error.rs +142 -0
- data/vendor/wreq/src/client/http/client/exec.rs +29 -0
- data/vendor/wreq/src/client/http/client/extra.rs +77 -0
- data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
- data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
- data/vendor/wreq/src/client/http/client/util.rs +104 -0
- data/vendor/wreq/src/client/http/client.rs +1003 -0
- data/vendor/wreq/src/client/http/future.rs +99 -0
- data/vendor/wreq/src/client/http.rs +1629 -0
- data/vendor/wreq/src/client/layer/config/options.rs +156 -0
- data/vendor/wreq/src/client/layer/config.rs +116 -0
- data/vendor/wreq/src/client/layer/cookie.rs +161 -0
- data/vendor/wreq/src/client/layer/decoder.rs +139 -0
- data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
- data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
- data/vendor/wreq/src/client/layer/redirect.rs +145 -0
- data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
- data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
- data/vendor/wreq/src/client/layer/retry.rs +151 -0
- data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
- data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
- data/vendor/wreq/src/client/layer/timeout.rs +177 -0
- data/vendor/wreq/src/client/layer.rs +15 -0
- data/vendor/wreq/src/client/multipart.rs +717 -0
- data/vendor/wreq/src/client/request.rs +818 -0
- data/vendor/wreq/src/client/response.rs +534 -0
- data/vendor/wreq/src/client/ws/json.rs +99 -0
- data/vendor/wreq/src/client/ws/message.rs +453 -0
- data/vendor/wreq/src/client/ws.rs +714 -0
- data/vendor/wreq/src/client.rs +27 -0
- data/vendor/wreq/src/config.rs +140 -0
- data/vendor/wreq/src/cookie.rs +579 -0
- data/vendor/wreq/src/dns/gai.rs +249 -0
- data/vendor/wreq/src/dns/hickory.rs +78 -0
- data/vendor/wreq/src/dns/resolve.rs +180 -0
- data/vendor/wreq/src/dns.rs +69 -0
- data/vendor/wreq/src/error.rs +502 -0
- data/vendor/wreq/src/ext.rs +398 -0
- data/vendor/wreq/src/hash.rs +143 -0
- data/vendor/wreq/src/header.rs +506 -0
- data/vendor/wreq/src/into_uri.rs +187 -0
- data/vendor/wreq/src/lib.rs +586 -0
- data/vendor/wreq/src/proxy/mac.rs +82 -0
- data/vendor/wreq/src/proxy/matcher.rs +806 -0
- data/vendor/wreq/src/proxy/uds.rs +66 -0
- data/vendor/wreq/src/proxy/win.rs +31 -0
- data/vendor/wreq/src/proxy.rs +569 -0
- data/vendor/wreq/src/redirect.rs +575 -0
- data/vendor/wreq/src/retry.rs +198 -0
- data/vendor/wreq/src/sync.rs +129 -0
- data/vendor/wreq/src/tls/conn/cache.rs +123 -0
- data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
- data/vendor/wreq/src/tls/conn/ext.rs +82 -0
- data/vendor/wreq/src/tls/conn/macros.rs +34 -0
- data/vendor/wreq/src/tls/conn/service.rs +138 -0
- data/vendor/wreq/src/tls/conn.rs +681 -0
- data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
- data/vendor/wreq/src/tls/keylog.rs +99 -0
- data/vendor/wreq/src/tls/options.rs +464 -0
- data/vendor/wreq/src/tls/x509/identity.rs +122 -0
- data/vendor/wreq/src/tls/x509/parser.rs +71 -0
- data/vendor/wreq/src/tls/x509/store.rs +228 -0
- data/vendor/wreq/src/tls/x509.rs +68 -0
- data/vendor/wreq/src/tls.rs +154 -0
- data/vendor/wreq/src/trace.rs +55 -0
- data/vendor/wreq/src/util.rs +122 -0
- data/vendor/wreq/tests/badssl.rs +228 -0
- data/vendor/wreq/tests/brotli.rs +350 -0
- data/vendor/wreq/tests/client.rs +1098 -0
- data/vendor/wreq/tests/connector_layers.rs +227 -0
- data/vendor/wreq/tests/cookie.rs +306 -0
- data/vendor/wreq/tests/deflate.rs +347 -0
- data/vendor/wreq/tests/emulation.rs +260 -0
- data/vendor/wreq/tests/gzip.rs +347 -0
- data/vendor/wreq/tests/layers.rs +261 -0
- data/vendor/wreq/tests/multipart.rs +165 -0
- data/vendor/wreq/tests/proxy.rs +438 -0
- data/vendor/wreq/tests/redirect.rs +629 -0
- data/vendor/wreq/tests/retry.rs +135 -0
- data/vendor/wreq/tests/support/delay_server.rs +117 -0
- data/vendor/wreq/tests/support/error.rs +16 -0
- data/vendor/wreq/tests/support/layer.rs +183 -0
- data/vendor/wreq/tests/support/mod.rs +9 -0
- data/vendor/wreq/tests/support/server.rs +232 -0
- data/vendor/wreq/tests/timeouts.rs +281 -0
- data/vendor/wreq/tests/unix_socket.rs +135 -0
- data/vendor/wreq/tests/upgrade.rs +98 -0
- data/vendor/wreq/tests/zstd.rs +559 -0
- metadata +225 -0
|
@@ -0,0 +1,1105 @@
|
|
|
1
|
+
use std::{
|
|
2
|
+
collections::VecDeque,
|
|
3
|
+
convert::Infallible,
|
|
4
|
+
error::Error as StdError,
|
|
5
|
+
fmt::{self, Debug},
|
|
6
|
+
future::Future,
|
|
7
|
+
hash::Hash,
|
|
8
|
+
num::NonZero,
|
|
9
|
+
ops::{Deref, DerefMut},
|
|
10
|
+
pin::Pin,
|
|
11
|
+
sync::{Arc, Weak},
|
|
12
|
+
task::{self, Poll, ready},
|
|
13
|
+
time::{Duration, Instant},
|
|
14
|
+
};
|
|
15
|
+
|
|
16
|
+
use schnellru::ByLength;
|
|
17
|
+
use tokio::sync::oneshot;
|
|
18
|
+
|
|
19
|
+
use super::exec::{self, Exec};
|
|
20
|
+
use crate::{
|
|
21
|
+
client::core::rt::{ArcTimer, Executor, Timer},
|
|
22
|
+
hash::{HASHER, HashMap, HashSet, LruMap},
|
|
23
|
+
sync::Mutex,
|
|
24
|
+
};
|
|
25
|
+
|
|
26
|
+
/// A pool of idle connections, shared between client handles via an
/// `Arc<Mutex<..>>` around the interior state.
pub struct Pool<T, K: Key> {
    // If the pool is disabled, this is None.
    inner: Option<Arc<Mutex<PoolInner<T, K>>>>,
}
|
|
30
|
+
|
|
31
|
+
// Before using a pooled connection, make sure the sender is not dead.
//
// This is a trait to allow the `client::pool::tests` to work for `i32`.
//
// See https://github.com/hyperium/hyper/issues/1429
pub trait Poolable: Unpin + Send + Sized + 'static {
    /// Returns `true` while the underlying connection is still usable.
    fn is_open(&self) -> bool;
    /// Reserve this connection.
    ///
    /// Allows for HTTP/2 to return a shared reservation.
    fn reserve(self) -> Reservation<Self>;
    /// Returns `true` if this connection can serve multiple requests
    /// concurrently (i.e. a `Reservation::Shared` is possible).
    fn can_share(&self) -> bool;
}
|
|
44
|
+
|
|
45
|
+
/// Bounds required of a pool key; blanket-implemented for any type that
/// satisfies them, so callers never implement this by hand.
pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}

impl<T> Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}
|
|
48
|
+
|
|
49
|
+
/// A marker to identify what version a pooled connection is.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Ver {
    /// Version not pinned; the connection may end up HTTP/1 or HTTP/2.
    Auto,
    /// Known to be HTTP/2 (enables single-connecting-task dedup).
    Http2,
}
|
|
56
|
+
|
|
57
|
+
/// When checking out a pooled connection, it might be that the connection
/// only supports a single reservation, or it might be usable for many.
///
/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be
/// used for multiple requests.
// FIXME: allow() required due to `impl Trait` leaking types to this lint
pub enum Reservation<T> {
    /// This connection could be used multiple times, the first one will be
    /// reinserted into the `idle` pool, and the second will be given to
    /// the `Checkout`.
    Shared(T, T),
    /// This connection requires unique access. It will be returned after
    /// use is complete.
    Unique(T),
}
|
|
72
|
+
|
|
73
|
+
/// Mutex-guarded interior state shared by all clones of a [`Pool`].
// Simple type alias in case the key type needs to be adjusted.
// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc<String>;
struct PoolInner<T, K: Eq + Hash> {
    // A flag that a connection is being established, and the connection
    // should be shared. This prevents making multiple HTTP/2 connections
    // to the same host.
    connecting: HashSet<K>,
    // These are internal Conns sitting in the event loop in the KeepAlive
    // state, waiting to receive a new Request to send on the socket.
    idle: LruMap<K, Vec<Idle<T>>>,
    // Cap on idle connections kept per key; `put` drops beyond this.
    max_idle_per_host: usize,
    // These are outstanding Checkouts that are waiting for a socket to be
    // able to send a Request one. This is used when "racing" for a new
    // connection.
    //
    // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait
    // for the Pool to receive an idle Conn. When a Conn becomes idle,
    // this list is checked for any parked Checkouts, and tries to notify
    // them that the Conn could be used instead of waiting for a brand new
    // connection.
    waiters: HashMap<K, VecDeque<oneshot::Sender<T>>>,
    // A oneshot channel is used to allow the interval to be notified when
    // the Pool completely drops. That way, the interval can cancel immediately.
    idle_interval_ref: Option<oneshot::Sender<Infallible>>,
    // Executor used to spawn the idle-eviction interval task.
    exec: Exec,
    // Timer for the idle-eviction interval; no timer means no interval.
    timer: Option<ArcTimer>,
    // Idle timeout; `None` (or zero) disables expiration.
    timeout: Option<Duration>,
}
|
|
101
|
+
|
|
102
|
+
// A `Weak` that may be absent entirely, used instead of `Weak::new()`.
// This is because `Weak::new()` *allocates* space for `T`, even if it
// doesn't need it!
struct WeakOpt<T>(Option<Weak<T>>);
|
|
105
|
+
|
|
106
|
+
/// Pool configuration knobs, copied into `PoolInner` at construction.
#[derive(Clone, Copy, Debug)]
pub struct Config {
    /// How long a connection may sit idle before eviction; `None` disables.
    pub idle_timeout: Option<Duration>,
    /// Max idle connections kept per key; `0` disables pooling entirely.
    pub max_idle_per_host: usize,
    /// Cap on the number of keys tracked by the LRU idle map.
    pub max_pool_size: Option<NonZero<u32>>,
}
|
|
112
|
+
|
|
113
|
+
impl Config {
|
|
114
|
+
pub fn is_enabled(&self) -> bool {
|
|
115
|
+
self.max_idle_per_host > 0
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
impl<T, K: Key> Pool<T, K> {
    /// Builds a pool from `config`; a disabled config (see
    /// `Config::is_enabled`) produces a pool with no interior state, so
    /// every operation becomes a no-op passthrough.
    pub fn new<E, M>(config: Config, executor: E, timer: Option<M>) -> Pool<T, K>
    where
        E: Executor<exec::BoxSendFuture> + Send + Sync + Clone + 'static,
        M: Timer + Send + Sync + Clone + 'static,
    {
        let inner = if config.is_enabled() {
            Some(Arc::new(Mutex::new(PoolInner {
                connecting: HashSet::with_hasher(HASHER),
                // LRU map capacity comes from `max_pool_size`; unbounded
                // (u32::MAX) when unset.
                idle: LruMap::with_hasher(
                    ByLength::new(config.max_pool_size.map_or(u32::MAX, NonZero::get)),
                    HASHER,
                ),
                idle_interval_ref: None,
                max_idle_per_host: config.max_idle_per_host,
                waiters: HashMap::with_hasher(HASHER),
                exec: Exec::new(executor),
                timer: timer.map(ArcTimer::new),
                timeout: config.idle_timeout,
            })))
        } else {
            None
        };

        Pool { inner }
    }

    /// Whether pooling is active (interior state was created).
    pub(crate) fn is_enabled(&self) -> bool {
        self.inner.is_some()
    }
}
|
|
150
|
+
|
|
151
|
+
impl<T: Poolable, K: Key> Pool<T, K> {
    /// Returns a `Checkout` which is a future that resolves if an idle
    /// connection becomes available.
    pub fn checkout(&self, key: K) -> Checkout<T, K> {
        Checkout {
            key,
            pool: self.clone(),
            waiter: None,
        }
    }

    /// Ensure that there is only ever 1 connecting task for HTTP/2
    /// connections. This does nothing for HTTP/1.
    ///
    /// Returns `None` when an HTTP/2 connect for `key` is already in
    /// flight; the caller should wait on the pool instead of dialing.
    pub fn connecting(&self, key: K, ver: Ver) -> Option<Connecting<T, K>> {
        if ver == Ver::Http2 {
            if let Some(ref enabled) = self.inner {
                let mut inner = enabled.lock();
                return if inner.connecting.insert(key.clone()) {
                    let connecting = Connecting {
                        key,
                        pool: WeakOpt::downgrade(enabled),
                    };
                    Some(connecting)
                } else {
                    trace!("HTTP/2 connecting already in progress for {:?}", key);
                    None
                };
            }
        }

        // else
        Some(Connecting {
            key,
            // in HTTP/1's case, there is never a lock, so we don't
            // need to do anything in Drop.
            pool: WeakOpt::none(),
        })
    }

    /// Wraps a freshly established connection in a `Pooled`, inserting the
    /// shared half into the idle list when the reservation is `Shared`.
    pub fn pooled(&self, mut connecting: Connecting<T, K>, value: T) -> Pooled<T, K> {
        let (value, pool_ref) = if let Some(ref enabled) = self.inner {
            match value.reserve() {
                Reservation::Shared(to_insert, to_return) => {
                    let mut inner = enabled.lock();
                    inner.put(&connecting.key, to_insert, enabled);
                    // Do this here instead of Drop for Connecting because we
                    // already have a lock, no need to lock the mutex twice.
                    inner.connected(&connecting.key);
                    drop(inner);
                    // prevent the Drop of Connecting from repeating inner.connected()
                    connecting.pool = WeakOpt::none();

                    // Shared reservations don't need a reference to the pool,
                    // since the pool always keeps a copy.
                    (to_return, WeakOpt::none())
                }
                Reservation::Unique(value) => {
                    // Unique reservations must take a reference to the pool
                    // since they hope to reinsert once the reservation is
                    // completed
                    (value, WeakOpt::downgrade(enabled))
                }
            }
        } else {
            // If pool is not enabled, skip all the things...

            // The Connecting should have had no pool ref
            debug_assert!(connecting.pool.upgrade().is_none());

            (value, WeakOpt::none())
        };

        Pooled {
            key: connecting.key.clone(),
            is_reused: false,
            pool: pool_ref,
            value: Some(value),
        }
    }

    /// Wraps an already-pooled connection for reuse by a new request.
    fn reuse(&self, key: &K, value: T) -> Pooled<T, K> {
        debug!("reuse idle connection for {:?}", key);
        // TODO: unhack this
        // In Pool::pooled(), which is used for inserting brand new connections,
        // there's some code that adjusts the pool reference taken depending
        // on if the Reservation can be shared or is unique. By the time
        // reuse() is called, the reservation has already been made, and
        // we just have the final value, without knowledge of if this is
        // unique or shared. So, the hack is to just assume Ver::Http2 means
        // shared... :(
        let mut pool_ref = WeakOpt::none();
        if !value.can_share() {
            if let Some(ref enabled) = self.inner {
                pool_ref = WeakOpt::downgrade(enabled);
            }
        }

        Pooled {
            is_reused: true,
            key: key.clone(),
            pool: pool_ref,
            value: Some(value),
        }
    }
}
|
|
256
|
+
|
|
257
|
+
/// Pop off this list, looking for a usable connection that hasn't expired.
struct IdlePopper<'a, T, K> {
    // Only used for trace logging inside `pop`.
    #[allow(dead_code)]
    key: &'a K,
    list: &'a mut Vec<Idle<T>>,
}
|
|
263
|
+
|
|
264
|
+
impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> {
    /// Pops entries until one is open and unexpired; closed/expired entries
    /// are dropped. A `Shared` reservation reinserts the keep-half with a
    /// fresh `idle_at` before handing out the checkout-half.
    fn pop(self, expiration: &Expiration) -> Option<Idle<T>> {
        while let Some(entry) = self.list.pop() {
            // If the connection has been closed, or is older than our idle
            // timeout, simply drop it and keep looking...
            if !entry.value.is_open() {
                trace!("removing closed connection for {:?}", self.key);
                continue;
            }
            // TODO: Actually, since the `idle` list is pushed to the end always,
            // that would imply that if *this* entry is expired, then anything
            // "earlier" in the list would *have* to be expired also... Right?
            //
            // In that case, we could just break out of the loop and drop the
            // whole list...
            if expiration.expires(entry.idle_at) {
                trace!("removing expired connection for {:?}", self.key);
                continue;
            }

            let value = match entry.value.reserve() {
                Reservation::Shared(to_reinsert, to_checkout) => {
                    self.list.push(Idle {
                        idle_at: Instant::now(),
                        value: to_reinsert,
                    });
                    to_checkout
                }
                Reservation::Unique(unique) => unique,
            };

            return Some(Idle {
                idle_at: entry.idle_at,
                value,
            });
        }

        None
    }
}
|
|
304
|
+
|
|
305
|
+
impl<T: Poolable, K: Key> PoolInner<T, K> {
    /// Hands `value` to the first live waiter for `key`, or parks it in the
    /// idle list (respecting `max_idle_per_host`). Shared reservations may
    /// satisfy several waiters from the single value.
    fn put(&mut self, key: &K, value: T, __pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {
        // A shareable connection already idle for this key makes a second
        // insert redundant.
        if value.can_share() && self.idle.peek(key).is_some() {
            trace!("put; existing idle HTTP/2 connection for {:?}", key);
            return;
        }
        trace!("put; add idle connection for {:?}", key);
        let mut remove_waiters = false;
        let mut value = Some(value);
        if let Some(waiters) = self.waiters.get_mut(key) {
            while let Some(tx) = waiters.pop_front() {
                if !tx.is_closed() {
                    let reserved = value.take().expect("value already sent");
                    let reserved = match reserved.reserve() {
                        Reservation::Shared(to_keep, to_send) => {
                            value = Some(to_keep);
                            to_send
                        }
                        Reservation::Unique(uniq) => uniq,
                    };
                    match tx.send(reserved) {
                        Ok(()) => {
                            // Unique reservation was consumed: done. Shared:
                            // keep serving remaining waiters.
                            if value.is_none() {
                                break;
                            } else {
                                continue;
                            }
                        }
                        Err(e) => {
                            // Receiver vanished between is_closed and send;
                            // reclaim the value and try the next waiter.
                            value = Some(e);
                        }
                    }
                }

                trace!("put; removing canceled waiter for {:?}", key);
            }
            remove_waiters = waiters.is_empty();
        }

        if remove_waiters {
            self.waiters.remove(key);
        }

        if let Some(value) = value {
            // borrow-check scope...
            {
                let idle_list = self
                    .idle
                    .get_or_insert(key.clone(), Vec::<Idle<T>>::default);

                if let Some(idle_list) = idle_list {
                    if self.max_idle_per_host <= idle_list.len() {
                        trace!("max idle per host for {:?}, dropping connection", key);
                        return;
                    }

                    debug!("pooling idle connection for {:?}", key);
                    idle_list.push(Idle {
                        value,
                        idle_at: Instant::now(),
                    });
                }
            }

            self.spawn_idle_interval(__pool_ref);
        } else {
            trace!("put; found waiter for {:?}", key)
        }
    }

    /// A `Connecting` task is complete. Not necessarily successfully,
    /// but the lock is going away, so clean up.
    fn connected(&mut self, key: &K) {
        let existed = self.connecting.remove(key);
        debug_assert!(existed, "Connecting dropped, key not in pool.connecting");
        // cancel any waiters. if there are any, it's because
        // this Connecting task didn't complete successfully.
        // those waiters would never receive a connection.
        self.waiters.remove(key);
    }

    /// Spawns the background eviction task once per pool, only when an
    /// idle timeout and a timer are both configured.
    fn spawn_idle_interval(&mut self, pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {
        // Already running.
        if self.idle_interval_ref.is_some() {
            return;
        }

        let dur = if let Some(dur) = self.timeout {
            dur
        } else {
            return;
        };

        if dur == Duration::ZERO {
            return;
        }

        let timer = if let Some(timer) = self.timer.clone() {
            timer
        } else {
            return;
        };

        // While someone might want a shorter duration, and it will be respected
        // at checkout time, there's no need to wake up and proactively evict
        // faster than this.
        //
        // The value of 90ms was chosen as a balance between responsiveness and
        // efficiency. A shorter interval could lead to unnecessary wake-ups and
        // increased CPU usage, while a longer interval might delay the eviction
        // of idle connections. This value has been empirically determined to
        // work well in typical use cases.
        const MIN_CHECK: Duration = Duration::from_millis(90);

        let dur = dur.max(MIN_CHECK);

        // Dropping the pool drops `tx`, which ends the interval task.
        let (tx, rx) = oneshot::channel();
        self.idle_interval_ref = Some(tx);

        let interval = IdleTask {
            timer: timer.clone(),
            duration: dur,
            pool: WeakOpt::downgrade(pool_ref),
            pool_drop_notifier: rx,
        };

        self.exec.execute(interval.run());
    }
}
|
|
433
|
+
|
|
434
|
+
impl<T, K: Eq + Hash> PoolInner<T, K> {
    /// Any `FutureResponse`s that were created will have made a `Checkout`,
    /// and possibly inserted into the pool that it is waiting for an idle
    /// connection. If a user ever dropped that future, we need to clean out
    /// those parked senders.
    fn clean_waiters(&mut self, key: &K) {
        // Drop waiters whose receiving end is gone, noting whether the
        // whole entry became empty.
        let drop_entry = match self.waiters.get_mut(key) {
            Some(list) => {
                list.retain(|tx| !tx.is_closed());
                list.is_empty()
            }
            None => false,
        };
        if drop_entry {
            self.waiters.remove(key);
        }
    }
}
|
|
450
|
+
|
|
451
|
+
impl<T: Poolable, K: Key> PoolInner<T, K> {
    /// This should *only* be called by the IdleTask
    ///
    /// Evicts closed and timed-out idle connections, then removes any keys
    /// whose idle lists became empty.
    fn clear_expired(&mut self) {
        // The interval task is only spawned when `timeout` is Some (see
        // `spawn_idle_interval`), so this expect is an invariant.
        let dur = self.timeout.expect("interval assumes timeout");
        let now = Instant::now();

        let mut keys_to_remove = Vec::new();
        for (key, values) in self.idle.iter_mut() {
            values.retain(|entry| {
                if !entry.value.is_open() {
                    trace!("idle interval evicting closed for {:?}", key);
                    return false;
                }

                // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470.
                if now.saturating_duration_since(entry.idle_at) > dur {
                    trace!("idle interval evicting expired for {:?}", key);
                    return false;
                }

                // Otherwise, keep this value...
                true
            });

            // If the list is empty, remove the key.
            if values.is_empty() {
                keys_to_remove.push(key.clone());
            }
        }

        for key in keys_to_remove {
            trace!("idle interval removing empty key {:?}", key);
            self.idle.remove(&key);
        }
    }
}
|
|
487
|
+
|
|
488
|
+
impl<T, K: Key> Clone for Pool<T, K> {
|
|
489
|
+
fn clone(&self) -> Pool<T, K> {
|
|
490
|
+
Pool {
|
|
491
|
+
inner: self.inner.clone(),
|
|
492
|
+
}
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
/// A wrapped poolable value that tries to reinsert to the Pool on Drop.
// Note: The bounds `T: Poolable` is needed for the Drop impl.
pub struct Pooled<T: Poolable, K: Key> {
    // `Some` until Drop (or explicit take); accessors expect it present.
    value: Option<T>,
    // True when this value came out of the idle list rather than a
    // fresh connect.
    is_reused: bool,
    key: K,
    // Weak ref back to the pool for reinsertion; empty for shared
    // (HTTP/2-style) values, which the pool already retains.
    pool: WeakOpt<Mutex<PoolInner<T, K>>>,
}
|
|
504
|
+
|
|
505
|
+
impl<T: Poolable, K: Key> Pooled<T, K> {
    /// Whether this connection was taken from the idle pool rather than
    /// freshly established.
    pub fn is_reused(&self) -> bool {
        self.is_reused
    }

    /// Whether a (weak) reference back to an enabled pool is held.
    pub fn is_pool_enabled(&self) -> bool {
        self.pool.0.is_some()
    }

    // Panics if called after Drop already took the value (a bug).
    fn as_ref(&self) -> &T {
        match self.value.as_ref() {
            Some(v) => v,
            None => panic!("not dropped"),
        }
    }

    // Panics if called after Drop already took the value (a bug).
    fn as_mut(&mut self) -> &mut T {
        match self.value.as_mut() {
            Some(v) => v,
            None => panic!("not dropped"),
        }
    }
}
|
|
522
|
+
|
|
523
|
+
impl<T: Poolable, K: Key> Deref for Pooled<T, K> {
    type Target = T;

    /// Borrows the wrapped connection; panics only if the value was
    /// already taken by Drop (a bug).
    fn deref(&self) -> &T {
        self.value.as_ref().expect("not dropped")
    }
}
|
|
529
|
+
|
|
530
|
+
impl<T: Poolable, K: Key> DerefMut for Pooled<T, K> {
    /// Mutably borrows the wrapped connection; panics only if the value
    /// was already taken by Drop (a bug).
    fn deref_mut(&mut self) -> &mut T {
        self.value.as_mut().expect("not dropped")
    }
}
|
|
535
|
+
|
|
536
|
+
impl<T: Poolable, K: Key> Drop for Pooled<T, K> {
    /// On drop, tries to return a still-open connection to the pool;
    /// closed connections and pool-less shared values are simply dropped.
    fn drop(&mut self) {
        if let Some(value) = self.value.take() {
            if !value.is_open() {
                // If we *already* know the connection is done here,
                // it shouldn't be re-inserted back into the pool.
                return;
            }

            if let Some(pool) = self.pool.upgrade() {
                let mut inner = pool.lock();
                inner.put(&self.key, value, &pool);
            } else if !value.can_share() {
                trace!("pool dropped, dropping pooled ({:?})", self.key);
            }
            // Ver::Http2 is already in the Pool (or dead), so we wouldn't
            // have an actual reference to the Pool.
        }
    }
}
|
|
556
|
+
|
|
557
|
+
impl<T: Poolable, K: Key> Debug for Pooled<T, K> {
|
|
558
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
559
|
+
f.debug_struct("Pooled").field("key", &self.key).finish()
|
|
560
|
+
}
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
/// An idle connection together with the instant it went idle,
/// used by expiration checks.
struct Idle<T> {
    idle_at: Instant,
    value: T,
}
|
|
567
|
+
|
|
568
|
+
/// Future that resolves to a usable pooled connection for `key`
/// (see `Pool::checkout`).
pub struct Checkout<T, K: Key> {
    key: K,
    pool: Pool<T, K>,
    // Receiver parked in the pool's waiter list after a poll finds
    // no idle connection; `None` until then.
    waiter: Option<oneshot::Receiver<T>>,
}
|
|
573
|
+
|
|
574
|
+
/// Errors a `Checkout` future can resolve with.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    /// Pooling was disabled by configuration.
    PoolDisabled,
    /// The waiting request was canceled before a connection arrived.
    CheckoutNoLongerWanted,
    /// A connection was handed over but was already closed.
    CheckedOutClosedValue,
}
|
|
581
|
+
|
|
582
|
+
impl Error {
|
|
583
|
+
pub(super) fn is_canceled(&self) -> bool {
|
|
584
|
+
matches!(self, Error::CheckedOutClosedValue)
|
|
585
|
+
}
|
|
586
|
+
}
|
|
587
|
+
|
|
588
|
+
impl fmt::Display for Error {
|
|
589
|
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
590
|
+
f.write_str(match self {
|
|
591
|
+
Error::PoolDisabled => "pool is disabled",
|
|
592
|
+
Error::CheckedOutClosedValue => "checked out connection was closed",
|
|
593
|
+
Error::CheckoutNoLongerWanted => "request was canceled",
|
|
594
|
+
})
|
|
595
|
+
}
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
// `Error` carries no underlying source, so the marker impl suffices.
impl StdError for Error {}
|
|
599
|
+
|
|
600
|
+
impl<T: Poolable, K: Key> Checkout<T, K> {
    /// Polls the parked waiter, if any.
    ///
    /// `Ready(Some(Ok(..)))`: an open connection arrived. `Ready(Some(Err))`:
    /// the connection was closed or the sender was dropped. `Ready(None)`:
    /// no waiter is registered yet.
    fn poll_waiter(
        &mut self,
        cx: &mut task::Context<'_>,
    ) -> Poll<Option<Result<Pooled<T, K>, Error>>> {
        if let Some(mut rx) = self.waiter.take() {
            match Pin::new(&mut rx).poll(cx) {
                Poll::Ready(Ok(value)) => {
                    if value.is_open() {
                        Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value))))
                    } else {
                        Poll::Ready(Some(Err(Error::CheckedOutClosedValue)))
                    }
                }
                Poll::Pending => {
                    // Put the receiver back so the next poll resumes it.
                    self.waiter = Some(rx);
                    Poll::Pending
                }
                Poll::Ready(Err(_canceled)) => {
                    Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted)))
                }
            }
        } else {
            Poll::Ready(None)
        }
    }

    /// Tries to take an idle connection for `self.key` right now; on a miss,
    /// registers a waiter (once) so `poll_waiter` can be woken later.
    /// Returns `None` immediately when the pool is disabled.
    fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option<Pooled<T, K>> {
        let entry = {
            let mut inner = self.pool.inner.as_ref()?.lock();
            let expiration = Expiration::new(inner.timeout);
            let maybe_entry = inner.idle.get(&self.key).and_then(|list| {
                trace!("take? {:?}: expiration = {:?}", self.key, expiration.0);
                // A block to end the mutable borrow on list,
                // so the map below can check is_empty()
                {
                    let popper = IdlePopper {
                        key: &self.key,
                        list,
                    };
                    popper.pop(&expiration)
                }
                .map(|e| (e, list.is_empty()))
            });

            let (entry, empty) = if let Some((e, empty)) = maybe_entry {
                (Some(e), empty)
            } else {
                // No entry found means nuke the list for sure.
                (None, true)
            };

            if empty {
                inner.idle.remove(&self.key);
            }

            if entry.is_none() && self.waiter.is_none() {
                let (tx, mut rx) = oneshot::channel();
                trace!("checkout waiting for idle connection: {:?}", self.key);
                inner
                    .waiters
                    .entry(self.key.clone())
                    .or_insert_with(VecDeque::new)
                    .push_back(tx);

                // register the waker with this oneshot
                assert!(Pin::new(&mut rx).poll(cx).is_pending());
                self.waiter = Some(rx);
            }

            entry
        };

        entry.map(|e| self.pool.reuse(&self.key, e.value))
    }
}
|
|
676
|
+
|
|
677
|
+
impl<T: Poolable, K: Key> Future for Checkout<T, K> {
|
|
678
|
+
type Output = Result<Pooled<T, K>, Error>;
|
|
679
|
+
|
|
680
|
+
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
|
|
681
|
+
if let Some(pooled) = ready!(self.poll_waiter(cx)?) {
|
|
682
|
+
return Poll::Ready(Ok(pooled));
|
|
683
|
+
}
|
|
684
|
+
|
|
685
|
+
if let Some(pooled) = self.checkout(cx) {
|
|
686
|
+
Poll::Ready(Ok(pooled))
|
|
687
|
+
} else if !self.pool.is_enabled() {
|
|
688
|
+
Poll::Ready(Err(Error::PoolDisabled))
|
|
689
|
+
} else {
|
|
690
|
+
// There's a new waiter, already registered in self.checkout()
|
|
691
|
+
debug_assert!(self.waiter.is_some());
|
|
692
|
+
Poll::Pending
|
|
693
|
+
}
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
|
|
697
|
+
impl<T, K: Key> Drop for Checkout<T, K> {
|
|
698
|
+
fn drop(&mut self) {
|
|
699
|
+
if self.waiter.take().is_some() {
|
|
700
|
+
trace!("checkout dropped for {:?}", self.key);
|
|
701
|
+
if let Some(mut inner) = self.pool.inner.as_ref().map(|i| i.lock()) {
|
|
702
|
+
inner.clean_waiters(&self.key);
|
|
703
|
+
}
|
|
704
|
+
}
|
|
705
|
+
}
|
|
706
|
+
}
|
|
707
|
+
|
|
708
|
+
/// A placeholder held while a new connection for `key` is being established.
///
/// On drop it notifies the pool (`PoolInner::connected`) that the attempt
/// finished, so other checkouts for the same key can proceed. A populated
/// `pool` handle marks an HTTP/2 reservation (see the debug assertion in
/// `alpn_h2`); `WeakOpt::none()` presumably marks HTTP/1 or a disabled
/// pool — TODO confirm against `Pool::connecting`.
pub struct Connecting<T: Poolable, K: Key> {
    // The pool key (scheme + authority) this attempt is for.
    key: K,
    // Weak handle back to the pool's shared state; never kept alive past
    // the pool itself.
    pool: WeakOpt<Mutex<PoolInner<T, K>>>,
}
|
|
712
|
+
|
|
713
|
+
impl<T: Poolable, K: Key> Connecting<T, K> {
|
|
714
|
+
pub fn alpn_h2(self, pool: &Pool<T, K>) -> Option<Self> {
|
|
715
|
+
debug_assert!(
|
|
716
|
+
self.pool.0.is_none(),
|
|
717
|
+
"Connecting::alpn_h2 but already Http2"
|
|
718
|
+
);
|
|
719
|
+
|
|
720
|
+
pool.connecting(self.key.clone(), Ver::Http2)
|
|
721
|
+
}
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
impl<T: Poolable, K: Key> Drop for Connecting<T, K> {
    /// Tells the pool the connection attempt for `key` has ended (whether
    /// it succeeded or not), unblocking other checkouts for the same key.
    fn drop(&mut self) {
        // If the pool is already gone there is nobody to notify.
        // No need to panic on drop, that could abort!
        let Some(pool) = self.pool.upgrade() else {
            return;
        };
        pool.lock().connected(&self.key);
    }
}
|
|
733
|
+
|
|
734
|
+
/// Idle-timeout policy for pooled entries; `None` means entries never expire.
struct Expiration(Option<Duration>);

impl Expiration {
    /// Builds a policy from the pool's optional idle timeout.
    fn new(dur: Option<Duration>) -> Expiration {
        Expiration(dur)
    }

    /// Whether an entry that went idle at `instant` has outlived the
    /// configured timeout. Always `false` with no timeout configured.
    fn expires(&self, instant: Instant) -> bool {
        // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470.
        self.0.map_or(false, |timeout| {
            Instant::now().saturating_duration_since(instant) > timeout
        })
    }
}
|
|
749
|
+
|
|
750
|
+
/// State for the background task that periodically reaps expired idle
/// connections from the pool (see `IdleTask::run`).
struct IdleTask<T, K: Key> {
    // Timer used to sleep between reaping sweeps.
    timer: ArcTimer,
    // Interval between sweeps (also used as the sleep deadline increment).
    duration: Duration,
    // Weak handle to the pool; the task stops doing work once it can no
    // longer be upgraded.
    pool: WeakOpt<Mutex<PoolInner<T, K>>>,
    // This allows the IdleTask to be notified as soon as the entire
    // Pool is fully dropped, and shutdown. This channel is never sent on,
    // but Err(Canceled) will be received when the Pool is dropped.
    pool_drop_notifier: oneshot::Receiver<Infallible>,
}
|
|
759
|
+
|
|
760
|
+
impl<T: Poolable + 'static, K: Key> IdleTask<T, K> {
    /// Loop that clears expired idle connections every `self.duration`,
    /// terminating as soon as the owning pool is dropped.
    ///
    /// The drop notifier resolves with `Err(Canceled)` when the pool's
    /// sender is dropped; the sleep resolves on schedule. Whichever wins
    /// the `select` decides the branch.
    async fn run(self) {
        use futures_util::future;

        let mut sleep = self.timer.sleep_until(Instant::now() + self.duration);
        let mut on_pool_drop = self.pool_drop_notifier;
        loop {
            match future::select(&mut on_pool_drop, &mut sleep).await {
                future::Either::Left(_) => {
                    // pool dropped, bah-bye
                    break;
                }
                future::Either::Right(((), _)) => {
                    // Sweep interval elapsed: reap expired idle entries,
                    // skipping silently if the pool has already gone away.
                    if let Some(inner) = self.pool.upgrade() {
                        let mut inner = inner.lock();
                        trace!("idle interval checking for expired");
                        inner.clear_expired();
                        // Explicitly release the lock before touching the
                        // timer so it is never held across the reset.
                        drop(inner);
                    }

                    // Re-arm the same sleep future for the next sweep.
                    let deadline = Instant::now() + self.duration;
                    self.timer.reset(&mut sleep, deadline);
                }
            }
        }

        trace!("pool closed, canceling idle interval");
    }
}
|
|
789
|
+
|
|
790
|
+
impl<T> WeakOpt<T> {
|
|
791
|
+
fn none() -> Self {
|
|
792
|
+
WeakOpt(None)
|
|
793
|
+
}
|
|
794
|
+
|
|
795
|
+
fn downgrade(arc: &Arc<T>) -> Self {
|
|
796
|
+
WeakOpt(Some(Arc::downgrade(arc)))
|
|
797
|
+
}
|
|
798
|
+
|
|
799
|
+
fn upgrade(&self) -> Option<Arc<T>> {
|
|
800
|
+
self.0.as_ref().and_then(Weak::upgrade)
|
|
801
|
+
}
|
|
802
|
+
}
|
|
803
|
+
|
|
804
|
+
#[cfg(test)]
mod tests {
    use std::{
        fmt::Debug,
        future::Future,
        hash::Hash,
        num::NonZero,
        pin::Pin,
        task::{self, Poll},
        time::Duration,
    };

    use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
    use crate::{
        client::core::rt::{ArcTimer, TokioExecutor, TokioTimer},
        sync::MutexGuard,
    };

    // Concrete pool key for tests: scheme + authority.
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct KeyImpl(http::uri::Scheme, http::uri::Authority);

    /// Test unique reservations.
    #[derive(Debug, PartialEq, Eq)]
    struct Uniq<T>(T);

    impl<T: Send + 'static + Unpin> Poolable for Uniq<T> {
        fn is_open(&self) -> bool {
            true
        }

        fn reserve(self) -> Reservation<Self> {
            Reservation::Unique(self)
        }

        fn can_share(&self) -> bool {
            false
        }
    }

    // Shorthand for a Connecting placeholder with no pool handle.
    fn c<T: Poolable, K: Key>(key: K) -> Connecting<T, K> {
        Connecting {
            key,
            pool: WeakOpt::none(),
        }
    }

    // Builds an HTTP-scheme key for the given authority string.
    fn host_key(s: &str) -> KeyImpl {
        KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key"))
    }

    // Pool with a 100ms idle timeout, no background reaper timer, and no
    // per-host idle limit.
    fn pool_no_timer<T, K: Key>() -> Pool<T, K> {
        pool_max_idle_no_timer(usize::MAX)
    }

    // Same as `pool_no_timer`, but with a bounded max_idle_per_host.
    fn pool_max_idle_no_timer<T, K: Key>(max_idle: usize) -> Pool<T, K> {
        Pool::new(
            super::Config {
                idle_timeout: Some(Duration::from_millis(100)),
                max_idle_per_host: max_idle,
                max_pool_size: None,
            },
            TokioExecutor::new(),
            Option::<ArcTimer>::None,
        )
    }

    impl<T: Poolable, K: Key> Pool<T, K> {
        // Test-only accessor for the pool's inner state; panics if pooling
        // is disabled.
        fn locked(&self) -> MutexGuard<'_, super::PoolInner<T, K>> {
            self.inner.as_ref().expect("enabled").lock()
        }
    }

    // Returning a connection to the pool (drop) makes it available to a
    // subsequent checkout for the same key.
    #[tokio::test]
    async fn test_pool_checkout_smoke() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        drop(pooled);

        match pool.checkout(key).await {
            Ok(pooled) => assert_eq!(*pooled, Uniq(41)),
            Err(_) => panic!("not ready"),
        };
    }

    /// Helper to check if the future is ready after polling once.
    struct PollOnce<'a, F>(&'a mut F);

    impl<F, T, U> Future for PollOnce<'_, F>
    where
        F: Future<Output = Result<T, U>> + Unpin,
    {
        type Output = Option<()>;

        fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            // Map Ready(anything) -> Some(()), Pending -> None; never stays
            // pending itself, so awaiting it polls the inner future once.
            match Pin::new(&mut self.0).poll(cx) {
                Poll::Ready(Ok(_)) => Poll::Ready(Some(())),
                Poll::Ready(Err(_)) => Poll::Ready(Some(())),
                Poll::Pending => Poll::Ready(None),
            }
        }
    }

    // An idle connection past its idle_timeout must not be handed out.
    #[tokio::test]
    async fn test_pool_checkout_returns_none_if_expired() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        drop(pooled);
        let timeout = pool.locked().timeout.unwrap();
        tokio::time::sleep(timeout).await;
        let mut checkout = pool.checkout(key);
        let poll_once = PollOnce(&mut checkout);
        let is_not_ready = poll_once.await.is_none();
        assert!(is_not_ready);
    }

    // Polling a checkout lazily evicts expired entries from the idle map.
    #[tokio::test]
    async fn test_pool_checkout_removes_expired() {
        let pool = pool_no_timer();
        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(3)
        );
        let timeout = pool.locked().timeout.unwrap();
        tokio::time::sleep(timeout).await;

        let mut checkout = pool.checkout(key.clone());
        let poll_once = PollOnce(&mut checkout);
        // checkout.await should clean out the expired
        poll_once.await;
        assert!(pool.locked().idle.get(&key).is_none());
    }

    // max_idle_per_host caps how many idle entries one key may retain.
    #[test]
    fn test_pool_max_idle_per_host() {
        let pool = pool_max_idle_no_timer(2);
        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        // pooled and dropped 3, max_idle should only allow 2
        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(2)
        );
    }

    // With a real timer, the IdleTask sweeps expired entries on its own —
    // but only once its minimum sweep interval has elapsed.
    #[tokio::test]
    async fn test_pool_timer_removes_expired() {
        let pool = Pool::new(
            super::Config {
                idle_timeout: Some(Duration::from_millis(10)),
                max_idle_per_host: usize::MAX,
                max_pool_size: None,
            },
            TokioExecutor::new(),
            Some(TokioTimer::new()),
        );

        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(3)
        );

        // Let the timer tick passed the expiration...
        tokio::time::sleep(Duration::from_millis(30)).await;

        // But minimum interval is higher, so nothing should have been reaped
        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(3)
        );

        // Now wait passed the minimum interval more
        tokio::time::sleep(Duration::from_millis(70)).await;

        assert!(pool.locked().idle.get(&key).is_none());
    }

    // Returning a connection must wake a parked checkout for the same key.
    #[tokio::test]
    async fn test_pool_checkout_task_unparked() {
        use futures_util::{FutureExt, future::join};

        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        let checkout = join(pool.checkout(key), async {
            // the checkout future will park first,
            // and then this lazy future will be polled, which will insert
            // the pooled back into the pool
            //
            // this test makes sure that doing so will unpark the checkout
            drop(pooled);
        })
        .map(|(entry, _)| entry);

        assert_eq!(*checkout.await.unwrap(), Uniq(41));
    }

    // Dropping a parked checkout removes its waiter entry from the pool.
    #[tokio::test]
    async fn test_pool_checkout_drop_cleans_up_waiters() {
        let pool = pool_no_timer::<Uniq<i32>, KeyImpl>();
        let key = host_key("foo");

        let mut checkout1 = pool.checkout(key.clone());
        let mut checkout2 = pool.checkout(key.clone());

        let poll_once1 = PollOnce(&mut checkout1);
        let poll_once2 = PollOnce(&mut checkout2);

        // first poll needed to get into Pool's parked
        poll_once1.await;
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
        poll_once2.await;
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);

        // on drop, clean up Pool
        drop(checkout1);
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);

        drop(checkout2);
        assert!(!pool.locked().waiters.contains_key(&key));
    }

    // A Poolable whose open/closed state is controllable for tests.
    #[derive(Debug)]
    struct CanClose {
        #[allow(unused)]
        val: i32,
        closed: bool,
    }

    impl Poolable for CanClose {
        fn is_open(&self) -> bool {
            !self.closed
        }

        fn reserve(self) -> Reservation<Self> {
            Reservation::Unique(self)
        }

        fn can_share(&self) -> bool {
            false
        }
    }

    // A closed connection dropped back to the pool must not be re-idled.
    #[test]
    fn pooled_drop_if_closed_doesnt_reinsert() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        pool.pooled(
            c(key.clone()),
            CanClose {
                val: 57,
                closed: true,
            },
        );

        assert!(pool.locked().idle.get(&key).is_none());
    }

    // With max_pool_size = 2, the oldest idle key is evicted when a third
    // key's connection is returned.
    #[tokio::test]
    async fn test_pool_size_limit() {
        let pool = Pool::new(
            super::Config {
                idle_timeout: Some(Duration::from_millis(100)),
                max_idle_per_host: usize::MAX,
                max_pool_size: Some(NonZero::new(2).expect("max pool size")),
            },
            TokioExecutor::new(),
            Option::<ArcTimer>::None,
        );
        let key1 = host_key("foo");
        let key2 = host_key("bar");
        let key3 = host_key("baz");

        pool.pooled(c(key1.clone()), Uniq(41));
        pool.pooled(c(key2.clone()), Uniq(5));
        pool.pooled(c(key3.clone()), Uniq(99));

        assert!(pool.locked().idle.get(&key1).is_none());
        assert!(pool.locked().idle.get(&key2).is_some());
        assert!(pool.locked().idle.get(&key3).is_some());
    }
}
|