wreq-rb 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. checksums.yaml +7 -0
  2. data/Cargo.lock +2688 -0
  3. data/Cargo.toml +6 -0
  4. data/README.md +179 -0
  5. data/ext/wreq_rb/Cargo.toml +39 -0
  6. data/ext/wreq_rb/extconf.rb +22 -0
  7. data/ext/wreq_rb/src/client.rs +565 -0
  8. data/ext/wreq_rb/src/error.rs +25 -0
  9. data/ext/wreq_rb/src/lib.rs +20 -0
  10. data/ext/wreq_rb/src/response.rs +132 -0
  11. data/lib/wreq-rb/version.rb +5 -0
  12. data/lib/wreq-rb.rb +17 -0
  13. data/patches/0001-add-transfer-size-tracking.patch +292 -0
  14. data/vendor/wreq/Cargo.toml +306 -0
  15. data/vendor/wreq/LICENSE +202 -0
  16. data/vendor/wreq/README.md +122 -0
  17. data/vendor/wreq/examples/cert_store.rs +77 -0
  18. data/vendor/wreq/examples/connect_via_lower_priority_tokio_runtime.rs +258 -0
  19. data/vendor/wreq/examples/emulation.rs +118 -0
  20. data/vendor/wreq/examples/form.rs +14 -0
  21. data/vendor/wreq/examples/http1_websocket.rs +37 -0
  22. data/vendor/wreq/examples/http2_websocket.rs +45 -0
  23. data/vendor/wreq/examples/json_dynamic.rs +41 -0
  24. data/vendor/wreq/examples/json_typed.rs +47 -0
  25. data/vendor/wreq/examples/keylog.rs +16 -0
  26. data/vendor/wreq/examples/request_with_emulation.rs +115 -0
  27. data/vendor/wreq/examples/request_with_interface.rs +37 -0
  28. data/vendor/wreq/examples/request_with_local_address.rs +16 -0
  29. data/vendor/wreq/examples/request_with_proxy.rs +13 -0
  30. data/vendor/wreq/examples/request_with_redirect.rs +22 -0
  31. data/vendor/wreq/examples/request_with_version.rs +15 -0
  32. data/vendor/wreq/examples/tor_socks.rs +24 -0
  33. data/vendor/wreq/examples/unix_socket.rs +33 -0
  34. data/vendor/wreq/src/client/body.rs +304 -0
  35. data/vendor/wreq/src/client/conn/conn.rs +231 -0
  36. data/vendor/wreq/src/client/conn/connector.rs +549 -0
  37. data/vendor/wreq/src/client/conn/http.rs +1023 -0
  38. data/vendor/wreq/src/client/conn/proxy/socks.rs +233 -0
  39. data/vendor/wreq/src/client/conn/proxy/tunnel.rs +260 -0
  40. data/vendor/wreq/src/client/conn/proxy.rs +39 -0
  41. data/vendor/wreq/src/client/conn/tls_info.rs +98 -0
  42. data/vendor/wreq/src/client/conn/uds.rs +44 -0
  43. data/vendor/wreq/src/client/conn/verbose.rs +149 -0
  44. data/vendor/wreq/src/client/conn.rs +323 -0
  45. data/vendor/wreq/src/client/core/body/incoming.rs +485 -0
  46. data/vendor/wreq/src/client/core/body/length.rs +118 -0
  47. data/vendor/wreq/src/client/core/body.rs +34 -0
  48. data/vendor/wreq/src/client/core/common/buf.rs +149 -0
  49. data/vendor/wreq/src/client/core/common/rewind.rs +141 -0
  50. data/vendor/wreq/src/client/core/common/watch.rs +76 -0
  51. data/vendor/wreq/src/client/core/common.rs +3 -0
  52. data/vendor/wreq/src/client/core/conn/http1.rs +342 -0
  53. data/vendor/wreq/src/client/core/conn/http2.rs +307 -0
  54. data/vendor/wreq/src/client/core/conn.rs +11 -0
  55. data/vendor/wreq/src/client/core/dispatch.rs +299 -0
  56. data/vendor/wreq/src/client/core/error.rs +435 -0
  57. data/vendor/wreq/src/client/core/ext.rs +201 -0
  58. data/vendor/wreq/src/client/core/http1.rs +178 -0
  59. data/vendor/wreq/src/client/core/http2.rs +483 -0
  60. data/vendor/wreq/src/client/core/proto/h1/conn.rs +988 -0
  61. data/vendor/wreq/src/client/core/proto/h1/decode.rs +1170 -0
  62. data/vendor/wreq/src/client/core/proto/h1/dispatch.rs +684 -0
  63. data/vendor/wreq/src/client/core/proto/h1/encode.rs +580 -0
  64. data/vendor/wreq/src/client/core/proto/h1/io.rs +879 -0
  65. data/vendor/wreq/src/client/core/proto/h1/role.rs +694 -0
  66. data/vendor/wreq/src/client/core/proto/h1.rs +104 -0
  67. data/vendor/wreq/src/client/core/proto/h2/client.rs +650 -0
  68. data/vendor/wreq/src/client/core/proto/h2/ping.rs +539 -0
  69. data/vendor/wreq/src/client/core/proto/h2.rs +379 -0
  70. data/vendor/wreq/src/client/core/proto/headers.rs +138 -0
  71. data/vendor/wreq/src/client/core/proto.rs +58 -0
  72. data/vendor/wreq/src/client/core/rt/bounds.rs +57 -0
  73. data/vendor/wreq/src/client/core/rt/timer.rs +150 -0
  74. data/vendor/wreq/src/client/core/rt/tokio.rs +99 -0
  75. data/vendor/wreq/src/client/core/rt.rs +25 -0
  76. data/vendor/wreq/src/client/core/upgrade.rs +267 -0
  77. data/vendor/wreq/src/client/core.rs +16 -0
  78. data/vendor/wreq/src/client/emulation.rs +161 -0
  79. data/vendor/wreq/src/client/http/client/error.rs +142 -0
  80. data/vendor/wreq/src/client/http/client/exec.rs +29 -0
  81. data/vendor/wreq/src/client/http/client/extra.rs +77 -0
  82. data/vendor/wreq/src/client/http/client/lazy.rs +79 -0
  83. data/vendor/wreq/src/client/http/client/pool.rs +1105 -0
  84. data/vendor/wreq/src/client/http/client/util.rs +104 -0
  85. data/vendor/wreq/src/client/http/client.rs +1003 -0
  86. data/vendor/wreq/src/client/http/future.rs +99 -0
  87. data/vendor/wreq/src/client/http.rs +1629 -0
  88. data/vendor/wreq/src/client/layer/config/options.rs +156 -0
  89. data/vendor/wreq/src/client/layer/config.rs +116 -0
  90. data/vendor/wreq/src/client/layer/cookie.rs +161 -0
  91. data/vendor/wreq/src/client/layer/decoder.rs +139 -0
  92. data/vendor/wreq/src/client/layer/redirect/future.rs +270 -0
  93. data/vendor/wreq/src/client/layer/redirect/policy.rs +63 -0
  94. data/vendor/wreq/src/client/layer/redirect.rs +145 -0
  95. data/vendor/wreq/src/client/layer/retry/classify.rs +105 -0
  96. data/vendor/wreq/src/client/layer/retry/scope.rs +51 -0
  97. data/vendor/wreq/src/client/layer/retry.rs +151 -0
  98. data/vendor/wreq/src/client/layer/timeout/body.rs +233 -0
  99. data/vendor/wreq/src/client/layer/timeout/future.rs +90 -0
  100. data/vendor/wreq/src/client/layer/timeout.rs +177 -0
  101. data/vendor/wreq/src/client/layer.rs +15 -0
  102. data/vendor/wreq/src/client/multipart.rs +717 -0
  103. data/vendor/wreq/src/client/request.rs +818 -0
  104. data/vendor/wreq/src/client/response.rs +534 -0
  105. data/vendor/wreq/src/client/ws/json.rs +99 -0
  106. data/vendor/wreq/src/client/ws/message.rs +453 -0
  107. data/vendor/wreq/src/client/ws.rs +714 -0
  108. data/vendor/wreq/src/client.rs +27 -0
  109. data/vendor/wreq/src/config.rs +140 -0
  110. data/vendor/wreq/src/cookie.rs +579 -0
  111. data/vendor/wreq/src/dns/gai.rs +249 -0
  112. data/vendor/wreq/src/dns/hickory.rs +78 -0
  113. data/vendor/wreq/src/dns/resolve.rs +180 -0
  114. data/vendor/wreq/src/dns.rs +69 -0
  115. data/vendor/wreq/src/error.rs +502 -0
  116. data/vendor/wreq/src/ext.rs +398 -0
  117. data/vendor/wreq/src/hash.rs +143 -0
  118. data/vendor/wreq/src/header.rs +506 -0
  119. data/vendor/wreq/src/into_uri.rs +187 -0
  120. data/vendor/wreq/src/lib.rs +586 -0
  121. data/vendor/wreq/src/proxy/mac.rs +82 -0
  122. data/vendor/wreq/src/proxy/matcher.rs +806 -0
  123. data/vendor/wreq/src/proxy/uds.rs +66 -0
  124. data/vendor/wreq/src/proxy/win.rs +31 -0
  125. data/vendor/wreq/src/proxy.rs +569 -0
  126. data/vendor/wreq/src/redirect.rs +575 -0
  127. data/vendor/wreq/src/retry.rs +198 -0
  128. data/vendor/wreq/src/sync.rs +129 -0
  129. data/vendor/wreq/src/tls/conn/cache.rs +123 -0
  130. data/vendor/wreq/src/tls/conn/cert_compression.rs +125 -0
  131. data/vendor/wreq/src/tls/conn/ext.rs +82 -0
  132. data/vendor/wreq/src/tls/conn/macros.rs +34 -0
  133. data/vendor/wreq/src/tls/conn/service.rs +138 -0
  134. data/vendor/wreq/src/tls/conn.rs +681 -0
  135. data/vendor/wreq/src/tls/keylog/handle.rs +64 -0
  136. data/vendor/wreq/src/tls/keylog.rs +99 -0
  137. data/vendor/wreq/src/tls/options.rs +464 -0
  138. data/vendor/wreq/src/tls/x509/identity.rs +122 -0
  139. data/vendor/wreq/src/tls/x509/parser.rs +71 -0
  140. data/vendor/wreq/src/tls/x509/store.rs +228 -0
  141. data/vendor/wreq/src/tls/x509.rs +68 -0
  142. data/vendor/wreq/src/tls.rs +154 -0
  143. data/vendor/wreq/src/trace.rs +55 -0
  144. data/vendor/wreq/src/util.rs +122 -0
  145. data/vendor/wreq/tests/badssl.rs +228 -0
  146. data/vendor/wreq/tests/brotli.rs +350 -0
  147. data/vendor/wreq/tests/client.rs +1098 -0
  148. data/vendor/wreq/tests/connector_layers.rs +227 -0
  149. data/vendor/wreq/tests/cookie.rs +306 -0
  150. data/vendor/wreq/tests/deflate.rs +347 -0
  151. data/vendor/wreq/tests/emulation.rs +260 -0
  152. data/vendor/wreq/tests/gzip.rs +347 -0
  153. data/vendor/wreq/tests/layers.rs +261 -0
  154. data/vendor/wreq/tests/multipart.rs +165 -0
  155. data/vendor/wreq/tests/proxy.rs +438 -0
  156. data/vendor/wreq/tests/redirect.rs +629 -0
  157. data/vendor/wreq/tests/retry.rs +135 -0
  158. data/vendor/wreq/tests/support/delay_server.rs +117 -0
  159. data/vendor/wreq/tests/support/error.rs +16 -0
  160. data/vendor/wreq/tests/support/layer.rs +183 -0
  161. data/vendor/wreq/tests/support/mod.rs +9 -0
  162. data/vendor/wreq/tests/support/server.rs +232 -0
  163. data/vendor/wreq/tests/timeouts.rs +281 -0
  164. data/vendor/wreq/tests/unix_socket.rs +135 -0
  165. data/vendor/wreq/tests/upgrade.rs +98 -0
  166. data/vendor/wreq/tests/zstd.rs +559 -0
  167. metadata +225 -0
@@ -0,0 +1,135 @@
1
+ mod support;
2
+
3
+ use std::sync::{
4
+ Arc,
5
+ atomic::{AtomicUsize, Ordering},
6
+ };
7
+
8
+ use support::server;
9
+ use wreq::Client;
10
+
11
+ #[tokio::test]
12
+ async fn retries_apply_in_scope() {
13
+ let _ = pretty_env_logger::try_init();
14
+
15
+ let cnt = Arc::new(AtomicUsize::new(0));
16
+ let server = server::http(move |_req| {
17
+ let cnt = cnt.clone();
18
+ async move {
19
+ if cnt.fetch_add(1, Ordering::Relaxed) == 0 {
20
+ // first req is bad
21
+ http::Response::builder()
22
+ .status(http::StatusCode::SERVICE_UNAVAILABLE)
23
+ .body(Default::default())
24
+ .unwrap()
25
+ } else {
26
+ http::Response::default()
27
+ }
28
+ }
29
+ });
30
+
31
+ let scope = server.addr().ip().to_string();
32
+ let policy = wreq::retry::Policy::for_host(scope).classify_fn(|req_rep| {
33
+ if req_rep.status() == Some(http::StatusCode::SERVICE_UNAVAILABLE) {
34
+ req_rep.retryable()
35
+ } else {
36
+ req_rep.success()
37
+ }
38
+ });
39
+
40
+ let url = format!("http://{}", server.addr());
41
+ let resp = Client::builder()
42
+ .retry(policy)
43
+ .build()
44
+ .unwrap()
45
+ .get(url)
46
+ .send()
47
+ .await
48
+ .unwrap();
49
+
50
+ assert_eq!(resp.status(), 200);
51
+ }
52
+
53
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
54
+ async fn default_retries_have_a_limit() {
55
+ let _ = pretty_env_logger::try_init();
56
+
57
+ let server = server::http_with_config(
58
+ move |req| async move {
59
+ assert_eq!(req.version(), http::Version::HTTP_2);
60
+ // refused forever
61
+ Err(http2::Error::from(http2::Reason::REFUSED_STREAM))
62
+ },
63
+ |_| {},
64
+ );
65
+
66
+ let client = Client::builder().http2_only().build().unwrap();
67
+
68
+ let url = format!("http://{}", server.addr());
69
+
70
+ let _err = client.get(url).send().await.unwrap_err();
71
+ }
72
+
73
+ // NOTE: using the default "current_thread" runtime here would cause the test to
74
+ // fail, because the only thread would block until `panic_rx` receives a
75
+ // notification while the client needs to be driven to get the graceful shutdown
76
+ // done.
77
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
78
+ async fn highly_concurrent_requests_to_http2_server_with_low_max_concurrent_streams() {
79
+ let client = Client::builder().http2_only().no_proxy().build().unwrap();
80
+
81
+ let server = server::http_with_config(
82
+ move |req| async move {
83
+ assert_eq!(req.version(), http::Version::HTTP_2);
84
+ Ok::<_, std::convert::Infallible>(http::Response::default())
85
+ },
86
+ |builder| {
87
+ builder.http2().max_concurrent_streams(1);
88
+ },
89
+ );
90
+
91
+ let url = format!("http://{}", server.addr());
92
+
93
+ let futs = (0..100).map(|_| {
94
+ let client = client.clone();
95
+ let url = url.clone();
96
+ async move {
97
+ let res = client.get(&url).send().await.unwrap();
98
+ assert_eq!(res.status(), wreq::StatusCode::OK);
99
+ }
100
+ });
101
+ futures_util::future::join_all(futs).await;
102
+ }
103
+
104
+ #[tokio::test]
105
+ async fn highly_concurrent_requests_to_slow_http2_server_with_low_max_concurrent_streams() {
106
+ use support::delay_server;
107
+
108
+ let client = Client::builder().http2_only().no_proxy().build().unwrap();
109
+
110
+ let server = delay_server::Server::new(
111
+ move |req| async move {
112
+ assert_eq!(req.version(), http::Version::HTTP_2);
113
+ http::Response::default()
114
+ },
115
+ |http| {
116
+ http.http2().max_concurrent_streams(1);
117
+ },
118
+ std::time::Duration::from_secs(2),
119
+ )
120
+ .await;
121
+
122
+ let url = format!("http://{}", server.addr());
123
+
124
+ let futs = (0..100).map(|_| {
125
+ let client = client.clone();
126
+ let url = url.clone();
127
+ async move {
128
+ let res = client.get(&url).send().await.unwrap();
129
+ assert_eq!(res.status(), wreq::StatusCode::OK);
130
+ }
131
+ });
132
+ futures_util::future::join_all(futs).await;
133
+
134
+ server.shutdown().await;
135
+ }
@@ -0,0 +1,117 @@
1
+ #![allow(unused)]
2
+ use std::{convert::Infallible, future::Future, net, time::Duration};
3
+
4
+ use futures_util::FutureExt;
5
+ use http::{Request, Response};
6
+ use hyper::service::service_fn;
7
+ use tokio::{net::TcpListener, select, sync::oneshot};
8
+
9
/// This server, unlike [`super::server::Server`], allows for delaying the
/// specified amount of time after each TCP connection is established. This is
/// useful for testing the behavior of the client when the server is slow.
///
/// For example, in case of HTTP/2, once the TCP/TLS connection is established,
/// both endpoints are supposed to send a preface and an initial `SETTINGS`
/// frame (See [RFC9113 3.4] for details). What if these frames are delayed for
/// whatever reason? This server allows for testing such scenarios.
///
/// [RFC9113 3.4]: https://www.rfc-editor.org/rfc/rfc9113.html#name-http-2-connection-preface
pub struct Server {
    // Bound local address, fixed at construction time.
    addr: net::SocketAddr,
    // One-shot signal telling the accept loop to stop; `Option` so
    // `shutdown()` can consume it exactly once.
    shutdown_tx: Option<oneshot::Sender<()>>,
    // Fired by the background task after all connection tasks have joined.
    server_terminated_rx: oneshot::Receiver<()>,
}

type Builder = hyper_util::server::conn::auto::Builder<hyper_util::rt::TokioExecutor>;

impl Server {
    /// Bind on an ephemeral localhost port and start serving in background
    /// tasks. `func` produces the response for each request, `apply_config`
    /// may tweak the hyper connection builder, and `delay` is slept *before*
    /// the connection future is first polled (i.e. before the server's
    /// preface/SETTINGS go out).
    pub async fn new<F1, Fut, F2, Bu>(func: F1, apply_config: F2, delay: Duration) -> Self
    where
        F1: Fn(Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,
        Fut: Future<Output = Response<wreq::Body>> + Send + 'static,
        F2: FnOnce(&mut Builder) -> Bu + Send + 'static,
    {
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let (server_terminated_tx, server_terminated_rx) = oneshot::channel();

        let tcp_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = tcp_listener.local_addr().unwrap();

        tokio::spawn(async move {
            let mut builder =
                hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
            apply_config(&mut builder);

            // Inner task owns the accept loop; the `.shared()` receiver lets
            // every per-connection task observe the same shutdown signal.
            tokio::spawn(async move {
                let builder = builder;
                let (connection_shutdown_tx, connection_shutdown_rx) = oneshot::channel();
                let connection_shutdown_rx = connection_shutdown_rx.shared();
                let mut shutdown_rx = std::pin::pin!(shutdown_rx);

                let mut handles = Vec::new();
                loop {
                    select! {
                        _ = shutdown_rx.as_mut() => {
                            // Fan the shutdown out to all live connections.
                            connection_shutdown_tx.send(()).unwrap();
                            break;
                        }
                        res = tcp_listener.accept() => {
                            let (stream, _) = res.unwrap();
                            let io = hyper_util::rt::TokioIo::new(stream);

                            let handle = tokio::spawn({
                                let connection_shutdown_rx = connection_shutdown_rx.clone();
                                let func = func.clone();
                                let svc = service_fn(move |req| {
                                    let fut = func(req);
                                    async move {
                                        Ok::<_, Infallible>(fut.await)
                                    }});
                                let builder = builder.clone();

                                async move {
                                    // Build the connection future but sleep
                                    // before polling it: futures are lazy, so
                                    // nothing is sent until after `delay`.
                                    let fut = builder.serve_connection_with_upgrades(io, svc);
                                    tokio::time::sleep(delay).await;

                                    let mut conn = std::pin::pin!(fut);

                                    select! {
                                        _ = conn.as_mut() => {}
                                        _ = connection_shutdown_rx => {
                                            // Drain in-flight work, then await
                                            // the connection to completion.
                                            conn.as_mut().graceful_shutdown();
                                            conn.await.unwrap();
                                        }
                                    }
                                }
                            });

                            handles.push(handle);
                        }
                    }
                }

                // Wait for every connection task, then report termination.
                futures_util::future::join_all(handles).await;
                server_terminated_tx.send(()).unwrap();
            });
        });

        Self {
            addr,
            shutdown_tx: Some(shutdown_tx),
            server_terminated_rx,
        }
    }

    /// Signal shutdown and block until the background task has fully drained.
    pub async fn shutdown(mut self) {
        if let Some(tx) = self.shutdown_tx.take() {
            // Receiver may already be gone; ignore the send result.
            let _ = tx.send(());
        }

        self.server_terminated_rx.await.unwrap();
    }

    /// Address the server is listening on.
    pub fn addr(&self) -> net::SocketAddr {
        self.addr
    }
}
@@ -0,0 +1,16 @@
1
use std::error::Error as StdError;

/// Walk an error's `source()` chain and collect every error's display
/// message, outermost first.
#[allow(unused)]
pub fn inspect<E>(err: E) -> Vec<String>
where
    E: Into<Box<dyn StdError + Send + Sync>>,
{
    let boxed = err.into();
    let mut messages = Vec::new();
    let mut current: Option<&(dyn StdError + 'static)> = Some(&*boxed);
    while let Some(e) = current {
        messages.push(e.to_string());
        current = e.source();
    }
    messages
}
@@ -0,0 +1,183 @@
1
+ use std::{
2
+ future::Future,
3
+ pin::Pin,
4
+ task::{Context, Poll},
5
+ time::Duration,
6
+ };
7
+
8
+ use futures::future::BoxFuture;
9
+ use pin_project_lite::pin_project;
10
+ use tokio::time::Sleep;
11
+ use tower::{BoxError, Layer, Service};
12
+
13
+ /// This tower layer injects an arbitrary delay before calling downstream layers.
14
+ #[derive(Clone)]
15
+ pub struct DelayLayer {
16
+ delay: Duration,
17
+ }
18
+
19
+ impl DelayLayer {
20
+ #[allow(unused)]
21
+ pub const fn new(delay: Duration) -> Self {
22
+ DelayLayer { delay }
23
+ }
24
+ }
25
+
26
+ impl<S> Layer<S> for DelayLayer {
27
+ type Service = Delay<S>;
28
+ fn layer(&self, service: S) -> Self::Service {
29
+ Delay::new(service, self.delay)
30
+ }
31
+ }
32
+
33
+ impl std::fmt::Debug for DelayLayer {
34
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
35
+ f.debug_struct("DelayLayer")
36
+ .field("delay", &self.delay)
37
+ .finish()
38
+ }
39
+ }
40
+
41
+ /// This tower service injects an arbitrary delay before calling downstream layers.
42
+ #[derive(Debug, Clone)]
43
+ pub struct Delay<S> {
44
+ inner: S,
45
+ delay: Duration,
46
+ }
47
+ impl<S> Delay<S> {
48
+ pub fn new(inner: S, delay: Duration) -> Self {
49
+ Delay { inner, delay }
50
+ }
51
+ }
52
+
53
+ impl<S, Request> Service<Request> for Delay<S>
54
+ where
55
+ S: Service<Request>,
56
+ S::Error: Into<BoxError>,
57
+ {
58
+ type Response = S::Response;
59
+
60
+ type Error = BoxError;
61
+
62
+ type Future = ResponseFuture<S::Future>;
63
+
64
+ fn poll_ready(
65
+ &mut self,
66
+ cx: &mut std::task::Context<'_>,
67
+ ) -> std::task::Poll<Result<(), Self::Error>> {
68
+ println!("Delay::poll_ready called");
69
+ match self.inner.poll_ready(cx) {
70
+ Poll::Pending => Poll::Pending,
71
+ Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
72
+ }
73
+ }
74
+
75
+ fn call(&mut self, req: Request) -> Self::Future {
76
+ println!("Delay::call executed");
77
+ let response = self.inner.call(req);
78
+ let sleep = tokio::time::sleep(self.delay);
79
+
80
+ ResponseFuture::new(response, sleep)
81
+ }
82
+ }
83
+
84
+ // `Delay` response future
85
+ pin_project! {
86
+ #[derive(Debug)]
87
+ pub struct ResponseFuture<S> {
88
+ #[pin]
89
+ response: S,
90
+ #[pin]
91
+ sleep: Sleep,
92
+ }
93
+ }
94
+
95
+ impl<S> ResponseFuture<S> {
96
+ pub(crate) fn new(response: S, sleep: Sleep) -> Self {
97
+ ResponseFuture { response, sleep }
98
+ }
99
+ }
100
+
101
+ impl<F, S, E> Future for ResponseFuture<F>
102
+ where
103
+ F: Future<Output = Result<S, E>>,
104
+ E: Into<BoxError>,
105
+ {
106
+ type Output = Result<S, BoxError>;
107
+
108
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
109
+ let this = self.project();
110
+
111
+ // First poll the sleep until complete
112
+ match this.sleep.poll(cx) {
113
+ Poll::Pending => return Poll::Pending,
114
+ Poll::Ready(_) => {}
115
+ }
116
+
117
+ // Then poll the inner future
118
+ match this.response.poll(cx) {
119
+ Poll::Ready(v) => Poll::Ready(v.map_err(Into::into)),
120
+ Poll::Pending => Poll::Pending,
121
+ }
122
+ }
123
+ }
124
+
125
+ #[derive(Clone)]
126
+ pub struct SharedConcurrencyLimitLayer {
127
+ semaphore: std::sync::Arc<tokio::sync::Semaphore>,
128
+ }
129
+
130
+ impl SharedConcurrencyLimitLayer {
131
+ #[allow(unused)]
132
+ pub fn new(limit: usize) -> Self {
133
+ Self {
134
+ semaphore: std::sync::Arc::new(tokio::sync::Semaphore::new(limit)),
135
+ }
136
+ }
137
+ }
138
+
139
+ impl<S> tower::Layer<S> for SharedConcurrencyLimitLayer {
140
+ type Service = SharedConcurrencyLimit<S>;
141
+
142
+ fn layer(&self, inner: S) -> Self::Service {
143
+ SharedConcurrencyLimit {
144
+ inner,
145
+ semaphore: self.semaphore.clone(),
146
+ }
147
+ }
148
+ }
149
+
150
+ #[derive(Clone)]
151
+ pub struct SharedConcurrencyLimit<S> {
152
+ inner: S,
153
+ semaphore: std::sync::Arc<tokio::sync::Semaphore>,
154
+ }
155
+
156
+ impl<S, Req> tower::Service<Req> for SharedConcurrencyLimit<S>
157
+ where
158
+ S: tower::Service<Req> + Clone + Send + 'static,
159
+ S::Future: Send + 'static,
160
+ Req: Send + 'static,
161
+ {
162
+ type Response = S::Response;
163
+ type Error = S::Error;
164
+ type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
165
+
166
+ fn poll_ready(
167
+ &mut self,
168
+ _cx: &mut std::task::Context<'_>,
169
+ ) -> std::task::Poll<Result<(), Self::Error>> {
170
+ // always ready, we handle limits in call
171
+ std::task::Poll::Ready(Ok(()))
172
+ }
173
+
174
+ fn call(&mut self, req: Req) -> Self::Future {
175
+ let semaphore = self.semaphore.clone();
176
+ let mut inner = self.inner.clone();
177
+
178
+ Box::pin(async move {
179
+ let _permit = semaphore.acquire_owned().await.unwrap();
180
+ inner.call(req).await
181
+ })
182
+ }
183
+ }
@@ -0,0 +1,9 @@
1
// Shared test-support modules pulled into each integration-test binary.
pub mod delay_server;
pub mod error;
pub mod layer;
pub mod server;

// TODO: remove once done converting to new support server?
// "<crate-name>/<crate-version>", assembled at compile time from Cargo
// metadata of the crate being tested.
#[allow(unused)]
pub static DEFAULT_USER_AGENT: &str =
    concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
@@ -0,0 +1,232 @@
1
+ use std::{
2
+ convert::Infallible, future::Future, io, net, sync::mpsc as std_mpsc, thread, time::Duration,
3
+ };
4
+
5
+ use tokio::{io::AsyncReadExt, net::TcpStream, runtime, sync::oneshot};
6
+ use wreq::Body;
7
+
8
+ pub struct Server {
9
+ addr: net::SocketAddr,
10
+ panic_rx: std_mpsc::Receiver<()>,
11
+ events_rx: std_mpsc::Receiver<Event>,
12
+ shutdown_tx: Option<oneshot::Sender<()>>,
13
+ }
14
+
15
+ #[non_exhaustive]
16
+ pub enum Event {
17
+ ConnectionClosed,
18
+ }
19
+
20
+ impl Server {
21
+ pub fn addr(&self) -> net::SocketAddr {
22
+ self.addr
23
+ }
24
+
25
+ #[allow(unused)]
26
+ pub fn events(&mut self) -> Vec<Event> {
27
+ let mut events = Vec::new();
28
+ while let Ok(event) = self.events_rx.try_recv() {
29
+ events.push(event);
30
+ }
31
+ events
32
+ }
33
+ }
34
+
35
+ impl Drop for Server {
36
+ fn drop(&mut self) {
37
+ if let Some(tx) = self.shutdown_tx.take() {
38
+ let _ = tx.send(());
39
+ }
40
+
41
+ if !::std::thread::panicking() {
42
+ self.panic_rx
43
+ .recv_timeout(Duration::from_secs(3))
44
+ .expect("test server should not panic");
45
+ }
46
+ }
47
+ }
48
+
49
+ #[allow(unused)]
50
+ pub fn http<F, Fut>(func: F) -> Server
51
+ where
52
+ F: Fn(http::Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,
53
+ Fut: Future<Output = http::Response<Body>> + Send + 'static,
54
+ {
55
+ let infall = move |req| {
56
+ let fut = func(req);
57
+ async move { Ok::<_, Infallible>(fut.await) }
58
+ };
59
+ http_with_config(infall, |_builder| {})
60
+ }
61
+
62
type Builder = hyper_util::server::conn::auto::Builder<hyper_util::rt::TokioExecutor>;

/// Start a test HTTP server on its own thread/runtime. `func` handles each
/// request (and may fail with any boxable error); `apply_config` can tweak
/// the hyper connection builder before serving starts. Returns once the
/// listener is bound, so `Server::addr()` is immediately usable.
pub fn http_with_config<F1, Fut, E, F2, Bu>(func: F1, apply_config: F2) -> Server
where
    F1: Fn(http::Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,
    Fut: Future<Output = Result<http::Response<Body>, E>> + Send + 'static,
    E: Into<Box<dyn std::error::Error + Send + Sync>>,
    F2: FnOnce(&mut Builder) -> Bu + Send + 'static,
{
    // Spawn new runtime in thread to prevent reactor execution context conflict
    let test_name = thread::current().name().unwrap_or("<unknown>").to_string();
    thread::spawn(move || {
        let rt = runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("new rt");
        // Bind on an ephemeral localhost port so parallel tests don't clash.
        let listener = rt.block_on(async move {
            tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0)))
                .await
                .unwrap()
        });
        let addr = listener.local_addr().unwrap();

        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
        let (panic_tx, panic_rx) = std_mpsc::channel();
        let (events_tx, events_rx) = std_mpsc::channel();
        let tname = format!(
            "test({test_name})-support-server",
        );
        // Second thread runs the accept loop; the outer thread only exists to
        // build everything and hand the Server handle back via join().
        thread::Builder::new()
            .name(tname)
            .spawn(move || {
                rt.block_on(async move {
                    let mut builder =
                        hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
                    apply_config(&mut builder);

                    loop {
                        tokio::select! {
                            _ = &mut shutdown_rx => {
                                break;
                            }
                            accepted = listener.accept() => {
                                let (io, _) = accepted.expect("accepted");
                                let func = func.clone();
                                let svc = hyper::service::service_fn(func);
                                let builder = builder.clone();
                                let events_tx = events_tx.clone();
                                // One task per connection; report closure as
                                // an Event for tests that count connections.
                                tokio::spawn(async move {
                                    let _ = builder.serve_connection_with_upgrades(hyper_util::rt::TokioIo::new(io), svc).await;
                                    let _ = events_tx.send(Event::ConnectionClosed);
                                });
                            }
                        }
                    }
                    // Signals clean exit; Server::drop waits for this.
                    let _ = panic_tx.send(());
                });
            })
            .expect("thread spawn");
        Server {
            addr,
            panic_rx,
            events_rx,
            shutdown_tx: Some(shutdown_tx),
        }
    })
    .join()
    .unwrap()
}
131
+
132
/// Start a raw-TCP test server for hand-crafted protocol exchanges:
/// `do_response` receives each raw request (bytes up to the header
/// terminator) plus the socket, and writes whatever bytes it likes back.
/// Mirrors the structure of `http_with_config`, minus hyper.
#[allow(unused)]
pub fn low_level_with_response<F>(do_response: F) -> Server
where
    for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box<dyn Future<Output = ()> + Send + 'c>
        + Clone
        + Send
        + 'static,
{
    // Spawn new runtime in thread to prevent reactor execution context conflict
    let test_name = thread::current().name().unwrap_or("<unknown>").to_string();
    thread::spawn(move || {
        let rt = runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("new rt");
        // Ephemeral localhost port, as in the hyper-backed server.
        let listener = rt.block_on(async move {
            tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0)))
                .await
                .unwrap()
        });
        let addr = listener.local_addr().unwrap();

        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
        let (panic_tx, panic_rx) = std_mpsc::channel();
        let (events_tx, events_rx) = std_mpsc::channel();
        let tname = format!("test({test_name})-support-server",);
        thread::Builder::new()
            .name(tname)
            .spawn(move || {
                rt.block_on(async move {
                    loop {
                        tokio::select! {
                            _ = &mut shutdown_rx => {
                                break;
                            }
                            accepted = listener.accept() => {
                                let (io, _) = accepted.expect("accepted");
                                let do_response = do_response.clone();
                                let events_tx = events_tx.clone();
                                // Drive the read-respond loop per connection.
                                tokio::spawn(async move {
                                    low_level_server_client(io, do_response).await;
                                    let _ = events_tx.send(Event::ConnectionClosed);
                                });
                            }
                        }
                    }
                    // Clean-exit signal consumed by Server::drop.
                    let _ = panic_tx.send(());
                });
            })
            .expect("thread spawn");
        Server {
            addr,
            panic_rx,
            events_rx,
            shutdown_tx: Some(shutdown_tx),
        }
    })
    .join()
    .unwrap()
}
192
+
193
+ #[allow(unused)]
194
+ async fn low_level_server_client<F>(mut client_socket: TcpStream, do_response: F)
195
+ where
196
+ for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box<dyn Future<Output = ()> + Send + 'c>,
197
+ {
198
+ loop {
199
+ let request = low_level_read_http_request(&mut client_socket)
200
+ .await
201
+ .expect("read_http_request failed");
202
+ if request.is_empty() {
203
+ // connection closed by client
204
+ break;
205
+ }
206
+
207
+ Box::into_pin(do_response(&request, &mut client_socket)).await;
208
+ }
209
+ }
210
+
211
+ #[allow(unused)]
212
+ async fn low_level_read_http_request(client_socket: &mut TcpStream) -> io::Result<Vec<u8>> {
213
+ let mut buf = Vec::new();
214
+
215
+ // Read until the delimiter "\r\n\r\n" is found
216
+ loop {
217
+ let mut temp_buffer = [0; 1024];
218
+ let n = client_socket.read(&mut temp_buffer).await?;
219
+
220
+ if n == 0 {
221
+ break;
222
+ }
223
+
224
+ buf.extend_from_slice(&temp_buffer[..n]);
225
+
226
+ if let Some(pos) = buf.windows(4).position(|window| window == b"\r\n\r\n") {
227
+ return Ok(buf.drain(..pos + 4).collect());
228
+ }
229
+ }
230
+
231
+ Ok(buf)
232
+ }