itsi-scheduler 0.2.15 → 0.2.16
This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/Cargo.lock +15 -15
- data/ext/itsi_scheduler/Cargo.toml +1 -1
- data/ext/itsi_scheduler/extconf.rb +3 -1
- data/ext/itsi_server/Cargo.lock +1 -1
- data/ext/itsi_server/Cargo.toml +1 -1
- data/ext/itsi_server/src/lib.rs +1 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +2 -2
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +9 -11
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +6 -1
- data/ext/itsi_server/src/server/binds/listener.rs +4 -1
- data/ext/itsi_server/src/server/http_message_types.rs +1 -1
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +32 -34
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +3 -4
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +23 -38
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +1 -1
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +12 -3
- data/ext/itsi_server/src/server/process_worker.rs +2 -1
- data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +96 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +1 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +80 -136
- data/ext/itsi_server/src/server/thread_worker.rs +10 -3
- data/ext/itsi_server/src/services/itsi_http_service.rs +26 -21
- data/ext/itsi_server/src/services/mime_types.rs +185 -183
- data/ext/itsi_server/src/services/rate_limiter.rs +16 -34
- data/ext/itsi_server/src/services/static_file_server.rs +7 -13
- data/lib/itsi/scheduler/version.rb +1 -1
- metadata +2 -1
data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs:

@@ -3,6 +3,7 @@ use either::Either;
 use itsi_tracing::*;
 use magnus::error::Result;
 use serde::Deserialize;
+use tracing::enabled;

 use crate::server::http_message_types::{HttpRequest, HttpResponse};
 use crate::services::itsi_http_service::HttpRequestContext;
@@ -39,18 +40,6 @@ pub enum LogMiddlewareLevel {
     Error,
 }

-impl LogMiddlewareLevel {
-    pub fn log(&self, message: String) {
-        match self {
-            LogMiddlewareLevel::Trace => trace!(target: "middleware::log_requests", message),
-            LogMiddlewareLevel::Debug => debug!(target: "middleware::log_requests", message),
-            LogMiddlewareLevel::Info => info!(target: "middleware::log_requests", message),
-            LogMiddlewareLevel::Warn => warn!(target: "middleware::log_requests", message),
-            LogMiddlewareLevel::Error => error!(target: "middleware::log_requests", message),
-        }
-    }
-}
-
 #[async_trait]
 impl MiddlewareLayer for LogRequests {
     async fn initialize(&self) -> Result<()> {
@@ -64,7 +53,38 @@ impl MiddlewareLayer for LogRequests {
     ) -> Result<Either<HttpRequest, HttpResponse>> {
         context.init_logging_params();
         if let Some(LogConfig { level, format }) = self.before.as_ref() {
-            level
+            match level {
+                LogMiddlewareLevel::Trace => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::TRACE) {
+                        let message = format.rewrite_request(&req, context);
+                        trace!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Debug => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::DEBUG) {
+                        let message = format.rewrite_request(&req, context);
+                        debug!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Info => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::INFO) {
+                        let message = format.rewrite_request(&req, context);
+                        info!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Warn => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::WARN) {
+                        let message = format.rewrite_request(&req, context);
+                        warn!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Error => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::ERROR) {
+                        let message = format.rewrite_request(&req, context);
+                        error!(target: "middleware::log_requests", message);
+                    }
+                }
+            }
         }

         Ok(Either::Left(req))
@@ -72,7 +92,38 @@ impl MiddlewareLayer for LogRequests {

     async fn after(&self, resp: HttpResponse, context: &mut HttpRequestContext) -> HttpResponse {
         if let Some(LogConfig { level, format }) = self.after.as_ref() {
-            level
+            match level {
+                LogMiddlewareLevel::Trace => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::TRACE) {
+                        let message = format.rewrite_response(&resp, context);
+                        trace!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Debug => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::DEBUG) {
+                        let message = format.rewrite_response(&resp, context);
+                        debug!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Info => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::INFO) {
+                        let message = format.rewrite_response(&resp, context);
+                        info!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Warn => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::WARN) {
+                        let message = format.rewrite_response(&resp, context);
+                        warn!(target: "middleware::log_requests", message);
+                    }
+                }
+                LogMiddlewareLevel::Error => {
+                    if enabled!(target: "middleware::log_requests", tracing::Level::ERROR) {
+                        let message = format.rewrite_response(&resp, context);
+                        error!(target: "middleware::log_requests", message);
+                    }
+                }
+            }
         }

         resp
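In the log_requests hunks above, the generic `LogMiddlewareLevel::log` helper is replaced by per-level arms that check `enabled!` before building the log line, so `format.rewrite_request`/`rewrite_response` only runs when the `middleware::log_requests` target is active at that level. A minimal standalone sketch of the same guard pattern, assuming the `tracing` and `tracing-subscriber` crates (the `log_request_line` helper and the message are illustrative, not part of itsi):

```rust
use tracing::{debug, enabled, Level};

// Build the (potentially expensive) log line only when the target/level is
// actually enabled, mirroring the `enabled!` guards added in log_requests.rs.
fn log_request_line(build_message: impl FnOnce() -> String) {
    if enabled!(target: "middleware::log_requests", Level::DEBUG) {
        let message = build_message();
        debug!(target: "middleware::log_requests", message);
    }
}

fn main() {
    // Subscriber setup only so the example is runnable on its own.
    tracing_subscriber::fmt()
        .with_max_level(Level::DEBUG)
        .init();
    log_request_line(|| format!("GET /health -> {}", 200));
}
```

The closure defers the formatting work, which is the point of the change: disabled levels now skip message construction entirely.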
data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs:

@@ -37,7 +37,7 @@ impl MiddlewareLayer for MaxBody
     async fn after(&self, resp: HttpResponse, context: &mut HttpRequestContext) -> HttpResponse {
         if resp.status() == StatusCode::PAYLOAD_TOO_LARGE {
             self.error_response
-                .to_http_response(context.response_format()
+                .to_http_response(*context.response_format())
                 .await
         } else {
             resp
data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs:

@@ -303,7 +303,7 @@ impl MiddlewareLayer for Proxy
         let url = self.to.rewrite_request(&req, context);

         let accept: ResponseFormat = req.accept().into();
-        let error_response = self.error_response.to_http_response(accept
+        let error_response = self.error_response.to_http_response(accept).await;

         let destination = match Url::parse(&url) {
             Ok(dest) => dest,
data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs:

@@ -6,6 +6,7 @@ use crate::services::rate_limiter::{
 };
 use async_trait::async_trait;
 use either::Either;
+use http::{HeaderName, HeaderValue};
 use magnus::error::Result;
 use serde::Deserialize;
 use std::collections::HashMap;
@@ -24,6 +25,8 @@ pub struct RateLimit {
     pub trusted_proxies: HashMap<String, TokenSource>,
     #[serde(default = "too_many_requests_error_response")]
     pub error_response: ErrorResponse,
+    #[serde(skip)]
+    pub limit_header_value: OnceLock<HeaderValue>,
 }

 fn too_many_requests_error_response() -> ErrorResponse {
@@ -38,6 +41,12 @@ pub enum RateLimitKey {
     Parameter(TokenSource),
 }

+static X_RATELIMIT_LIMIT: HeaderName = HeaderName::from_static("x-ratelimit-limit");
+static X_RATELIMIT_REMAINING: HeaderName = HeaderName::from_static("x-ratelimit-remaining");
+static X_RATELIMIT_RESET: HeaderName = HeaderName::from_static("x-ratelimit-reset");
+static RETRY_AFTER: HeaderName = HeaderName::from_static("retry-after");
+static ZERO_VALUE: HeaderValue = HeaderValue::from_static("0");
+
 #[async_trait]
 impl MiddlewareLayer for RateLimit {
     async fn initialize(&self) -> Result<()> {
@@ -46,6 +55,9 @@ impl MiddlewareLayer for RateLimit {
         if let Ok(limiter) = get_rate_limiter(&self.store_config).await {
             let _ = self.rate_limiter.set(limiter);
         }
+        self.limit_header_value
+            .set(self.requests.to_string().parse().unwrap())
+            .ok();
         Ok(())
     }

@@ -58,8 +70,7 @@ impl MiddlewareLayer for RateLimit {
         let key_value = match &self.key {
             RateLimitKey::SocketAddress => {
                 // Use the socket address from the context
-                if self.trusted_proxies.
-                let source = self.trusted_proxies.get(&context.addr).unwrap();
+                if let Some(source) = self.trusted_proxies.get(&context.addr) {
                     source.extract_token(&req).unwrap_or(&context.addr)
                 } else {
                     &context.addr
@@ -114,18 +125,20 @@ impl MiddlewareLayer for RateLimit {
                     .error_response
                     .to_http_response(req.accept().into())
                     .await;
+                let ttl_header_value: HeaderValue = ttl.to_string().parse().unwrap();
+                response.headers_mut().insert(
+                    X_RATELIMIT_LIMIT.clone(),
+                    self.limit_header_value.get().unwrap().clone(),
+                );
                 response
                     .headers_mut()
-                    .insert(
-                response
-                    .headers_mut()
-                    .insert("X-RateLimit-Remaining", "0".parse().unwrap());
+                    .insert(X_RATELIMIT_REMAINING.clone(), ZERO_VALUE.clone());
                 response
                     .headers_mut()
-                    .insert(
+                    .insert(X_RATELIMIT_RESET.clone(), ttl_header_value.clone());
                 response
                     .headers_mut()
-                    .insert(
+                    .insert(RETRY_AFTER.clone(), ttl_header_value);
                 Ok(Either::Right(response))
             }
             Err(e) => {
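The rate_limit hunks switch the 429 response headers to `HeaderName`/`HeaderValue` statics and cache the stringified request limit in a `OnceLock`, instead of parsing header names and values on every rejected request. A self-contained sketch of that pattern with the `http` crate (the `rate_limit_headers` function and its arguments are illustrative assumptions, not itsi's API):

```rust
use std::sync::OnceLock;

use http::{HeaderMap, HeaderName, HeaderValue};

// Header names and constant values are built once instead of being parsed
// from strings on every response.
static X_RATELIMIT_LIMIT: HeaderName = HeaderName::from_static("x-ratelimit-limit");
static X_RATELIMIT_REMAINING: HeaderName = HeaderName::from_static("x-ratelimit-remaining");
static ZERO_VALUE: HeaderValue = HeaderValue::from_static("0");

// The configured request limit only needs to be stringified once.
static LIMIT_VALUE: OnceLock<HeaderValue> = OnceLock::new();

fn rate_limit_headers(limit: u64, ttl_seconds: u64) -> HeaderMap {
    let limit_value =
        LIMIT_VALUE.get_or_init(|| HeaderValue::from_str(&limit.to_string()).unwrap());
    let ttl_value = HeaderValue::from_str(&ttl_seconds.to_string()).unwrap();

    let mut headers = HeaderMap::new();
    headers.insert(X_RATELIMIT_LIMIT.clone(), limit_value.clone());
    headers.insert(X_RATELIMIT_REMAINING.clone(), ZERO_VALUE.clone());
    headers.insert(HeaderName::from_static("x-ratelimit-reset"), ttl_value.clone());
    headers.insert(HeaderName::from_static("retry-after"), ttl_value);
    headers
}

fn main() {
    let headers = rate_limit_headers(100, 30);
    assert_eq!(headers["x-ratelimit-limit"], "100");
}
```

Only the TTL-dependent values are built per request; everything that is fixed per configuration is reused.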
data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs:

@@ -119,11 +119,7 @@ impl MiddlewareLayer for RubyApp {
         if self.sendfile {
             if let Some(sendfile_header) = resp.headers().get("X-Sendfile") {
                 return ROOT_STATIC_FILE_SERVER
-                    .serve_single_abs(
-                        sendfile_header.to_str().unwrap(),
-                        context.accept.clone(),
-                        &[],
-                    )
+                    .serve_single_abs(sendfile_header.to_str().unwrap(), context.accept, &[])
                     .await;
             }
         }
data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs:

@@ -21,6 +21,10 @@ pub struct StaticResponse {
     body: Vec<u8>,
     #[serde(skip)]
     header_map: OnceLock<HeaderMap>,
+    #[serde(skip)]
+    body_bytes: OnceLock<Full<Bytes>>,
+    #[serde(skip)]
+    status_code: OnceLock<StatusCode>,
 }

 #[async_trait]
@@ -35,6 +39,12 @@ impl MiddlewareLayer for StaticResponse {
         self.header_map
             .set(header_map)
             .map_err(|_| ItsiError::new("Failed to set headers"))?;
+        self.body_bytes
+            .set(Full::new(Bytes::from(self.body.clone())))
+            .map_err(|_| ItsiError::new("Failed to set body bytes"))?;
+        self.status_code
+            .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK))
+            .map_err(|_| ItsiError::new("Failed to set status code"))?;
         Ok(())
     }

@@ -43,9 +53,8 @@ impl MiddlewareLayer for StaticResponse {
         _req: HttpRequest,
         _context: &mut HttpRequestContext,
     ) -> Result<Either<HttpRequest, HttpResponse>> {
-        let mut resp = Response::new(BoxBody::new(
-
-        *resp.status_mut() = status;
+        let mut resp = Response::new(BoxBody::new(self.body_bytes.get().unwrap().clone()));
+        *resp.status_mut() = *self.status_code.get().unwrap();
         *resp.headers_mut() = self.header_map.get().unwrap().clone();

         Ok(Either::Right(resp))
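The static_response hunks precompute the `Full<Bytes>` body and the `StatusCode` once during `initialize` and clone them per request. A rough standalone sketch of that `OnceLock` caching approach, assuming the `http`, `bytes`, and `http-body-util` crates (the `CachedStatic` type is illustrative, not itsi's):

```rust
use std::sync::OnceLock;

use bytes::Bytes;
use http::{Response, StatusCode};
use http_body_util::Full;

// Configuration-derived values are converted once; per-request work is then
// limited to cheap clones (Full<Bytes> clones share the underlying buffer).
struct CachedStatic {
    code: u16,
    body: Vec<u8>,
    body_bytes: OnceLock<Full<Bytes>>,
    status_code: OnceLock<StatusCode>,
}

impl CachedStatic {
    fn initialize(&self) {
        let _ = self.body_bytes.set(Full::new(Bytes::from(self.body.clone())));
        let _ = self
            .status_code
            .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK));
    }

    fn respond(&self) -> Response<Full<Bytes>> {
        let mut resp = Response::new(self.body_bytes.get().unwrap().clone());
        *resp.status_mut() = *self.status_code.get().unwrap();
        resp
    }
}

fn main() {
    let cached = CachedStatic {
        code: 204,
        body: Vec::new(),
        body_bytes: OnceLock::new(),
        status_code: OnceLock::new(),
    };
    cached.initialize();
    assert_eq!(cached.respond().status(), StatusCode::NO_CONTENT);
}
```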
data/ext/itsi_server/src/server/process_worker.rs:

@@ -42,7 +42,8 @@ impl Default for ProcessWorker {
     }
 }

-static CORE_IDS: LazyLock<Vec<CoreId>> =
+pub static CORE_IDS: LazyLock<Vec<CoreId>> =
+    LazyLock::new(|| core_affinity::get_core_ids().unwrap());

 impl ProcessWorker {
     #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]
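`CORE_IDS` becomes `pub`, so other modules can reuse the lazily discovered core list. A brief sketch of how such a list might be used to pin worker threads with the `core_affinity` crate (the `pin_worker` helper is hypothetical, for illustration only, and not taken from the gem):

```rust
use std::sync::LazyLock;

use core_affinity::CoreId;

// Discover the machine's cores exactly once, on first access.
pub static CORE_IDS: LazyLock<Vec<CoreId>> =
    LazyLock::new(|| core_affinity::get_core_ids().unwrap_or_default());

// Hypothetical helper: pin the calling worker thread to a core chosen by index.
fn pin_worker(worker_index: usize) {
    if let Some(core) = CORE_IDS.get(worker_index % CORE_IDS.len().max(1)) {
        core_affinity::set_for_current(*core);
    }
}

fn main() {
    pin_worker(0);
    println!("{} cores discovered", CORE_IDS.len());
}
```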
data/ext/itsi_server/src/server/serve_strategy/acceptor.rs (new file):

@@ -0,0 +1,96 @@
+use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration};
+
+use hyper_util::rt::TokioIo;
+use tokio::task::JoinSet;
+use tracing::debug;
+
+use crate::{
+    ruby_types::itsi_server::itsi_server_config::ServerParams,
+    server::{binds::listener::ListenerInfo, io_stream::IoStream, request_job::RequestJob},
+    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
+};
+
+use super::single_mode::{RunningPhase, SingleMode};
+
+pub struct Acceptor {
+    pub acceptor_args: Arc<AcceptorArgs>,
+    pub join_set: JoinSet<()>,
+}
+
+impl Deref for Acceptor {
+    type Target = Arc<AcceptorArgs>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.acceptor_args
+    }
+}
+
+pub struct AcceptorArgs {
+    pub strategy: Arc<SingleMode>,
+    pub listener_info: ListenerInfo,
+    pub shutdown_receiver: tokio::sync::watch::Receiver<RunningPhase>,
+    pub job_sender: async_channel::Sender<RequestJob>,
+    pub nonblocking_sender: async_channel::Sender<RequestJob>,
+    pub server_params: Arc<ServerParams>,
+}
+
+impl Acceptor {
+    pub(crate) async fn serve_connection(&mut self, stream: IoStream) {
+        let addr = stream.addr();
+        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
+        let mut shutdown_channel = self.shutdown_receiver.clone();
+        let acceptor_args = self.acceptor_args.clone();
+        self.join_set.spawn(async move {
+            let executor = &acceptor_args.strategy.executor;
+            let mut serve = Box::pin(executor.serve_connection_with_upgrades(
+                io,
+                ItsiHttpService {
+                    inner: Arc::new(ItsiHttpServiceInner {
+                        acceptor_args: acceptor_args.clone(),
+                        addr: addr.to_string(),
+                    }),
+                },
+            ));
+
+            tokio::select! {
+                // Await the connection finishing naturally.
+                res = &mut serve => {
+                    match res {
+                        Ok(()) => {
+                            debug!("Connection closed normally");
+                        },
+                        Err(res) => {
+                            debug!("Connection closed abruptly: {:?}", res);
+                        }
+                    }
+                    serve.as_mut().graceful_shutdown();
+                },
+                // A lifecycle event triggers shutdown.
+                _ = shutdown_channel.changed() => {
+                    // Initiate graceful shutdown.
+                    serve.as_mut().graceful_shutdown();
+
+                    // Now await the connection to finish shutting down.
+                    if let Err(e) = serve.await {
+                        debug!("Connection shutdown error: {:?}", e);
+                    }
+                }
+            }
+        });
+    }
+
+    pub async fn join(&mut self) {
+        // Join all acceptor tasks with timeout
+        let deadline = tokio::time::Instant::now()
+            + Duration::from_secs_f64(self.server_params.shutdown_timeout);
+        let sleep_until = tokio::time::sleep_until(deadline);
+        tokio::select! {
+            _ = async {
+                while (self.join_set.join_next().await).is_some() {}
+            } => {},
+            _ = sleep_until => {
+                debug!("Shutdown timeout reached; abandoning remaining acceptor tasks.");
+            }
+        }
+    }
+}
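The new `Acceptor` tracks each connection task in a `JoinSet` and, on shutdown, races draining that set against a deadline derived from `shutdown_timeout`. A condensed, self-contained sketch of that join-with-timeout pattern under tokio (the task bodies and timeout value are placeholders):

```rust
use std::time::Duration;

use tokio::task::JoinSet;

// Wait for all spawned connection tasks to finish, but no longer than the
// shutdown timeout; anything still running is abandoned (and aborted when
// the JoinSet is eventually dropped).
async fn join_with_timeout(join_set: &mut JoinSet<()>, shutdown_timeout: f64) {
    let deadline = tokio::time::Instant::now() + Duration::from_secs_f64(shutdown_timeout);
    tokio::select! {
        _ = async {
            while join_set.join_next().await.is_some() {}
        } => {}
        _ = tokio::time::sleep_until(deadline) => {
            eprintln!("shutdown timeout reached; abandoning remaining tasks");
        }
    }
}

#[tokio::main]
async fn main() {
    let mut tasks = JoinSet::new();
    for i in 0..4u64 {
        tasks.spawn(async move {
            tokio::time::sleep(Duration::from_millis(10 * i)).await;
        });
    }
    join_with_timeout(&mut tasks, 1.0).await;
}
```

Racing the drain against `sleep_until` keeps a single stuck connection from blocking process exit past the configured timeout.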