itsi-server 0.2.14 → 0.2.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +167 -218
  3. data/ext/itsi_scheduler/Cargo.toml +1 -1
  4. data/ext/itsi_server/Cargo.lock +1 -1
  5. data/ext/itsi_server/Cargo.toml +3 -2
  6. data/ext/itsi_server/extconf.rb +3 -1
  7. data/ext/itsi_server/src/lib.rs +1 -0
  8. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +2 -2
  9. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +9 -11
  10. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +6 -1
  11. data/ext/itsi_server/src/server/binds/listener.rs +4 -1
  12. data/ext/itsi_server/src/server/http_message_types.rs +1 -1
  13. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +32 -34
  14. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +3 -4
  15. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +23 -38
  16. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
  17. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
  18. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +1 -1
  19. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
  20. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
  21. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +8 -6
  22. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +12 -3
  23. data/ext/itsi_server/src/server/process_worker.rs +2 -1
  24. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +96 -0
  25. data/ext/itsi_server/src/server/serve_strategy/mod.rs +1 -0
  26. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +80 -136
  27. data/ext/itsi_server/src/server/thread_worker.rs +10 -3
  28. data/ext/itsi_server/src/services/itsi_http_service.rs +26 -21
  29. data/ext/itsi_server/src/services/mime_types.rs +2893 -1413
  30. data/ext/itsi_server/src/services/rate_limiter.rs +16 -34
  31. data/ext/itsi_server/src/services/static_file_server.rs +147 -121
  32. data/ext/itsi_tracing/Cargo.toml +1 -1
  33. data/lib/itsi/server/config/config_helpers.rb +1 -2
  34. data/lib/itsi/server/config/middleware/etag.md +3 -7
  35. data/lib/itsi/server/config/middleware/etag.rb +2 -4
  36. data/lib/itsi/server/config/options/listen_backlog.rb +1 -1
  37. data/lib/itsi/server/config/options/send_buffer_size.md +15 -0
  38. data/lib/itsi/server/config/options/send_buffer_size.rb +19 -0
  39. data/lib/itsi/server/config.rb +24 -25
  40. data/lib/itsi/server/route_tester.rb +1 -1
  41. data/lib/itsi/server/version.rb +1 -1
  42. metadata +21 -18

data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs
@@ -6,6 +6,7 @@ use crate::services::rate_limiter::{
 };
 use async_trait::async_trait;
 use either::Either;
+use http::{HeaderName, HeaderValue};
 use magnus::error::Result;
 use serde::Deserialize;
 use std::collections::HashMap;
@@ -24,6 +25,8 @@ pub struct RateLimit {
     pub trusted_proxies: HashMap<String, TokenSource>,
     #[serde(default = "too_many_requests_error_response")]
     pub error_response: ErrorResponse,
+    #[serde(skip)]
+    pub limit_header_value: OnceLock<HeaderValue>,
 }
 
 fn too_many_requests_error_response() -> ErrorResponse {
@@ -38,6 +41,12 @@ pub enum RateLimitKey {
     Parameter(TokenSource),
 }
 
+static X_RATELIMIT_LIMIT: HeaderName = HeaderName::from_static("x-ratelimit-limit");
+static X_RATELIMIT_REMAINING: HeaderName = HeaderName::from_static("x-ratelimit-remaining");
+static X_RATELIMIT_RESET: HeaderName = HeaderName::from_static("x-ratelimit-reset");
+static RETRY_AFTER: HeaderName = HeaderName::from_static("retry-after");
+static ZERO_VALUE: HeaderValue = HeaderValue::from_static("0");
+
 #[async_trait]
 impl MiddlewareLayer for RateLimit {
     async fn initialize(&self) -> Result<()> {
@@ -46,6 +55,9 @@ impl MiddlewareLayer for RateLimit {
         if let Ok(limiter) = get_rate_limiter(&self.store_config).await {
             let _ = self.rate_limiter.set(limiter);
         }
+        self.limit_header_value
+            .set(self.requests.to_string().parse().unwrap())
+            .ok();
         Ok(())
     }
 
@@ -58,8 +70,7 @@ impl MiddlewareLayer for RateLimit {
         let key_value = match &self.key {
             RateLimitKey::SocketAddress => {
                 // Use the socket address from the context
-                if self.trusted_proxies.contains_key(&context.addr) {
-                    let source = self.trusted_proxies.get(&context.addr).unwrap();
+                if let Some(source) = self.trusted_proxies.get(&context.addr) {
                     source.extract_token(&req).unwrap_or(&context.addr)
                 } else {
                     &context.addr
@@ -114,18 +125,20 @@ impl MiddlewareLayer for RateLimit {
                     .error_response
                     .to_http_response(req.accept().into())
                     .await;
+                let ttl_header_value: HeaderValue = ttl.to_string().parse().unwrap();
+                response.headers_mut().insert(
+                    X_RATELIMIT_LIMIT.clone(),
+                    self.limit_header_value.get().unwrap().clone(),
+                );
                 response
                     .headers_mut()
-                    .insert("X-RateLimit-Limit", limit.to_string().parse().unwrap());
-                response
-                    .headers_mut()
-                    .insert("X-RateLimit-Remaining", "0".parse().unwrap());
+                    .insert(X_RATELIMIT_REMAINING.clone(), ZERO_VALUE.clone());
                 response
                     .headers_mut()
-                    .insert("X-RateLimit-Reset", ttl.to_string().parse().unwrap());
+                    .insert(X_RATELIMIT_RESET.clone(), ttl_header_value.clone());
                 response
                     .headers_mut()
-                    .insert("Retry-After", ttl.to_string().parse().unwrap());
+                    .insert(RETRY_AFTER.clone(), ttl_header_value);
                 Ok(Either::Right(response))
             }
             Err(e) => {
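
The rate-limit hunks above stop parsing header names and the limit value on every 429 response: the `X-RateLimit-*` names become `HeaderName` statics and the configured limit is parsed once into a `OnceLock<HeaderValue>` during `initialize`. Below is a minimal, standalone sketch of that precompute-once idea using only the `http` crate (1.x); the `build_429_headers` helper and the limit of 100 are illustrative, not itsi APIs.

    use http::{HeaderMap, HeaderName, HeaderValue};
    use std::sync::OnceLock;

    // Names known at compile time: `from_static` can initialize a `static`,
    // so an invalid name fails before any request is served.
    static X_RATELIMIT_LIMIT: HeaderName = HeaderName::from_static("x-ratelimit-limit");
    static X_RATELIMIT_REMAINING: HeaderName = HeaderName::from_static("x-ratelimit-remaining");
    static ZERO: HeaderValue = HeaderValue::from_static("0");

    // The limit is only known at configuration time, so parse it once and cache it.
    static LIMIT_VALUE: OnceLock<HeaderValue> = OnceLock::new();

    fn build_429_headers(limit: u64) -> HeaderMap {
        let limit_value = LIMIT_VALUE
            .get_or_init(|| HeaderValue::from_str(&limit.to_string()).expect("digits are valid"));
        let mut headers = HeaderMap::new();
        headers.insert(X_RATELIMIT_LIMIT.clone(), limit_value.clone());
        headers.insert(X_RATELIMIT_REMAINING.clone(), ZERO.clone());
        headers
    }

    fn main() {
        let headers = build_429_headers(100);
        assert_eq!(headers["x-ratelimit-limit"].to_str().unwrap(), "100");
    }

Cloning a `HeaderValue` is cheap (it wraps `Bytes`), so the per-request cost drops to a few map inserts.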

data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs
@@ -119,11 +119,7 @@ impl MiddlewareLayer for RubyApp {
         if self.sendfile {
             if let Some(sendfile_header) = resp.headers().get("X-Sendfile") {
                 return ROOT_STATIC_FILE_SERVER
-                    .serve_single_abs(
-                        sendfile_header.to_str().unwrap(),
-                        context.accept.clone(),
-                        &[],
-                    )
+                    .serve_single_abs(sendfile_header.to_str().unwrap(), context.accept, &[])
                     .await;
             }
         }

data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs
@@ -16,10 +16,15 @@ use http::{
 };
 use itsi_error::ItsiError;
 use magnus::error::Result;
-use moka::sync::Cache;
+use quick_cache::sync::Cache;
 use regex::Regex;
 use serde::Deserialize;
-use std::{collections::HashMap, path::PathBuf, sync::OnceLock, time::Duration};
+use std::{
+    collections::HashMap,
+    path::PathBuf,
+    sync::{Arc, OnceLock},
+    time::Duration,
+};
 use tracing::debug;
 
 #[derive(Debug, Deserialize)]
@@ -76,10 +81,7 @@ impl MiddlewareLayer for StaticAssets {
             recheck_interval: Duration::from_secs(self.file_check_interval),
             serve_hidden_files: self.serve_hidden_files,
             allowed_extensions: self.allowed_extensions.clone(),
-            miss_cache: Cache::builder()
-                .max_capacity(self.max_files_in_memory)
-                .time_to_live(Duration::from_secs(self.file_check_interval))
-                .build(),
+            miss_cache: Arc::new(Cache::new(self.max_files_in_memory as usize)),
         })?)
         .map_err(ItsiError::new)?;
         Ok(())
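
The miss cache here moves from moka to quick_cache. quick_cache's `sync::Cache` is a bounded, sharded cache with no per-entry TTL, which is why the `time_to_live` builder call disappears and staleness is instead covered by the file server's own `recheck_interval`. A small sketch of the quick_cache API under that assumption (the `PathBuf` → `()` miss-marker shape is illustrative):

    use quick_cache::sync::Cache;
    use std::{path::PathBuf, sync::Arc};

    fn main() {
        // Bounded cache; eviction is capacity-based only, so any time-based
        // revalidation has to happen outside the cache itself.
        let miss_cache: Arc<Cache<PathBuf, ()>> = Arc::new(Cache::new(1024));

        let key = PathBuf::from("/assets/missing.css");
        if miss_cache.get(&key).is_none() {
            // ...probe the filesystem, then remember the miss...
            miss_cache.insert(key.clone(), ());
        }
        assert!(miss_cache.get(&key).is_some());
    }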

data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs
@@ -21,6 +21,10 @@ pub struct StaticResponse {
     body: Vec<u8>,
     #[serde(skip)]
     header_map: OnceLock<HeaderMap>,
+    #[serde(skip)]
+    body_bytes: OnceLock<Full<Bytes>>,
+    #[serde(skip)]
+    status_code: OnceLock<StatusCode>,
 }
 
 #[async_trait]
@@ -35,6 +39,12 @@ impl MiddlewareLayer for StaticResponse {
         self.header_map
             .set(header_map)
             .map_err(|_| ItsiError::new("Failed to set headers"))?;
+        self.body_bytes
+            .set(Full::new(Bytes::from(self.body.clone())))
+            .map_err(|_| ItsiError::new("Failed to set body bytes"))?;
+        self.status_code
+            .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK))
+            .map_err(|_| ItsiError::new("Failed to set status code"))?;
         Ok(())
     }
 
@@ -43,9 +53,8 @@ impl MiddlewareLayer for StaticResponse {
         _req: HttpRequest,
         _context: &mut HttpRequestContext,
     ) -> Result<Either<HttpRequest, HttpResponse>> {
-        let mut resp = Response::new(BoxBody::new(Full::new(Bytes::from(self.body.clone()))));
-        let status = StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK);
-        *resp.status_mut() = status;
+        let mut resp = Response::new(BoxBody::new(self.body_bytes.get().unwrap().clone()));
+        *resp.status_mut() = *self.status_code.get().unwrap();
         *resp.headers_mut() = self.header_map.get().unwrap().clone();
 
         Ok(Either::Right(resp))
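
Same pattern as the rate-limit change: everything derivable from configuration (body, status code) is materialized once in `initialize` and only cloned per request; cloning `Full<Bytes>` is cheap because `Bytes` is reference-counted. A simplified sketch without the middleware trait plumbing (the struct shape mirrors the hunk, the `respond` helper is illustrative):

    use bytes::Bytes;
    use http::StatusCode;
    use http_body_util::Full;
    use std::sync::OnceLock;

    struct StaticResponse {
        code: u16,
        body: Vec<u8>,
        body_bytes: OnceLock<Full<Bytes>>,
        status_code: OnceLock<StatusCode>,
    }

    impl StaticResponse {
        // Called once at startup; later calls to `respond` only clone cached values.
        fn initialize(&self) {
            self.body_bytes
                .set(Full::new(Bytes::from(self.body.clone())))
                .ok();
            self.status_code
                .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK))
                .ok();
        }

        fn respond(&self) -> (StatusCode, Full<Bytes>) {
            (
                *self.status_code.get().expect("initialized"),
                self.body_bytes.get().expect("initialized").clone(),
            )
        }
    }

    fn main() {
        let resp = StaticResponse {
            code: 404,
            body: b"not found".to_vec(),
            body_bytes: OnceLock::new(),
            status_code: OnceLock::new(),
        };
        resp.initialize();
        assert_eq!(resp.respond().0, StatusCode::NOT_FOUND);
    }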

data/ext/itsi_server/src/server/process_worker.rs
@@ -42,7 +42,8 @@ impl Default for ProcessWorker {
     }
 }
 
-static CORE_IDS: LazyLock<Vec<CoreId>> = LazyLock::new(|| core_affinity::get_core_ids().unwrap());
+pub static CORE_IDS: LazyLock<Vec<CoreId>> =
+    LazyLock::new(|| core_affinity::get_core_ids().unwrap());
 
 impl ProcessWorker {
     #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]

data/ext/itsi_server/src/server/serve_strategy/acceptor.rs (new file)
@@ -0,0 +1,96 @@
+use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration};
+
+use hyper_util::rt::TokioIo;
+use tokio::task::JoinSet;
+use tracing::debug;
+
+use crate::{
+    ruby_types::itsi_server::itsi_server_config::ServerParams,
+    server::{binds::listener::ListenerInfo, io_stream::IoStream, request_job::RequestJob},
+    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
+};
+
+use super::single_mode::{RunningPhase, SingleMode};
+
+pub struct Acceptor {
+    pub acceptor_args: Arc<AcceptorArgs>,
+    pub join_set: JoinSet<()>,
+}
+
+impl Deref for Acceptor {
+    type Target = Arc<AcceptorArgs>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.acceptor_args
+    }
+}
+
+pub struct AcceptorArgs {
+    pub strategy: Arc<SingleMode>,
+    pub listener_info: ListenerInfo,
+    pub shutdown_receiver: tokio::sync::watch::Receiver<RunningPhase>,
+    pub job_sender: async_channel::Sender<RequestJob>,
+    pub nonblocking_sender: async_channel::Sender<RequestJob>,
+    pub server_params: Arc<ServerParams>,
+}
+
+impl Acceptor {
+    pub(crate) async fn serve_connection(&mut self, stream: IoStream) {
+        let addr = stream.addr();
+        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
+        let mut shutdown_channel = self.shutdown_receiver.clone();
+        let acceptor_args = self.acceptor_args.clone();
+        self.join_set.spawn(async move {
+            let executor = &acceptor_args.strategy.executor;
+            let mut serve = Box::pin(executor.serve_connection_with_upgrades(
+                io,
+                ItsiHttpService {
+                    inner: Arc::new(ItsiHttpServiceInner {
+                        acceptor_args: acceptor_args.clone(),
+                        addr: addr.to_string(),
+                    }),
+                },
+            ));
+
+            tokio::select! {
+                // Await the connection finishing naturally.
+                res = &mut serve => {
+                    match res {
+                        Ok(()) => {
+                            debug!("Connection closed normally");
+                        },
+                        Err(res) => {
+                            debug!("Connection closed abruptly: {:?}", res);
+                        }
+                    }
+                    serve.as_mut().graceful_shutdown();
+                },
+                // A lifecycle event triggers shutdown.
+                _ = shutdown_channel.changed() => {
+                    // Initiate graceful shutdown.
+                    serve.as_mut().graceful_shutdown();
+
+                    // Now await the connection to finish shutting down.
+                    if let Err(e) = serve.await {
+                        debug!("Connection shutdown error: {:?}", e);
+                    }
+                }
+            }
+        });
+    }
+
+    pub async fn join(&mut self) {
+        // Join all acceptor tasks with timeout
+        let deadline = tokio::time::Instant::now()
+            + Duration::from_secs_f64(self.server_params.shutdown_timeout);
+        let sleep_until = tokio::time::sleep_until(deadline);
+        tokio::select! {
+            _ = async {
+                while (self.join_set.join_next().await).is_some() {}
+            } => {},
+            _ = sleep_until => {
+                debug!("Shutdown timeout reached; abandoning remaining acceptor tasks.");
+            }
+        }
+    }
+}
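
The new `Acceptor` owns a `JoinSet` of per-connection tasks and, in `join`, waits for them only until the configured `shutdown_timeout`. The join-with-deadline pattern in isolation (the task bodies and the 2-second timeout below are placeholders):

    use std::time::Duration;
    use tokio::task::JoinSet;

    async fn join_with_deadline(mut join_set: JoinSet<()>, timeout: Duration) {
        let deadline = tokio::time::Instant::now() + timeout;
        tokio::select! {
            // Drain every task if they all finish in time...
            _ = async { while join_set.join_next().await.is_some() {} } => {}
            // ...otherwise stop waiting; dropping the JoinSet aborts the stragglers.
            _ = tokio::time::sleep_until(deadline) => {
                eprintln!("deadline reached; abandoning remaining tasks");
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let mut tasks = JoinSet::new();
        for i in 0..4u64 {
            tasks.spawn(async move {
                tokio::time::sleep(Duration::from_millis(50 * i)).await;
            });
        }
        join_with_deadline(tasks, Duration::from_secs(2)).await;
    }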

data/ext/itsi_server/src/server/serve_strategy/mod.rs
@@ -4,6 +4,7 @@ use cluster_mode::ClusterMode;
 use itsi_error::Result;
 use single_mode::SingleMode;
 
+pub mod acceptor;
 pub mod cluster_mode;
 pub mod single_mode;
 

data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
@@ -1,17 +1,15 @@
 use crate::{
     ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
     server::{
-        binds::listener::ListenerInfo,
-        io_stream::IoStream,
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
+        serve_strategy::acceptor::{Acceptor, AcceptorArgs},
         signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
         thread_worker::{build_thread_workers, ThreadWorker},
     },
-    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
 };
 use hyper_util::{
-    rt::{TokioExecutor, TokioIo, TokioTimer},
+    rt::{TokioExecutor, TokioTimer},
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
@@ -22,13 +20,10 @@ use itsi_tracing::{debug, error, info};
 use magnus::{value::ReprValue, Value};
 use nix::unistd::Pid;
 use parking_lot::RwLock;
+use std::sync::Arc;
 use std::{
     collections::HashMap,
-    pin::Pin,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
+    sync::atomic::{AtomicBool, Ordering},
     thread::sleep,
     time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
@@ -60,8 +55,22 @@ impl SingleMode {
     #[instrument(parent=None, skip_all)]
     pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
+        let mut executor = Builder::new(TokioExecutor::new());
+        executor
+            .http1()
+            .header_read_timeout(server_config.server_params.read().header_read_timeout)
+            .writev(true)
+            .timer(TokioTimer::new());
+        executor
+            .http2()
+            .max_concurrent_streams(100)
+            .max_local_error_reset_streams(100)
+            .enable_connect_protocol()
+            .max_header_list_size(10 * 1024 * 1024)
+            .max_send_buf_size(16 * 1024 * 1024);
+
         Ok(Self {
-            executor: Builder::new(TokioExecutor::new()),
+            executor,
             server_config,
             lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
             restart_requested: AtomicBool::new(false),
@@ -82,7 +91,11 @@ impl SingleMode {
         };
         builder
             .thread_name("itsi-server-accept-loop")
-            .thread_stack_size(3 * 1024 * 1024)
+            .thread_stack_size(512 * 1024)
+            .max_blocking_threads(4)
+            .event_interval(16)
+            .global_queue_interval(64)
+            .max_io_events_per_tick(256)
            .enable_all()
             .build()
             .expect("Failed to build Tokio runtime")
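
Two tuning changes land here: the hyper `auto::Builder` is now configured once at construction (HTTP/1 header-read timeout and writev, HTTP/2 stream, header-list and send-buffer limits) rather than per connection, and the accept-loop runtime trades the 3 MiB thread stack for 512 KiB plus explicit scheduler intervals. A standalone sketch of the runtime side; the single worker thread is illustrative, and the builder methods are all stock `tokio::runtime::Builder` calls:

    use tokio::runtime::Builder;

    fn main() {
        // Multi-thread runtime tuned for an accept loop: small stacks, few blocking
        // threads, and frequent polling of the I/O driver and global queue.
        let runtime = Builder::new_multi_thread()
            .worker_threads(1)
            .thread_name("accept-loop")
            .thread_stack_size(512 * 1024)
            .max_blocking_threads(4)
            .event_interval(16)
            .global_queue_interval(64)
            .max_io_events_per_tick(256)
            .enable_all()
            .build()
            .expect("failed to build Tokio runtime");

        runtime.block_on(async {
            println!("runtime is up");
        });
    }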
@@ -214,9 +227,6 @@ impl SingleMode {
 
     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let mut listener_task_set = JoinSet::new();
-        let runtime = self.build_runtime();
-
         let (thread_workers, job_sender, nonblocking_sender) =
             build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
                 .inspect_err(|e| {
@@ -225,11 +235,13 @@ impl SingleMode {
                     }
                 })?;
 
+        let worker_count = thread_workers.len();
         info!(
-            threads = thread_workers.len(),
+            threads = worker_count,
             binds = format!("{:?}", self.server_config.server_params.read().binds)
         );
 
+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
         if monitor_thread.is_none() {
@@ -240,8 +252,10 @@ impl SingleMode {
         if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
             return Ok(());
         }
+        let runtime = self.build_runtime();
         let result = runtime.block_on(
             async {
+                let mut listener_task_set = JoinSet::new();
                 let server_params = self.server_config.server_params.read().clone();
                 if let Err(err) = server_params.initialize_middleware().await {
                     error!("Failed to initialize middleware: {}", err);
@@ -254,80 +268,72 @@ impl SingleMode {
                     })
                     .collect::<Vec<_>>();
 
-                for listener in tokio_listeners.iter() {
-                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-
-                    let listener_info = Arc::new(listener.listener_info());
-                    let self_ref = self.clone();
-                    let listener = listener.clone();
+                tokio_listeners.iter().cloned().for_each(|listener| {
                     let shutdown_sender = shutdown_sender.clone();
                     let job_sender = job_sender.clone();
                     let nonblocking_sender = nonblocking_sender.clone();
-                    let workers_clone = thread_workers.clone();
-                    let listener_clone = listener.clone();
+
+                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
                     let mut shutdown_receiver = shutdown_sender.subscribe();
-                    let shutdown_receiver_clone = shutdown_receiver.clone();
+                    let mut acceptor = Acceptor{
+                        acceptor_args: Arc::new(
+                            AcceptorArgs{
+                                strategy: self.clone(),
+                                listener_info: listener.listener_info(),
+                                shutdown_receiver: shutdown_sender.subscribe(),
+                                job_sender: job_sender.clone(),
+                                nonblocking_sender: nonblocking_sender.clone(),
+                                server_params: server_params.clone()
+                            }
+                        ),
+                        join_set: JoinSet::new()
+                    };
+
+                    let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                    let acme_task_listener_clone = listener.clone();
                     listener_task_set.spawn(async move {
-                        listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                        acme_task_listener_clone.spawn_acme_event_task(shutdown_rx_for_acme_task).await;
                     });
 
                     listener_task_set.spawn(async move {
-                        let strategy_clone = self_ref.clone();
-                        let mut acceptor_task_set = JoinSet::new();
-                        loop {
-                            tokio::select! {
-                                accept_result = listener.accept() => match accept_result {
-                                    Ok(accept_result) => {
-                                        let strategy = strategy_clone.clone();
-                                        let listener_info = listener_info.clone();
-                                        let shutdown_receiver = shutdown_receiver.clone();
-                                        let job_sender = job_sender.clone();
-                                        let nonblocking_sender = nonblocking_sender.clone();
-                                        acceptor_task_set.spawn(async move {
-                                            strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
-                                        });
+                        loop {
+                            tokio::select! {
+                                accept_result = listener.accept() => {
+                                    match accept_result {
+                                        Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                        Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                    }
                                 },
-                                    Err(e) => debug!("Listener.accept failed {:?}", e),
-                                },
-                                _ = shutdown_receiver.changed() => {
-                                    break;
-                                }
-                                lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                    Ok(LifecycleEvent::Shutdown) => {
-                                        debug!("Received lifecycle event: {:?}", lifecycle_event);
-                                        shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
-                                        tokio::time::sleep(Duration::from_millis(25)).await;
-                                        for _i in 0..workers_clone.len() {
-                                            job_sender.send(RequestJob::Shutdown).await.unwrap();
-                                            nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
-                                        }
-                                        break;
+                                _ = shutdown_receiver.changed() => {
+                                    debug!("Shutdown requested via receiver");
+                                    break;
                                 },
-                                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                                    _ => {}
+                                lifecycle_event = lifecycle_rx.recv() => {
+                                    match lifecycle_event {
+                                        Ok(LifecycleEvent::Shutdown) => {
+                                            debug!("Received LifecycleEvent::Shutdown");
+                                            let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                            for _ in 0..worker_count {
+                                                let _ = job_sender.send(RequestJob::Shutdown).await;
+                                                let _ = nonblocking_sender.send(RequestJob::Shutdown).await;
+                                            }
+                                            break;
+                                        },
+                                        Err(e) => error!("Error receiving lifecycle event: {:?}", e),
+                                        _ => ()
+                                    }
+                                }
                             }
-                            }
-                        }
-
-                        let deadline = Instant::now()
-                            + Duration::from_secs_f64(self_ref.server_config.server_params.read().shutdown_timeout);
-                        tokio::select! {
-                            _ = async {
-                                while let Some(_res) = acceptor_task_set.join_next().await {}
-                            } => {},
-                            _ = tokio::time::sleep_until(tokio::time::Instant::from_std(deadline)) => {},
-                        }
-                    });
-
-                }
+                        }
+                        acceptor.join().await;
+                    });
+                });
 
                 if self.is_single_mode() {
                     self.invoke_hook("after_start");
                 }
 
                 while let Some(_res) = listener_task_set.join_next().await {}
-
-                // Explicitly drop all listeners to ensure file descriptors are released
                 drop(tokio_listeners);
 
                 Ok::<(), ItsiError>(())
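
The rewritten accept loop multiplexes three sources per listener: incoming connections, the shutdown watch channel, and lifecycle events, and it now hands each accepted stream to the `Acceptor` instead of an inline `serve_connection`. A self-contained sketch of that select shape with a plain `TcpListener` and a watch channel (the lifecycle broadcast and job channels are omitted):

    use tokio::{net::TcpListener, sync::watch};

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let (shutdown_tx, mut shutdown_rx) = watch::channel(false);

        let accept_loop = tokio::spawn(async move {
            loop {
                tokio::select! {
                    accept_result = listener.accept() => {
                        match accept_result {
                            // A real server hands the stream to a connection task here.
                            Ok((_stream, addr)) => println!("accepted {addr}"),
                            Err(e) => eprintln!("accept failed: {e:?}"),
                        }
                    },
                    // Any change on the watch channel ends the loop.
                    _ = shutdown_rx.changed() => break,
                }
            }
        });

        shutdown_tx.send(true).ok();
        accept_loop.await.unwrap();
        Ok(())
    }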
@@ -346,12 +352,10 @@ impl SingleMode {
         }
 
         shutdown_sender.send(RunningPhase::Shutdown).ok();
-        let deadline = Instant::now()
-            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
-
         runtime.shutdown_timeout(Duration::from_millis(100));
-
         debug!("Shutdown timeout finished.");
+
+        let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
         loop {
             if thread_workers
                 .iter()
@@ -380,66 +384,6 @@ impl SingleMode {
     pub fn is_single_mode(&self) -> bool {
         self.server_config.server_params.read().workers == 1
     }
-
-    pub(crate) async fn serve_connection(
-        &self,
-        stream: IoStream,
-        job_sender: async_channel::Sender<RequestJob>,
-        nonblocking_sender: async_channel::Sender<RequestJob>,
-        listener: Arc<ListenerInfo>,
-        shutdown_channel: watch::Receiver<RunningPhase>,
-    ) {
-        let addr = stream.addr();
-        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
-        let executor = self.executor.clone();
-        let mut shutdown_channel_clone = shutdown_channel.clone();
-        let mut executor = executor.clone();
-        let mut binding = executor.http1();
-        let shutdown_channel = shutdown_channel_clone.clone();
-
-        let service = ItsiHttpService {
-            inner: Arc::new(ItsiHttpServiceInner {
-                sender: job_sender.clone(),
-                nonblocking_sender: nonblocking_sender.clone(),
-                server_params: self.server_config.server_params.read().clone(),
-                listener,
-                addr: addr.to_string(),
-                shutdown_channel: shutdown_channel.clone(),
-            }),
-        };
-        let mut serve = Box::pin(
-            binding
-                .timer(TokioTimer::new())
-                .header_read_timeout(self.server_config.server_params.read().header_read_timeout)
-                .serve_connection_with_upgrades(io, service),
-        );
-
-        tokio::select! {
-            // Await the connection finishing naturally.
-            res = &mut serve => {
-                match res{
-                    Ok(()) => {
-                        debug!("Connection closed normally")
-                    },
-                    Err(res) => {
-                        debug!("Connection closed abruptly: {:?}", res)
-                    }
-                }
-                serve.as_mut().graceful_shutdown();
-            },
-            // A lifecycle event triggers shutdown.
-            _ = shutdown_channel_clone.changed() => {
-                // Initiate graceful shutdown.
-                serve.as_mut().graceful_shutdown();
-
-                // Now await the connection to finish shutting down.
-                if let Err(e) = serve.await {
-                    debug!("Connection shutdown error: {:?}", e);
-                }
-            }
-        }
-    }
-
 
     /// Attempts to reload the config "live"
     /// Not that when running in single mode this will not unload

data/ext/itsi_server/src/server/thread_worker.rs
@@ -23,9 +23,12 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 
-use crate::ruby_types::{
-    itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
-    itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+use crate::{
+    ruby_types::{
+        itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
+        itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+    },
+    server::process_worker::CORE_IDS,
 };
 
 use super::request_job::RequestJob;
@@ -184,9 +187,13 @@ impl ThreadWorker {
         let scheduler_class = self.scheduler_class;
         let params = self.params.clone();
         let self_ref = self.clone();
+        let id = self.id;
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
+                    if params.pin_worker_cores {
+                        core_affinity::set_for_current(CORE_IDS[(id as usize) % CORE_IDS.len()]);
+                    }
                     debug!("Ruby thread worker started");
                     if let Some(scheduler_class) = scheduler_class {
                         if let Err(err) = self_ref.fiber_accept_loop(
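
With `CORE_IDS` made `pub` in process_worker.rs, thread workers can opt in (via `pin_worker_cores`) to being pinned round-robin onto physical cores. A minimal sketch of that assignment with the `core_affinity` crate; the four plain OS threads stand in for itsi's Ruby worker threads:

    use core_affinity::CoreId;
    use std::sync::LazyLock;
    use std::thread;

    // Enumerate cores once; panics if the platform cannot report them.
    static CORE_IDS: LazyLock<Vec<CoreId>> =
        LazyLock::new(|| core_affinity::get_core_ids().expect("no core ids available"));

    fn main() {
        let handles: Vec<_> = (0..4usize)
            .map(|id| {
                thread::spawn(move || {
                    // Round-robin assignment: worker `id` always lands on the same core.
                    let core = CORE_IDS[id % CORE_IDS.len()];
                    if core_affinity::set_for_current(core) {
                        println!("worker {id} pinned to core {}", core.id);
                    }
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
    }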