itsi-scheduler 0.2.16 → 0.2.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +1 -1
  3. data/ext/itsi_acme/Cargo.toml +1 -1
  4. data/ext/itsi_scheduler/Cargo.toml +1 -1
  5. data/ext/itsi_server/Cargo.toml +3 -1
  6. data/ext/itsi_server/src/lib.rs +6 -1
  7. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  8. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +4 -4
  9. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  10. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +64 -33
  11. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  12. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +422 -110
  13. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +62 -15
  14. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  15. data/ext/itsi_server/src/server/binds/listener.rs +45 -7
  16. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  17. data/ext/itsi_server/src/server/http_message_types.rs +142 -9
  18. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  19. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  20. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  21. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  22. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -56
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +5 -7
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +5 -5
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +7 -10
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +4 -6
  31. data/ext/itsi_server/src/server/mod.rs +1 -0
  32. data/ext/itsi_server/src/server/process_worker.rs +3 -4
  33. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +16 -12
  34. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +83 -31
  35. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +166 -142
  36. data/ext/itsi_server/src/server/signal.rs +37 -9
  37. data/ext/itsi_server/src/server/thread_worker.rs +84 -69
  38. data/ext/itsi_server/src/services/itsi_http_service.rs +43 -43
  39. data/ext/itsi_server/src/services/static_file_server.rs +28 -47
  40. data/lib/itsi/scheduler/version.rb +1 -1
  41. metadata +2 -1
@@ -1,22 +1,50 @@
1
- use std::sync::{
2
- atomic::{AtomicBool, AtomicI8},
3
- LazyLock,
1
+ use std::{
2
+ collections::VecDeque,
3
+ sync::atomic::{AtomicBool, AtomicI8},
4
4
  };
5
5
 
6
6
  use nix::libc::{self, sighandler_t};
7
- use tokio::sync::{self, broadcast};
7
+ use parking_lot::Mutex;
8
+ use tokio::sync::broadcast;
8
9
 
9
10
  use super::lifecycle_event::LifecycleEvent;
10
11
 
11
12
  pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
12
13
  pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
13
- pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
14
- broadcast::Sender<LifecycleEvent>,
15
- broadcast::Receiver<LifecycleEvent>,
16
- )> = LazyLock::new(|| sync::broadcast::channel(5));
14
+ pub static SIGNAL_HANDLER_CHANNEL: Mutex<Option<broadcast::Sender<LifecycleEvent>>> =
15
+ Mutex::new(None);
16
+
17
+ pub static PENDING_QUEUE: Mutex<VecDeque<LifecycleEvent>> = Mutex::new(VecDeque::new());
18
+
19
+ pub fn subscribe_runtime_to_signals() -> broadcast::Receiver<LifecycleEvent> {
20
+ let mut guard = SIGNAL_HANDLER_CHANNEL.lock();
21
+ if let Some(sender) = guard.as_ref() {
22
+ return sender.subscribe();
23
+ }
24
+ let (sender, receiver) = broadcast::channel(5);
25
+ let sender_clone = sender.clone();
26
+ std::thread::spawn(move || {
27
+ std::thread::sleep(std::time::Duration::from_millis(50));
28
+ for event in PENDING_QUEUE.lock().drain(..) {
29
+ sender_clone.send(event).ok();
30
+ }
31
+ });
32
+
33
+ guard.replace(sender);
34
+
35
+ receiver
36
+ }
37
+
38
+ pub fn unsubscribe_runtime() {
39
+ SIGNAL_HANDLER_CHANNEL.lock().take();
40
+ }
17
41
 
18
42
  pub fn send_lifecycle_event(event: LifecycleEvent) {
19
- SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
43
+ if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() {
44
+ sender.send(event).ok();
45
+ } else {
46
+ PENDING_QUEUE.lock().push_back(event);
47
+ }
20
48
  }
21
49
 
22
50
  fn receive_signal(signum: i32, _: sighandler_t) {
@@ -3,13 +3,12 @@ use itsi_error::ItsiError;
3
3
  use itsi_rb_helpers::{
4
4
  call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
5
5
  };
6
- use itsi_tracing::{debug, error, warn};
6
+ use itsi_tracing::{debug, error};
7
7
  use magnus::{
8
8
  error::Result,
9
9
  value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
10
10
  Module, RClass, Ruby, Thread, Value,
11
11
  };
12
- use nix::unistd::Pid;
13
12
  use parking_lot::{Mutex, RwLock};
14
13
  use std::{
15
14
  ops::Deref,
@@ -17,8 +16,7 @@ use std::{
17
16
  atomic::{AtomicBool, AtomicU64, Ordering},
18
17
  Arc,
19
18
  },
20
- thread,
21
- time::{Duration, Instant, SystemTime, UNIX_EPOCH},
19
+ time::{Instant, SystemTime, UNIX_EPOCH},
22
20
  };
23
21
  use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
24
22
  use tracing::instrument;
@@ -35,7 +33,7 @@ use super::request_job::RequestJob;
35
33
  pub struct ThreadWorker {
36
34
  pub params: Arc<ServerParams>,
37
35
  pub id: u8,
38
- pub name: String,
36
+ pub worker_id: usize,
39
37
  pub request_id: AtomicU64,
40
38
  pub current_request_start: AtomicU64,
41
39
  pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -64,8 +62,11 @@ type ThreadWorkerBuildResult = Result<(
64
62
  Sender<RequestJob>,
65
63
  )>;
66
64
 
67
- #[instrument(name = "boot", parent=None, skip(params, pid))]
68
- pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
65
+ #[instrument(name = "boot", parent=None, skip(params, worker_id))]
66
+ pub fn build_thread_workers(
67
+ params: Arc<ServerParams>,
68
+ worker_id: usize,
69
+ ) -> ThreadWorkerBuildResult {
69
70
  let blocking_thread_count = params.threads;
70
71
  let nonblocking_thread_count = params.scheduler_threads;
71
72
  let ruby_thread_request_backlog_size: usize = params
@@ -83,7 +84,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
83
84
  ThreadWorker::new(
84
85
  params.clone(),
85
86
  id,
86
- format!("{:?}#{:?}", pid, id),
87
+ worker_id,
87
88
  blocking_receiver_ref.clone(),
88
89
  blocking_sender_ref.clone(),
89
90
  if nonblocking_thread_count.is_some() {
@@ -106,7 +107,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
106
107
  workers.push(ThreadWorker::new(
107
108
  params.clone(),
108
109
  id,
109
- format!("{:?}#{:?}", pid, id),
110
+ worker_id,
110
111
  nonblocking_receiver_ref.clone(),
111
112
  nonblocking_sender_ref.clone(),
112
113
  Some(scheduler_class),
@@ -141,7 +142,7 @@ impl ThreadWorker {
141
142
  pub fn new(
142
143
  params: Arc<ServerParams>,
143
144
  id: u8,
144
- name: String,
145
+ worker_id: usize,
145
146
  receiver: Arc<async_channel::Receiver<RequestJob>>,
146
147
  sender: Sender<RequestJob>,
147
148
  scheduler_class: Option<Opaque<Value>>,
@@ -149,9 +150,9 @@ impl ThreadWorker {
149
150
  let worker = Arc::new(Self {
150
151
  params,
151
152
  id,
153
+ worker_id,
152
154
  request_id: AtomicU64::new(0),
153
155
  current_request_start: AtomicU64::new(0),
154
- name,
155
156
  receiver,
156
157
  sender,
157
158
  thread: RwLock::new(None),
@@ -181,24 +182,24 @@ impl ThreadWorker {
181
182
  }
182
183
 
183
184
  pub fn run(self: Arc<Self>) -> Result<()> {
184
- let name = self.name.clone();
185
185
  let receiver = self.receiver.clone();
186
186
  let terminated = self.terminated.clone();
187
187
  let scheduler_class = self.scheduler_class;
188
188
  let params = self.params.clone();
189
189
  let self_ref = self.clone();
190
- let id = self.id;
190
+ let worker_id = self.worker_id;
191
191
  call_with_gvl(|_| {
192
192
  *self.thread.write() = Some(
193
193
  create_ruby_thread(move || {
194
194
  if params.pin_worker_cores {
195
- core_affinity::set_for_current(CORE_IDS[(id as usize) % CORE_IDS.len()]);
195
+ core_affinity::set_for_current(
196
+ CORE_IDS[((2 * worker_id) + 1) % CORE_IDS.len()],
197
+ );
196
198
  }
197
199
  debug!("Ruby thread worker started");
198
200
  if let Some(scheduler_class) = scheduler_class {
199
201
  if let Err(err) = self_ref.fiber_accept_loop(
200
202
  params,
201
- name,
202
203
  receiver,
203
204
  scheduler_class,
204
205
  terminated,
@@ -206,7 +207,7 @@ impl ThreadWorker {
206
207
  error!("Error in fiber_accept_loop: {:?}", err);
207
208
  }
208
209
  } else {
209
- self_ref.accept_loop(params, name, receiver, terminated);
210
+ self_ref.accept_loop(params, receiver, terminated);
210
211
  }
211
212
  })
212
213
  .ok_or_else(|| {
@@ -262,9 +263,14 @@ impl ThreadWorker {
262
263
  }
263
264
  }
264
265
  }
266
+
265
267
  for _ in 0..MAX_BATCH_SIZE {
266
268
  if let Ok(req) = receiver.try_recv() {
269
+ let should_break = matches!(req, RequestJob::Shutdown);
267
270
  batch.push(req);
271
+ if should_break {
272
+ break;
273
+ }
268
274
  } else {
269
275
  break;
270
276
  }
@@ -307,7 +313,9 @@ impl ThreadWorker {
307
313
  ItsiGrpcCall::internal_error(ruby, response, err)
308
314
  }
309
315
  }
310
- RequestJob::Shutdown => return true,
316
+ RequestJob::Shutdown => {
317
+ return true;
318
+ }
311
319
  }
312
320
  }
313
321
  false
@@ -339,15 +347,14 @@ impl ThreadWorker {
339
347
  if yield_result.is_err() {
340
348
  break;
341
349
  }
342
- })
350
+ });
343
351
  })
344
352
  }
345
353
 
346
- #[instrument(skip_all, fields(thread_worker=name))]
354
+ #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
347
355
  pub fn fiber_accept_loop(
348
356
  self: Arc<Self>,
349
357
  params: Arc<ServerParams>,
350
- name: String,
351
358
  receiver: Arc<async_channel::Receiver<RequestJob>>,
352
359
  scheduler_class: Opaque<Value>,
353
360
  terminated: Arc<AtomicBool>,
@@ -422,68 +429,76 @@ impl ThreadWorker {
422
429
  });
423
430
  }
424
431
 
425
- #[instrument(skip_all, fields(thread_worker=id))]
432
+ #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
426
433
  pub fn accept_loop(
427
434
  self: Arc<Self>,
428
435
  params: Arc<ServerParams>,
429
- id: String,
430
436
  receiver: Arc<async_channel::Receiver<RequestJob>>,
431
437
  terminated: Arc<AtomicBool>,
432
438
  ) {
433
- let ruby = Ruby::get().unwrap();
434
439
  let mut idle_counter = 0;
435
- let self_ref = self.clone();
436
440
  call_without_gvl(|| loop {
437
- if receiver.is_empty() {
438
- if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
439
- idle_counter = (idle_counter + 1) % oob_gc_threshold;
440
- if idle_counter == 0 {
441
- call_with_gvl(|_ruby| {
442
- ruby.gc_start();
443
- });
444
- }
445
- };
446
- }
447
441
  match receiver.recv_blocking() {
448
- Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
449
- self_ref.request_id.fetch_add(1, Ordering::Relaxed);
450
- self_ref.current_request_start.store(
451
- SystemTime::now()
452
- .duration_since(UNIX_EPOCH)
453
- .unwrap()
454
- .as_secs(),
455
- Ordering::Relaxed,
456
- );
457
- call_with_gvl(|_ruby| {
458
- request.process(&ruby, app_proc).ok();
459
- });
460
- if terminated.load(Ordering::Relaxed) {
461
- break;
442
+ Err(_) => break,
443
+ Ok(RequestJob::Shutdown) => break,
444
+ Ok(request_job) => call_with_gvl(|ruby| {
445
+ self.process_one(&ruby, request_job, &terminated);
446
+ while let Ok(request_job) = receiver.try_recv() {
447
+ if matches!(request_job, RequestJob::Shutdown) {
448
+ terminated.store(true, Ordering::Relaxed);
449
+ break;
450
+ }
451
+ self.process_one(&ruby, request_job, &terminated);
462
452
  }
463
- }
464
- Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
465
- self_ref.request_id.fetch_add(1, Ordering::Relaxed);
466
- self_ref.current_request_start.store(
467
- SystemTime::now()
468
- .duration_since(UNIX_EPOCH)
469
- .unwrap()
470
- .as_secs(),
471
- Ordering::Relaxed,
472
- );
473
- call_with_gvl(|_ruby| {
474
- request.process(&ruby, app_proc).ok();
475
- });
476
- if terminated.load(Ordering::Relaxed) {
477
- break;
453
+ if let Some(thresh) = params.oob_gc_responses_threshold {
454
+ idle_counter = (idle_counter + 1) % thresh;
455
+ if idle_counter == 0 {
456
+ ruby.gc_start();
457
+ }
478
458
  }
459
+ }),
460
+ };
461
+ if terminated.load(Ordering::Relaxed) {
462
+ break;
463
+ }
464
+ });
465
+ }
466
+
467
+ fn process_one(self: &Arc<Self>, ruby: &Ruby, job: RequestJob, terminated: &Arc<AtomicBool>) {
468
+ match job {
469
+ RequestJob::ProcessHttpRequest(request, app_proc) => {
470
+ if terminated.load(Ordering::Relaxed) {
471
+ request.response().unwrap().service_unavailable();
472
+ return;
479
473
  }
480
- Ok(RequestJob::Shutdown) => {
481
- break;
482
- }
483
- Err(_) => {
484
- thread::sleep(Duration::from_micros(1));
474
+ self.request_id.fetch_add(1, Ordering::Relaxed);
475
+ self.current_request_start.store(
476
+ SystemTime::now()
477
+ .duration_since(UNIX_EPOCH)
478
+ .unwrap()
479
+ .as_secs(),
480
+ Ordering::Relaxed,
481
+ );
482
+ request.process(ruby, app_proc).ok();
483
+ }
484
+
485
+ RequestJob::ProcessGrpcRequest(request, app_proc) => {
486
+ if terminated.load(Ordering::Relaxed) {
487
+ request.stream().unwrap().close().ok();
488
+ return;
485
489
  }
490
+ self.request_id.fetch_add(1, Ordering::Relaxed);
491
+ self.current_request_start.store(
492
+ SystemTime::now()
493
+ .duration_since(UNIX_EPOCH)
494
+ .unwrap()
495
+ .as_secs(),
496
+ Ordering::Relaxed,
497
+ );
498
+ request.process(ruby, app_proc).ok();
486
499
  }
487
- });
500
+
501
+ RequestJob::Shutdown => unreachable!(),
502
+ }
488
503
  }
489
504
  }
@@ -6,22 +6,20 @@ use crate::server::http_message_types::{
6
6
  use crate::server::lifecycle_event::LifecycleEvent;
7
7
  use crate::server::middleware_stack::MiddlewareLayer;
8
8
  use crate::server::serve_strategy::acceptor::AcceptorArgs;
9
- use crate::server::signal::send_lifecycle_event;
9
+ use crate::server::signal::{send_lifecycle_event, SHUTDOWN_REQUESTED};
10
10
  use chrono::{self, DateTime, Local};
11
11
  use either::Either;
12
12
  use http::header::ACCEPT_ENCODING;
13
13
  use http::{HeaderValue, Request};
14
14
  use hyper::body::Incoming;
15
- use hyper::service::Service;
16
- use itsi_error::ItsiError;
17
15
  use regex::Regex;
16
+ use smallvec::SmallVec;
17
+ use std::ops::Deref;
18
18
  use std::sync::atomic::{AtomicBool, Ordering};
19
- use std::sync::OnceLock;
19
+ use std::sync::{Arc, OnceLock};
20
20
  use std::time::{Duration, Instant};
21
- use tracing::error;
22
-
23
- use std::{future::Future, ops::Deref, pin::Pin, sync::Arc};
24
21
  use tokio::time::timeout;
22
+ use tracing::error;
25
23
 
26
24
  #[derive(Clone)]
27
25
  pub struct ItsiHttpService {
@@ -80,12 +78,14 @@ pub struct RequestContextInner {
80
78
  pub request_start_time: OnceLock<DateTime<Local>>,
81
79
  pub start_instant: Instant,
82
80
  pub if_none_match: OnceLock<Option<String>>,
83
- pub supported_encoding_set: OnceLock<Vec<HeaderValue>>,
81
+ pub supported_encoding_set: OnceLock<AcceptEncodingSet>,
84
82
  pub is_ruby_request: Arc<AtomicBool>,
85
83
  }
86
84
 
85
+ type AcceptEncodingSet = SmallVec<[HeaderValue; 2]>;
86
+
87
87
  impl HttpRequestContext {
88
- fn new(
88
+ pub fn new(
89
89
  service: ItsiHttpService,
90
90
  matching_pattern: Option<Arc<Regex>>,
91
91
  accept: ResponseFormat,
@@ -109,12 +109,14 @@ impl HttpRequestContext {
109
109
  }
110
110
 
111
111
  pub fn set_supported_encoding_set(&self, req: &HttpRequest) {
112
- self.inner.supported_encoding_set.get_or_init(move || {
113
- req.headers()
114
- .get_all(ACCEPT_ENCODING)
115
- .into_iter()
116
- .cloned()
117
- .collect::<Vec<_>>()
112
+ self.inner.supported_encoding_set.get_or_init(|| {
113
+ let mut set: AcceptEncodingSet = SmallVec::new();
114
+
115
+ for hv in req.headers().get_all(ACCEPT_ENCODING) {
116
+ set.push(hv.clone()); // clone ≈ 16 B struct copy
117
+ }
118
+
119
+ set
118
120
  });
119
121
  }
120
122
 
@@ -164,7 +166,7 @@ impl HttpRequestContext {
164
166
  self.inner.response_format.get().unwrap()
165
167
  }
166
168
 
167
- pub fn supported_encoding_set(&self) -> Option<&Vec<HeaderValue>> {
169
+ pub fn supported_encoding_set(&self) -> Option<&AcceptEncodingSet> {
168
170
  self.inner.supported_encoding_set.get()
169
171
  }
170
172
  }
@@ -173,13 +175,8 @@ const SERVER_TOKEN_VERSION: HeaderValue =
173
175
  HeaderValue::from_static(concat!("Itsi/", env!("CARGO_PKG_VERSION")));
174
176
  const SERVER_TOKEN_NAME: HeaderValue = HeaderValue::from_static("Itsi");
175
177
 
176
- impl Service<Request<Incoming>> for ItsiHttpService {
177
- type Response = HttpResponse;
178
- type Error = ItsiError;
179
- type Future = Pin<Box<dyn Future<Output = itsi_error::Result<HttpResponse>> + Send>>;
180
-
181
- fn call(&self, req: Request<Incoming>) -> Self::Future {
182
- let self_clone = self.clone();
178
+ impl ItsiHttpService {
179
+ pub async fn handle_request(&self, req: Request<Incoming>) -> itsi_error::Result<HttpResponse> {
183
180
  let mut req = req.limit();
184
181
  let accept: ResponseFormat = req.accept().into();
185
182
  let is_single_mode = self.server_params.workers == 1;
@@ -191,7 +188,7 @@ impl Service<Request<Incoming>> for ItsiHttpService {
191
188
  let token_preference = self.server_params.itsi_server_token_preference;
192
189
 
193
190
  let service_future = async move {
194
- let middleware_stack = self_clone
191
+ let middleware_stack = self
195
192
  .server_params
196
193
  .middleware
197
194
  .get()
@@ -202,7 +199,7 @@ impl Service<Request<Incoming>> for ItsiHttpService {
202
199
  let mut resp: Option<HttpResponse> = None;
203
200
 
204
201
  let mut context =
205
- HttpRequestContext::new(self_clone.clone(), matching_pattern, accept, irr_clone);
202
+ HttpRequestContext::new(self.clone(), matching_pattern, accept, irr_clone);
206
203
  let mut depth = 0;
207
204
 
208
205
  for (index, elm) in stack.iter().enumerate() {
@@ -243,28 +240,31 @@ impl Service<Request<Incoming>> for ItsiHttpService {
243
240
  };
244
241
 
245
242
  if let Some(timeout_duration) = request_timeout {
246
- Box::pin(async move {
247
- match timeout(timeout_duration, service_future).await {
248
- Ok(result) => result,
249
- Err(_) => {
250
- // If we're still running Ruby at this point, we can't just kill the
251
- // thread as it might be in a critical section.
252
- // Instead we must ask the worker to hot restart.
253
- if is_ruby_request.load(Ordering::Relaxed) {
254
- if is_single_mode {
255
- // If we're in single mode, re-exec the whole process
256
- send_lifecycle_event(LifecycleEvent::Restart);
257
- } else {
258
- // Otherwise we can shutdown the worker and rely on the master to restart it
259
- send_lifecycle_event(LifecycleEvent::Shutdown);
260
- }
243
+ match timeout(timeout_duration, service_future).await {
244
+ Ok(result) => result,
245
+ Err(_) => {
246
+ // If we're still running Ruby at this point, we can't just kill the
247
+ // thread as it might be in a critical section.
248
+ // Instead we must ask the worker to hot restart.
249
+ // But only if we're not already shutting down
250
+ if is_ruby_request.load(Ordering::Relaxed)
251
+ && !SHUTDOWN_REQUESTED.load(Ordering::SeqCst)
252
+ {
253
+ // When we've detected a timeout, use the safer send_lifecycle_event
254
+ // which will properly handle signal-safe state transitions
255
+ if is_single_mode {
256
+ // If we're in single mode, re-exec the whole process
257
+ send_lifecycle_event(LifecycleEvent::Restart);
258
+ } else {
259
+ // Otherwise we can shutdown the worker and rely on the master to restart it
260
+ send_lifecycle_event(LifecycleEvent::Shutdown);
261
261
  }
262
- Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
263
262
  }
263
+ Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
264
264
  }
265
- })
265
+ }
266
266
  } else {
267
- Box::pin(service_future)
267
+ service_future.await
268
268
  }
269
269
  }
270
270
  }