itsi-server 0.2.15 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79):
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +75 -73
  3. data/exe/itsi +6 -1
  4. data/ext/itsi_acme/Cargo.toml +1 -1
  5. data/ext/itsi_scheduler/Cargo.toml +1 -1
  6. data/ext/itsi_server/Cargo.lock +1 -1
  7. data/ext/itsi_server/Cargo.toml +3 -1
  8. data/ext/itsi_server/extconf.rb +3 -1
  9. data/ext/itsi_server/src/lib.rs +7 -1
  10. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  11. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +6 -6
  12. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  13. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +71 -42
  14. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  15. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
  16. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +32 -6
  17. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  18. data/ext/itsi_server/src/server/binds/listener.rs +49 -8
  19. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  20. data/ext/itsi_server/src/server/http_message_types.rs +143 -10
  21. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  22. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -58
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +6 -9
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +27 -42
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
  31. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
  32. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +8 -11
  33. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
  34. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  35. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
  36. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  37. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +13 -6
  38. data/ext/itsi_server/src/server/mod.rs +1 -0
  39. data/ext/itsi_server/src/server/process_worker.rs +5 -5
  40. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +100 -0
  41. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
  42. data/ext/itsi_server/src/server/serve_strategy/mod.rs +1 -0
  43. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +166 -206
  44. data/ext/itsi_server/src/server/signal.rs +37 -9
  45. data/ext/itsi_server/src/server/thread_worker.rs +92 -70
  46. data/ext/itsi_server/src/services/itsi_http_service.rs +67 -62
  47. data/ext/itsi_server/src/services/mime_types.rs +185 -183
  48. data/ext/itsi_server/src/services/rate_limiter.rs +16 -34
  49. data/ext/itsi_server/src/services/static_file_server.rs +35 -60
  50. data/lib/itsi/http_request.rb +31 -39
  51. data/lib/itsi/http_response.rb +5 -0
  52. data/lib/itsi/rack_env_pool.rb +59 -0
  53. data/lib/itsi/server/config/config_helpers.rb +1 -2
  54. data/lib/itsi/server/config/dsl.rb +5 -4
  55. data/lib/itsi/server/config/middleware/etag.md +3 -7
  56. data/lib/itsi/server/config/middleware/etag.rb +2 -4
  57. data/lib/itsi/server/config/middleware/proxy.rb +1 -1
  58. data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
  59. data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
  60. data/lib/itsi/server/config/options/include.rb +5 -2
  61. data/lib/itsi/server/config/options/listen_backlog.rb +1 -1
  62. data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
  63. data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
  64. data/lib/itsi/server/config/options/send_buffer_size.md +15 -0
  65. data/lib/itsi/server/config/options/send_buffer_size.rb +19 -0
  66. data/lib/itsi/server/config/options/writev.md +25 -0
  67. data/lib/itsi/server/config/options/writev.rb +19 -0
  68. data/lib/itsi/server/config.rb +43 -31
  69. data/lib/itsi/server/default_config/Itsi.rb +1 -4
  70. data/lib/itsi/server/grpc/grpc_call.rb +2 -0
  71. data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
  72. data/lib/itsi/server/rack/handler/itsi.rb +3 -1
  73. data/lib/itsi/server/rack_interface.rb +17 -12
  74. data/lib/itsi/server/route_tester.rb +1 -1
  75. data/lib/itsi/server/scheduler_interface.rb +2 -0
  76. data/lib/itsi/server/version.rb +1 -1
  77. data/lib/itsi/server.rb +1 -0
  78. data/lib/ruby_lsp/itsi/addon.rb +12 -13
  79. metadata +10 -1
@@ -3,13 +3,12 @@ use itsi_error::ItsiError;
3
3
  use itsi_rb_helpers::{
4
4
  call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
5
5
  };
6
- use itsi_tracing::{debug, error, warn};
6
+ use itsi_tracing::{debug, error};
7
7
  use magnus::{
8
8
  error::Result,
9
9
  value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
10
10
  Module, RClass, Ruby, Thread, Value,
11
11
  };
12
- use nix::unistd::Pid;
13
12
  use parking_lot::{Mutex, RwLock};
14
13
  use std::{
15
14
  ops::Deref,
@@ -17,22 +16,24 @@ use std::{
17
16
  atomic::{AtomicBool, AtomicU64, Ordering},
18
17
  Arc,
19
18
  },
20
- thread,
21
- time::{Duration, Instant, SystemTime, UNIX_EPOCH},
19
+ time::{Instant, SystemTime, UNIX_EPOCH},
22
20
  };
23
21
  use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
24
22
  use tracing::instrument;
25
23
 
26
- use crate::ruby_types::{
27
- itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
28
- itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
24
+ use crate::{
25
+ ruby_types::{
26
+ itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
27
+ itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
28
+ },
29
+ server::process_worker::CORE_IDS,
29
30
  };
30
31
 
31
32
  use super::request_job::RequestJob;
32
33
  pub struct ThreadWorker {
33
34
  pub params: Arc<ServerParams>,
34
35
  pub id: u8,
35
- pub name: String,
36
+ pub worker_id: usize,
36
37
  pub request_id: AtomicU64,
37
38
  pub current_request_start: AtomicU64,
38
39
  pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -61,8 +62,11 @@ type ThreadWorkerBuildResult = Result<(
61
62
  Sender<RequestJob>,
62
63
  )>;
63
64
 
64
- #[instrument(name = "boot", parent=None, skip(params, pid))]
65
- pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
65
+ #[instrument(name = "boot", parent=None, skip(params, worker_id))]
66
+ pub fn build_thread_workers(
67
+ params: Arc<ServerParams>,
68
+ worker_id: usize,
69
+ ) -> ThreadWorkerBuildResult {
66
70
  let blocking_thread_count = params.threads;
67
71
  let nonblocking_thread_count = params.scheduler_threads;
68
72
  let ruby_thread_request_backlog_size: usize = params
@@ -80,7 +84,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
80
84
  ThreadWorker::new(
81
85
  params.clone(),
82
86
  id,
83
- format!("{:?}#{:?}", pid, id),
87
+ worker_id,
84
88
  blocking_receiver_ref.clone(),
85
89
  blocking_sender_ref.clone(),
86
90
  if nonblocking_thread_count.is_some() {
@@ -103,7 +107,7 @@ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorker
103
107
  workers.push(ThreadWorker::new(
104
108
  params.clone(),
105
109
  id,
106
- format!("{:?}#{:?}", pid, id),
110
+ worker_id,
107
111
  nonblocking_receiver_ref.clone(),
108
112
  nonblocking_sender_ref.clone(),
109
113
  Some(scheduler_class),
@@ -138,7 +142,7 @@ impl ThreadWorker {
138
142
  pub fn new(
139
143
  params: Arc<ServerParams>,
140
144
  id: u8,
141
- name: String,
145
+ worker_id: usize,
142
146
  receiver: Arc<async_channel::Receiver<RequestJob>>,
143
147
  sender: Sender<RequestJob>,
144
148
  scheduler_class: Option<Opaque<Value>>,
@@ -146,9 +150,9 @@ impl ThreadWorker {
146
150
  let worker = Arc::new(Self {
147
151
  params,
148
152
  id,
153
+ worker_id,
149
154
  request_id: AtomicU64::new(0),
150
155
  current_request_start: AtomicU64::new(0),
151
- name,
152
156
  receiver,
153
157
  sender,
154
158
  thread: RwLock::new(None),
@@ -178,20 +182,24 @@ impl ThreadWorker {
178
182
  }
179
183
 
180
184
  pub fn run(self: Arc<Self>) -> Result<()> {
181
- let name = self.name.clone();
182
185
  let receiver = self.receiver.clone();
183
186
  let terminated = self.terminated.clone();
184
187
  let scheduler_class = self.scheduler_class;
185
188
  let params = self.params.clone();
186
189
  let self_ref = self.clone();
190
+ let worker_id = self.worker_id;
187
191
  call_with_gvl(|_| {
188
192
  *self.thread.write() = Some(
189
193
  create_ruby_thread(move || {
194
+ if params.pin_worker_cores {
195
+ core_affinity::set_for_current(
196
+ CORE_IDS[((2 * worker_id) + 1) % CORE_IDS.len()],
197
+ );
198
+ }
190
199
  debug!("Ruby thread worker started");
191
200
  if let Some(scheduler_class) = scheduler_class {
192
201
  if let Err(err) = self_ref.fiber_accept_loop(
193
202
  params,
194
- name,
195
203
  receiver,
196
204
  scheduler_class,
197
205
  terminated,
@@ -199,7 +207,7 @@ impl ThreadWorker {
199
207
  error!("Error in fiber_accept_loop: {:?}", err);
200
208
  }
201
209
  } else {
202
- self_ref.accept_loop(params, name, receiver, terminated);
210
+ self_ref.accept_loop(params, receiver, terminated);
203
211
  }
204
212
  })
205
213
  .ok_or_else(|| {
@@ -255,9 +263,14 @@ impl ThreadWorker {
255
263
  }
256
264
  }
257
265
  }
266
+
258
267
  for _ in 0..MAX_BATCH_SIZE {
259
268
  if let Ok(req) = receiver.try_recv() {
269
+ let should_break = matches!(req, RequestJob::Shutdown);
260
270
  batch.push(req);
271
+ if should_break {
272
+ break;
273
+ }
261
274
  } else {
262
275
  break;
263
276
  }
@@ -300,7 +313,9 @@ impl ThreadWorker {
300
313
  ItsiGrpcCall::internal_error(ruby, response, err)
301
314
  }
302
315
  }
303
- RequestJob::Shutdown => return true,
316
+ RequestJob::Shutdown => {
317
+ return true;
318
+ }
304
319
  }
305
320
  }
306
321
  false
@@ -332,15 +347,14 @@ impl ThreadWorker {
332
347
  if yield_result.is_err() {
333
348
  break;
334
349
  }
335
- })
350
+ });
336
351
  })
337
352
  }
338
353
 
339
- #[instrument(skip_all, fields(thread_worker=name))]
354
+ #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
340
355
  pub fn fiber_accept_loop(
341
356
  self: Arc<Self>,
342
357
  params: Arc<ServerParams>,
343
- name: String,
344
358
  receiver: Arc<async_channel::Receiver<RequestJob>>,
345
359
  scheduler_class: Opaque<Value>,
346
360
  terminated: Arc<AtomicBool>,
@@ -415,68 +429,76 @@ impl ThreadWorker {
415
429
  });
416
430
  }
417
431
 
418
- #[instrument(skip_all, fields(thread_worker=id))]
432
+ #[instrument(skip_all, fields(thread_worker=format!("{}:{}", self.id, self.worker_id)))]
419
433
  pub fn accept_loop(
420
434
  self: Arc<Self>,
421
435
  params: Arc<ServerParams>,
422
- id: String,
423
436
  receiver: Arc<async_channel::Receiver<RequestJob>>,
424
437
  terminated: Arc<AtomicBool>,
425
438
  ) {
426
- let ruby = Ruby::get().unwrap();
427
439
  let mut idle_counter = 0;
428
- let self_ref = self.clone();
429
440
  call_without_gvl(|| loop {
430
- if receiver.is_empty() {
431
- if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
432
- idle_counter = (idle_counter + 1) % oob_gc_threshold;
433
- if idle_counter == 0 {
434
- call_with_gvl(|_ruby| {
435
- ruby.gc_start();
436
- });
437
- }
438
- };
439
- }
440
441
  match receiver.recv_blocking() {
441
- Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
442
- self_ref.request_id.fetch_add(1, Ordering::Relaxed);
443
- self_ref.current_request_start.store(
444
- SystemTime::now()
445
- .duration_since(UNIX_EPOCH)
446
- .unwrap()
447
- .as_secs(),
448
- Ordering::Relaxed,
449
- );
450
- call_with_gvl(|_ruby| {
451
- request.process(&ruby, app_proc).ok();
452
- });
453
- if terminated.load(Ordering::Relaxed) {
454
- break;
442
+ Err(_) => break,
443
+ Ok(RequestJob::Shutdown) => break,
444
+ Ok(request_job) => call_with_gvl(|ruby| {
445
+ self.process_one(&ruby, request_job, &terminated);
446
+ while let Ok(request_job) = receiver.try_recv() {
447
+ if matches!(request_job, RequestJob::Shutdown) {
448
+ terminated.store(true, Ordering::Relaxed);
449
+ break;
450
+ }
451
+ self.process_one(&ruby, request_job, &terminated);
455
452
  }
456
- }
457
- Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
458
- self_ref.request_id.fetch_add(1, Ordering::Relaxed);
459
- self_ref.current_request_start.store(
460
- SystemTime::now()
461
- .duration_since(UNIX_EPOCH)
462
- .unwrap()
463
- .as_secs(),
464
- Ordering::Relaxed,
465
- );
466
- call_with_gvl(|_ruby| {
467
- request.process(&ruby, app_proc).ok();
468
- });
469
- if terminated.load(Ordering::Relaxed) {
470
- break;
453
+ if let Some(thresh) = params.oob_gc_responses_threshold {
454
+ idle_counter = (idle_counter + 1) % thresh;
455
+ if idle_counter == 0 {
456
+ ruby.gc_start();
457
+ }
471
458
  }
459
+ }),
460
+ };
461
+ if terminated.load(Ordering::Relaxed) {
462
+ break;
463
+ }
464
+ });
465
+ }
466
+
467
+ fn process_one(self: &Arc<Self>, ruby: &Ruby, job: RequestJob, terminated: &Arc<AtomicBool>) {
468
+ match job {
469
+ RequestJob::ProcessHttpRequest(request, app_proc) => {
470
+ if terminated.load(Ordering::Relaxed) {
471
+ request.response().unwrap().service_unavailable();
472
+ return;
472
473
  }
473
- Ok(RequestJob::Shutdown) => {
474
- break;
475
- }
476
- Err(_) => {
477
- thread::sleep(Duration::from_micros(1));
474
+ self.request_id.fetch_add(1, Ordering::Relaxed);
475
+ self.current_request_start.store(
476
+ SystemTime::now()
477
+ .duration_since(UNIX_EPOCH)
478
+ .unwrap()
479
+ .as_secs(),
480
+ Ordering::Relaxed,
481
+ );
482
+ request.process(ruby, app_proc).ok();
483
+ }
484
+
485
+ RequestJob::ProcessGrpcRequest(request, app_proc) => {
486
+ if terminated.load(Ordering::Relaxed) {
487
+ request.stream().unwrap().close().ok();
488
+ return;
478
489
  }
490
+ self.request_id.fetch_add(1, Ordering::Relaxed);
491
+ self.current_request_start.store(
492
+ SystemTime::now()
493
+ .duration_since(UNIX_EPOCH)
494
+ .unwrap()
495
+ .as_secs(),
496
+ Ordering::Relaxed,
497
+ );
498
+ request.process(ruby, app_proc).ok();
479
499
  }
480
- });
500
+
501
+ RequestJob::Shutdown => unreachable!(),
502
+ }
481
503
  }
482
504
  }
@@ -1,30 +1,25 @@
1
1
  use crate::default_responses::{NOT_FOUND_RESPONSE, TIMEOUT_RESPONSE};
2
- use crate::ruby_types::itsi_server::itsi_server_config::{ItsiServerTokenPreference, ServerParams};
3
- use crate::server::binds::listener::ListenerInfo;
2
+ use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerTokenPreference;
4
3
  use crate::server::http_message_types::{
5
4
  ConversionExt, HttpRequest, HttpResponse, RequestExt, ResponseFormat,
6
5
  };
7
6
  use crate::server::lifecycle_event::LifecycleEvent;
8
7
  use crate::server::middleware_stack::MiddlewareLayer;
9
- use crate::server::request_job::RequestJob;
10
- use crate::server::serve_strategy::single_mode::RunningPhase;
11
- use crate::server::signal::send_lifecycle_event;
8
+ use crate::server::serve_strategy::acceptor::AcceptorArgs;
9
+ use crate::server::signal::{send_lifecycle_event, SHUTDOWN_REQUESTED};
12
10
  use chrono::{self, DateTime, Local};
13
11
  use either::Either;
14
12
  use http::header::ACCEPT_ENCODING;
15
13
  use http::{HeaderValue, Request};
16
14
  use hyper::body::Incoming;
17
- use hyper::service::Service;
18
- use itsi_error::ItsiError;
19
15
  use regex::Regex;
16
+ use smallvec::SmallVec;
17
+ use std::ops::Deref;
20
18
  use std::sync::atomic::{AtomicBool, Ordering};
21
- use std::sync::OnceLock;
19
+ use std::sync::{Arc, OnceLock};
22
20
  use std::time::{Duration, Instant};
23
- use tracing::error;
24
-
25
- use std::{future::Future, ops::Deref, pin::Pin, sync::Arc};
26
- use tokio::sync::watch::{self};
27
21
  use tokio::time::timeout;
22
+ use tracing::error;
28
23
 
29
24
  #[derive(Clone)]
30
25
  pub struct ItsiHttpService {
@@ -40,12 +35,16 @@ impl Deref for ItsiHttpService {
40
35
  }
41
36
 
42
37
  pub struct ItsiHttpServiceInner {
43
- pub sender: async_channel::Sender<RequestJob>,
44
- pub nonblocking_sender: async_channel::Sender<RequestJob>,
45
- pub server_params: Arc<ServerParams>,
46
- pub listener: Arc<ListenerInfo>,
38
+ pub acceptor_args: Arc<AcceptorArgs>,
47
39
  pub addr: String,
48
- pub shutdown_channel: watch::Receiver<RunningPhase>,
40
+ }
41
+
42
+ impl Deref for ItsiHttpServiceInner {
43
+ type Target = Arc<AcceptorArgs>;
44
+
45
+ fn deref(&self) -> &Self::Target {
46
+ &self.acceptor_args
47
+ }
49
48
  }
50
49
 
51
50
  #[derive(Clone)]
@@ -79,12 +78,14 @@ pub struct RequestContextInner {
79
78
  pub request_start_time: OnceLock<DateTime<Local>>,
80
79
  pub start_instant: Instant,
81
80
  pub if_none_match: OnceLock<Option<String>>,
82
- pub supported_encoding_set: OnceLock<Vec<HeaderValue>>,
81
+ pub supported_encoding_set: OnceLock<AcceptEncodingSet>,
83
82
  pub is_ruby_request: Arc<AtomicBool>,
84
83
  }
85
84
 
85
+ type AcceptEncodingSet = SmallVec<[HeaderValue; 2]>;
86
+
86
87
  impl HttpRequestContext {
87
- fn new(
88
+ pub fn new(
88
89
  service: ItsiHttpService,
89
90
  matching_pattern: Option<Arc<Regex>>,
90
91
  accept: ResponseFormat,
@@ -108,12 +109,14 @@ impl HttpRequestContext {
108
109
  }
109
110
 
110
111
  pub fn set_supported_encoding_set(&self, req: &HttpRequest) {
111
- self.inner.supported_encoding_set.get_or_init(move || {
112
- req.headers()
113
- .get_all(ACCEPT_ENCODING)
114
- .into_iter()
115
- .cloned()
116
- .collect::<Vec<_>>()
112
+ self.inner.supported_encoding_set.get_or_init(|| {
113
+ let mut set: AcceptEncodingSet = SmallVec::new();
114
+
115
+ for hv in req.headers().get_all(ACCEPT_ENCODING) {
116
+ set.push(hv.clone()); // clone ≈ 16 B struct copy
117
+ }
118
+
119
+ set
117
120
  });
118
121
  }
119
122
 
@@ -163,7 +166,7 @@ impl HttpRequestContext {
163
166
  self.inner.response_format.get().unwrap()
164
167
  }
165
168
 
166
- pub fn supported_encoding_set(&self) -> Option<&Vec<HeaderValue>> {
169
+ pub fn supported_encoding_set(&self) -> Option<&AcceptEncodingSet> {
167
170
  self.inner.supported_encoding_set.get()
168
171
  }
169
172
  }
@@ -172,32 +175,31 @@ const SERVER_TOKEN_VERSION: HeaderValue =
172
175
  HeaderValue::from_static(concat!("Itsi/", env!("CARGO_PKG_VERSION")));
173
176
  const SERVER_TOKEN_NAME: HeaderValue = HeaderValue::from_static("Itsi");
174
177
 
175
- impl Service<Request<Incoming>> for ItsiHttpService {
176
- type Response = HttpResponse;
177
- type Error = ItsiError;
178
- type Future = Pin<Box<dyn Future<Output = itsi_error::Result<HttpResponse>> + Send>>;
179
-
180
- fn call(&self, req: Request<Incoming>) -> Self::Future {
181
- let params = self.server_params.clone();
182
- let self_clone = self.clone();
178
+ impl ItsiHttpService {
179
+ pub async fn handle_request(&self, req: Request<Incoming>) -> itsi_error::Result<HttpResponse> {
183
180
  let mut req = req.limit();
184
181
  let accept: ResponseFormat = req.accept().into();
185
- let accept_clone = accept.clone();
186
182
  let is_single_mode = self.server_params.workers == 1;
187
183
 
188
184
  let request_timeout = self.server_params.request_timeout;
189
185
  let is_ruby_request = Arc::new(AtomicBool::new(false));
190
186
  let irr_clone = is_ruby_request.clone();
187
+
188
+ let token_preference = self.server_params.itsi_server_token_preference;
189
+
191
190
  let service_future = async move {
191
+ let middleware_stack = self
192
+ .server_params
193
+ .middleware
194
+ .get()
195
+ .unwrap()
196
+ .stack_for(&req)
197
+ .unwrap();
198
+ let (stack, matching_pattern) = middleware_stack;
192
199
  let mut resp: Option<HttpResponse> = None;
193
- let (stack, matching_pattern) = params.middleware.get().unwrap().stack_for(&req)?;
194
200
 
195
- let mut context = HttpRequestContext::new(
196
- self_clone,
197
- matching_pattern,
198
- accept_clone.clone(),
199
- irr_clone,
200
- );
201
+ let mut context =
202
+ HttpRequestContext::new(self.clone(), matching_pattern, accept, irr_clone);
201
203
  let mut depth = 0;
202
204
 
203
205
  for (index, elm) in stack.iter().enumerate() {
@@ -217,14 +219,14 @@ impl Service<Request<Incoming>> for ItsiHttpService {
217
219
 
218
220
  let mut resp = match resp {
219
221
  Some(r) => r,
220
- None => return Ok(NOT_FOUND_RESPONSE.to_http_response(accept_clone).await),
222
+ None => return Ok(NOT_FOUND_RESPONSE.to_http_response(accept).await),
221
223
  };
222
224
 
223
225
  for elm in stack.iter().rev().skip(stack.len() - depth - 1) {
224
226
  resp = elm.after(resp, &mut context).await;
225
227
  }
226
228
 
227
- match params.itsi_server_token_preference {
229
+ match token_preference {
228
230
  ItsiServerTokenPreference::Version => {
229
231
  resp.headers_mut().insert("Server", SERVER_TOKEN_VERSION);
230
232
  }
@@ -238,28 +240,31 @@ impl Service<Request<Incoming>> for ItsiHttpService {
238
240
  };
239
241
 
240
242
  if let Some(timeout_duration) = request_timeout {
241
- Box::pin(async move {
242
- match timeout(timeout_duration, service_future).await {
243
- Ok(result) => result,
244
- Err(_) => {
245
- // If we're still running Ruby at this point, we can't just kill the
246
- // thread as it might be in a critical section.
247
- // Instead we must ask the worker to hot restart.
248
- if is_ruby_request.load(Ordering::Relaxed) {
249
- if is_single_mode {
250
- // If we're in single mode, re-exec the whole process
251
- send_lifecycle_event(LifecycleEvent::Restart);
252
- } else {
253
- // Otherwise we can shutdown the worker and rely on the master to restart it
254
- send_lifecycle_event(LifecycleEvent::Shutdown);
255
- }
243
+ match timeout(timeout_duration, service_future).await {
244
+ Ok(result) => result,
245
+ Err(_) => {
246
+ // If we're still running Ruby at this point, we can't just kill the
247
+ // thread as it might be in a critical section.
248
+ // Instead we must ask the worker to hot restart.
249
+ // But only if we're not already shutting down
250
+ if is_ruby_request.load(Ordering::Relaxed)
251
+ && !SHUTDOWN_REQUESTED.load(Ordering::SeqCst)
252
+ {
253
+ // When we've detected a timeout, use the safer send_lifecycle_event
254
+ // which will properly handle signal-safe state transitions
255
+ if is_single_mode {
256
+ // If we're in single mode, re-exec the whole process
257
+ send_lifecycle_event(LifecycleEvent::Restart);
258
+ } else {
259
+ // Otherwise we can shutdown the worker and rely on the master to restart it
260
+ send_lifecycle_event(LifecycleEvent::Shutdown);
256
261
  }
257
- Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
258
262
  }
263
+ Ok(TIMEOUT_RESPONSE.to_http_response(accept).await)
259
264
  }
260
- })
265
+ }
261
266
  } else {
262
- Box::pin(service_future)
267
+ service_future.await
263
268
  }
264
269
  }
265
270
  }