itsi 0.2.15 → 0.2.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/Cargo.lock +74 -74
  4. data/crates/itsi_scheduler/Cargo.toml +1 -1
  5. data/crates/itsi_scheduler/extconf.rb +3 -1
  6. data/crates/itsi_server/Cargo.lock +1 -1
  7. data/crates/itsi_server/Cargo.toml +1 -1
  8. data/crates/itsi_server/extconf.rb +3 -1
  9. data/crates/itsi_server/src/lib.rs +1 -0
  10. data/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs +2 -2
  11. data/crates/itsi_server/src/ruby_types/itsi_http_request.rs +9 -11
  12. data/crates/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +6 -1
  13. data/crates/itsi_server/src/server/binds/listener.rs +4 -1
  14. data/crates/itsi_server/src/server/http_message_types.rs +1 -1
  15. data/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +32 -34
  16. data/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +3 -4
  17. data/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs +23 -38
  18. data/crates/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
  19. data/crates/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
  20. data/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +1 -1
  21. data/crates/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
  22. data/crates/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
  23. data/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +12 -3
  24. data/crates/itsi_server/src/server/process_worker.rs +2 -1
  25. data/crates/itsi_server/src/server/serve_strategy/acceptor.rs +96 -0
  26. data/crates/itsi_server/src/server/serve_strategy/mod.rs +1 -0
  27. data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +80 -136
  28. data/crates/itsi_server/src/server/thread_worker.rs +10 -3
  29. data/crates/itsi_server/src/services/itsi_http_service.rs +26 -21
  30. data/crates/itsi_server/src/services/mime_types.rs +185 -183
  31. data/crates/itsi_server/src/services/rate_limiter.rs +16 -34
  32. data/crates/itsi_server/src/services/static_file_server.rs +7 -13
  33. data/docs/content/features/_index.md +1 -1
  34. data/examples/rails_with_static_assets/Gemfile.lock +1 -1
  35. data/examples/rails_with_static_assets/Itsi.rb +4 -1
  36. data/gems/scheduler/Cargo.lock +15 -15
  37. data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
  38. data/gems/server/Cargo.lock +73 -73
  39. data/gems/server/lib/itsi/server/config/config_helpers.rb +1 -2
  40. data/gems/server/lib/itsi/server/config/middleware/etag.md +3 -7
  41. data/gems/server/lib/itsi/server/config/middleware/etag.rb +2 -4
  42. data/gems/server/lib/itsi/server/config/options/listen_backlog.rb +1 -1
  43. data/gems/server/lib/itsi/server/config/options/send_buffer_size.md +15 -0
  44. data/gems/server/lib/itsi/server/config/options/send_buffer_size.rb +19 -0
  45. data/gems/server/lib/itsi/server/config.rb +24 -25
  46. data/gems/server/lib/itsi/server/route_tester.rb +1 -1
  47. data/gems/server/lib/itsi/server/version.rb +1 -1
  48. data/gems/server/test/middleware/etag.rb +3 -3
  49. data/gems/server/test/options/ruby_thread_request_backlog_size.rb +2 -3
  50. data/lib/itsi/version.rb +1 -1
  51. data/tasks.txt +8 -7
  52. metadata +8 -5
data/crates/itsi_server/src/server/serve_strategy/acceptor.rs (new file)
@@ -0,0 +1,96 @@
+use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration};
+
+use hyper_util::rt::TokioIo;
+use tokio::task::JoinSet;
+use tracing::debug;
+
+use crate::{
+    ruby_types::itsi_server::itsi_server_config::ServerParams,
+    server::{binds::listener::ListenerInfo, io_stream::IoStream, request_job::RequestJob},
+    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
+};
+
+use super::single_mode::{RunningPhase, SingleMode};
+
+pub struct Acceptor {
+    pub acceptor_args: Arc<AcceptorArgs>,
+    pub join_set: JoinSet<()>,
+}
+
+impl Deref for Acceptor {
+    type Target = Arc<AcceptorArgs>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.acceptor_args
+    }
+}
+
+pub struct AcceptorArgs {
+    pub strategy: Arc<SingleMode>,
+    pub listener_info: ListenerInfo,
+    pub shutdown_receiver: tokio::sync::watch::Receiver<RunningPhase>,
+    pub job_sender: async_channel::Sender<RequestJob>,
+    pub nonblocking_sender: async_channel::Sender<RequestJob>,
+    pub server_params: Arc<ServerParams>,
+}
+
+impl Acceptor {
+    pub(crate) async fn serve_connection(&mut self, stream: IoStream) {
+        let addr = stream.addr();
+        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
+        let mut shutdown_channel = self.shutdown_receiver.clone();
+        let acceptor_args = self.acceptor_args.clone();
+        self.join_set.spawn(async move {
+            let executor = &acceptor_args.strategy.executor;
+            let mut serve = Box::pin(executor.serve_connection_with_upgrades(
+                io,
+                ItsiHttpService {
+                    inner: Arc::new(ItsiHttpServiceInner {
+                        acceptor_args: acceptor_args.clone(),
+                        addr: addr.to_string(),
+                    }),
+                },
+            ));
+
+            tokio::select! {
+                // Await the connection finishing naturally.
+                res = &mut serve => {
+                    match res {
+                        Ok(()) => {
+                            debug!("Connection closed normally");
+                        },
+                        Err(res) => {
+                            debug!("Connection closed abruptly: {:?}", res);
+                        }
+                    }
+                    serve.as_mut().graceful_shutdown();
+                },
+                // A lifecycle event triggers shutdown.
+                _ = shutdown_channel.changed() => {
+                    // Initiate graceful shutdown.
+                    serve.as_mut().graceful_shutdown();
+
+                    // Now await the connection to finish shutting down.
+                    if let Err(e) = serve.await {
+                        debug!("Connection shutdown error: {:?}", e);
+                    }
+                }
+            }
+        });
+    }
+
+    pub async fn join(&mut self) {
+        // Join all acceptor tasks with timeout
+        let deadline = tokio::time::Instant::now()
+            + Duration::from_secs_f64(self.server_params.shutdown_timeout);
+        let sleep_until = tokio::time::sleep_until(deadline);
+        tokio::select! {
+            _ = async {
+                while (self.join_set.join_next().await).is_some() {}
+            } => {},
+            _ = sleep_until => {
+                debug!("Shutdown timeout reached; abandoning remaining acceptor tasks.");
+            }
+        }
+    }
+}
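
Note: the core of serve_connection above is the select! race between the connection future and a watch channel carrying the shutdown phase. A minimal, self-contained sketch of that pattern (assuming only the tokio crate; the Phase enum, connection stand-in, and timings are illustrative, not from the diff):

use std::time::Duration;
use tokio::{sync::watch, task::JoinSet, time::sleep};

#[derive(Clone, Debug)]
enum Phase {
    Running,
    Shutdown,
}

#[tokio::main]
async fn main() {
    let (shutdown_tx, shutdown_rx) = watch::channel(Phase::Running);
    let mut join_set = JoinSet::new();

    for id in 0..3u32 {
        let mut shutdown = shutdown_rx.clone();
        join_set.spawn(async move {
            // Stand-in for hyper's serve_connection_with_upgrades future.
            let serve = sleep(Duration::from_secs(60));
            tokio::pin!(serve);
            tokio::select! {
                // The connection finished naturally.
                _ = &mut serve => println!("conn {id}: closed normally"),
                // The shutdown phase changed: wind down gracefully instead.
                _ = shutdown.changed() => println!("conn {id}: graceful shutdown"),
            }
        });
    }

    sleep(Duration::from_millis(50)).await;
    shutdown_tx.send(Phase::Shutdown).unwrap();
    while join_set.join_next().await.is_some() {}
}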
data/crates/itsi_server/src/server/serve_strategy/mod.rs
@@ -4,6 +4,7 @@ use cluster_mode::ClusterMode;
 use itsi_error::Result;
 use single_mode::SingleMode;
 
+pub mod acceptor;
 pub mod cluster_mode;
 pub mod single_mode;
 
data/crates/itsi_server/src/server/serve_strategy/single_mode.rs
@@ -1,17 +1,15 @@
 use crate::{
     ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
     server::{
-        binds::listener::ListenerInfo,
-        io_stream::IoStream,
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
+        serve_strategy::acceptor::{Acceptor, AcceptorArgs},
         signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
         thread_worker::{build_thread_workers, ThreadWorker},
     },
-    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
 };
 use hyper_util::{
-    rt::{TokioExecutor, TokioIo, TokioTimer},
+    rt::{TokioExecutor, TokioTimer},
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
@@ -22,13 +20,10 @@ use itsi_tracing::{debug, error, info};
 use magnus::{value::ReprValue, Value};
 use nix::unistd::Pid;
 use parking_lot::RwLock;
+use std::sync::Arc;
 use std::{
     collections::HashMap,
-    pin::Pin,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
+    sync::atomic::{AtomicBool, Ordering},
     thread::sleep,
     time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
@@ -60,8 +55,22 @@ impl SingleMode {
     #[instrument(parent=None, skip_all)]
     pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
+        let mut executor = Builder::new(TokioExecutor::new());
+        executor
+            .http1()
+            .header_read_timeout(server_config.server_params.read().header_read_timeout)
+            .writev(true)
+            .timer(TokioTimer::new());
+        executor
+            .http2()
+            .max_concurrent_streams(100)
+            .max_local_error_reset_streams(100)
+            .enable_connect_protocol()
+            .max_header_list_size(10 * 1024 * 1024)
+            .max_send_buf_size(16 * 1024 * 1024);
+
         Ok(Self {
-            executor: Builder::new(TokioExecutor::new()),
+            executor,
             server_config,
             lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
             restart_requested: AtomicBool::new(false),
@@ -82,7 +91,11 @@ impl SingleMode {
         };
         builder
             .thread_name("itsi-server-accept-loop")
-            .thread_stack_size(3 * 1024 * 1024)
+            .thread_stack_size(512 * 1024)
+            .max_blocking_threads(4)
+            .event_interval(16)
+            .global_queue_interval(64)
+            .max_io_events_per_tick(256)
             .enable_all()
             .build()
             .expect("Failed to build Tokio runtime")
@@ -214,9 +227,6 @@ impl SingleMode {
 
     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let mut listener_task_set = JoinSet::new();
-        let runtime = self.build_runtime();
-
         let (thread_workers, job_sender, nonblocking_sender) =
             build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
                 .inspect_err(|e| {
@@ -225,11 +235,13 @@ impl SingleMode {
                 }
             })?;
 
+        let worker_count = thread_workers.len();
         info!(
-            threads = thread_workers.len(),
+            threads = worker_count,
             binds = format!("{:?}", self.server_config.server_params.read().binds)
         );
 
+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
         if monitor_thread.is_none() {
@@ -240,8 +252,10 @@ impl SingleMode {
         if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
             return Ok(());
         }
+        let runtime = self.build_runtime();
         let result = runtime.block_on(
             async {
+                let mut listener_task_set = JoinSet::new();
                 let server_params = self.server_config.server_params.read().clone();
                 if let Err(err) = server_params.initialize_middleware().await {
                     error!("Failed to initialize middleware: {}", err);
@@ -254,80 +268,72 @@ impl SingleMode {
                 })
                 .collect::<Vec<_>>();
 
-                for listener in tokio_listeners.iter() {
-                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-
-                    let listener_info = Arc::new(listener.listener_info());
-                    let self_ref = self.clone();
-                    let listener = listener.clone();
+                tokio_listeners.iter().cloned().for_each(|listener| {
                     let shutdown_sender = shutdown_sender.clone();
                     let job_sender = job_sender.clone();
                     let nonblocking_sender = nonblocking_sender.clone();
-                    let workers_clone = thread_workers.clone();
-                    let listener_clone = listener.clone();
+
+                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
                     let mut shutdown_receiver = shutdown_sender.subscribe();
-                    let shutdown_receiver_clone = shutdown_receiver.clone();
+                    let mut acceptor = Acceptor{
+                        acceptor_args: Arc::new(
+                            AcceptorArgs{
+                                strategy: self.clone(),
+                                listener_info: listener.listener_info(),
+                                shutdown_receiver: shutdown_sender.subscribe(),
+                                job_sender: job_sender.clone(),
+                                nonblocking_sender: nonblocking_sender.clone(),
+                                server_params: server_params.clone()
+                            }
+                        ),
+                        join_set: JoinSet::new()
+                    };
+
+                    let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                    let acme_task_listener_clone = listener.clone();
                     listener_task_set.spawn(async move {
-                        listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                        acme_task_listener_clone.spawn_acme_event_task(shutdown_rx_for_acme_task).await;
                    });
 
                    listener_task_set.spawn(async move {
-                        let strategy_clone = self_ref.clone();
-                        let mut acceptor_task_set = JoinSet::new();
-                        loop {
-                            tokio::select! {
-                                accept_result = listener.accept() => match accept_result {
-                                    Ok(accept_result) => {
-                                        let strategy = strategy_clone.clone();
-                                        let listener_info = listener_info.clone();
-                                        let shutdown_receiver = shutdown_receiver.clone();
-                                        let job_sender = job_sender.clone();
-                                        let nonblocking_sender = nonblocking_sender.clone();
-                                        acceptor_task_set.spawn(async move {
-                                            strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
-                                        });
+                        loop {
+                            tokio::select! {
+                                accept_result = listener.accept() => {
+                                    match accept_result {
+                                        Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                        Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                    }
                                },
-                                    Err(e) => debug!("Listener.accept failed {:?}", e),
-                                },
-                                _ = shutdown_receiver.changed() => {
-                                    break;
-                                }
-                                lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                    Ok(LifecycleEvent::Shutdown) => {
-                                        debug!("Received lifecycle event: {:?}", lifecycle_event);
-                                        shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
-                                        tokio::time::sleep(Duration::from_millis(25)).await;
-                                        for _i in 0..workers_clone.len() {
-                                            job_sender.send(RequestJob::Shutdown).await.unwrap();
-                                            nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
-                                        }
-                                        break;
+                                _ = shutdown_receiver.changed() => {
+                                    debug!("Shutdown requested via receiver");
+                                    break;
                                },
-                                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                                    _ => {}
+                                lifecycle_event = lifecycle_rx.recv() => {
+                                    match lifecycle_event {
+                                        Ok(LifecycleEvent::Shutdown) => {
+                                            debug!("Received LifecycleEvent::Shutdown");
+                                            let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                            for _ in 0..worker_count {
+                                                let _ = job_sender.send(RequestJob::Shutdown).await;
+                                                let _ = nonblocking_sender.send(RequestJob::Shutdown).await;
+                                            }
+                                            break;
+                                        },
+                                        Err(e) => error!("Error receiving lifecycle event: {:?}", e),
+                                        _ => ()
+                                    }
+                                }
                            }
-                            }
-                        }
-
-                        let deadline = Instant::now()
-                            + Duration::from_secs_f64(self_ref.server_config.server_params.read().shutdown_timeout);
-                        tokio::select! {
-                            _ = async {
-                                while let Some(_res) = acceptor_task_set.join_next().await {}
-                            } => {},
-                            _ = tokio::time::sleep_until(tokio::time::Instant::from_std(deadline)) => {},
-                        }
-                    });
-
-                }
+                        }
+                        acceptor.join().await;
+                    });
+                });
 
                 if self.is_single_mode() {
                     self.invoke_hook("after_start");
                 }
 
                 while let Some(_res) = listener_task_set.join_next().await {}
-
-                // Explicitly drop all listeners to ensure file descriptors are released
                 drop(tokio_listeners);
 
                 Ok::<(), ItsiError>(())
@@ -346,12 +352,10 @@ impl SingleMode {
         }
 
         shutdown_sender.send(RunningPhase::Shutdown).ok();
-        let deadline = Instant::now()
-            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
-
         runtime.shutdown_timeout(Duration::from_millis(100));
-
         debug!("Shutdown timeout finished.");
+
+        let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
         loop {
             if thread_workers
                 .iter()
@@ -380,66 +384,6 @@ impl SingleMode {
     pub fn is_single_mode(&self) -> bool {
         self.server_config.server_params.read().workers == 1
     }
-
-    pub(crate) async fn serve_connection(
-        &self,
-        stream: IoStream,
-        job_sender: async_channel::Sender<RequestJob>,
-        nonblocking_sender: async_channel::Sender<RequestJob>,
-        listener: Arc<ListenerInfo>,
-        shutdown_channel: watch::Receiver<RunningPhase>,
-    ) {
-        let addr = stream.addr();
-        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
-        let executor = self.executor.clone();
-        let mut shutdown_channel_clone = shutdown_channel.clone();
-        let mut executor = executor.clone();
-        let mut binding = executor.http1();
-        let shutdown_channel = shutdown_channel_clone.clone();
-
-        let service = ItsiHttpService {
-            inner: Arc::new(ItsiHttpServiceInner {
-                sender: job_sender.clone(),
-                nonblocking_sender: nonblocking_sender.clone(),
-                server_params: self.server_config.server_params.read().clone(),
-                listener,
-                addr: addr.to_string(),
-                shutdown_channel: shutdown_channel.clone(),
-            }),
-        };
-        let mut serve = Box::pin(
-            binding
-                .timer(TokioTimer::new())
-                .header_read_timeout(self.server_config.server_params.read().header_read_timeout)
-                .serve_connection_with_upgrades(io, service),
-        );
-
-        tokio::select! {
-            // Await the connection finishing naturally.
-            res = &mut serve => {
-                match res{
-                    Ok(()) => {
-                        debug!("Connection closed normally")
-                    },
-                    Err(res) => {
-                        debug!("Connection closed abruptly: {:?}", res)
-                    }
-                }
-                serve.as_mut().graceful_shutdown();
-            },
-            // A lifecycle event triggers shutdown.
-            _ = shutdown_channel_clone.changed() => {
-                // Initiate graceful shutdown.
-                serve.as_mut().graceful_shutdown();
-
-                // Now await the connection to finish shutting down.
-                if let Err(e) = serve.await {
-                    debug!("Connection shutdown error: {:?}", e);
-                }
-            }
-        }
-    }
-
     /// Attempts to reload the config "live"
     /// Not that when running in single mode this will not unload
     /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
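
Note: the join-with-deadline idiom that moved from this file into Acceptor::join is easier to read in isolation. A self-contained sketch assuming only tokio, with an illustrative 0.5 s timeout standing in for server_params.shutdown_timeout:

use std::time::Duration;
use tokio::task::JoinSet;
use tokio::time::{sleep, sleep_until, Instant};

#[tokio::main]
async fn main() {
    let mut tasks: JoinSet<()> = JoinSet::new();
    tasks.spawn(async { sleep(Duration::from_millis(100)).await }); // finishes in time
    tasks.spawn(async { sleep(Duration::from_secs(3600)).await }); // straggler

    // Drain the set, but stop waiting once the deadline passes.
    let deadline = Instant::now() + Duration::from_secs_f64(0.5);
    tokio::select! {
        _ = async {
            while tasks.join_next().await.is_some() {}
        } => println!("all connection tasks finished"),
        _ = sleep_until(deadline) => println!("deadline hit; abandoning remaining tasks"),
    }
}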
data/crates/itsi_server/src/server/thread_worker.rs
@@ -23,9 +23,12 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 
-use crate::ruby_types::{
-    itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
-    itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+use crate::{
+    ruby_types::{
+        itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
+        itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+    },
+    server::process_worker::CORE_IDS,
 };
 
 use super::request_job::RequestJob;
@@ -184,9 +187,13 @@ impl ThreadWorker {
         let scheduler_class = self.scheduler_class;
         let params = self.params.clone();
         let self_ref = self.clone();
+        let id = self.id;
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
+                    if params.pin_worker_cores {
+                        core_affinity::set_for_current(CORE_IDS[(id as usize) % CORE_IDS.len()]);
+                    }
                     debug!("Ruby thread worker started");
                     if let Some(scheduler_class) = scheduler_class {
                         if let Err(err) = self_ref.fiber_accept_loop(
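
Note: the pinning added above uses the core_affinity crate plus a CORE_IDS list defined in process_worker.rs (not shown in this diff). A standalone sketch of the same wrap-around scheme, with an illustrative worker count of 4:

fn main() {
    // Enumerate cores once, as CORE_IDS does in process_worker.rs.
    let core_ids = core_affinity::get_core_ids().unwrap_or_default();
    if core_ids.is_empty() {
        eprintln!("could not enumerate cores; skipping pinning");
        return;
    }
    let handles: Vec<_> = (0..4u8)
        .map(|id| {
            // Wrap around when workers outnumber cores, as in the diff.
            let core = core_ids[(id as usize) % core_ids.len()];
            std::thread::spawn(move || {
                if core_affinity::set_for_current(core) {
                    println!("worker {id} pinned to core {}", core.id);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}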
data/crates/itsi_server/src/services/itsi_http_service.rs
@@ -1,13 +1,11 @@
 use crate::default_responses::{NOT_FOUND_RESPONSE, TIMEOUT_RESPONSE};
-use crate::ruby_types::itsi_server::itsi_server_config::{ItsiServerTokenPreference, ServerParams};
-use crate::server::binds::listener::ListenerInfo;
+use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerTokenPreference;
 use crate::server::http_message_types::{
     ConversionExt, HttpRequest, HttpResponse, RequestExt, ResponseFormat,
 };
 use crate::server::lifecycle_event::LifecycleEvent;
 use crate::server::middleware_stack::MiddlewareLayer;
-use crate::server::request_job::RequestJob;
-use crate::server::serve_strategy::single_mode::RunningPhase;
+use crate::server::serve_strategy::acceptor::AcceptorArgs;
 use crate::server::signal::send_lifecycle_event;
 use chrono::{self, DateTime, Local};
 use either::Either;
@@ -23,7 +21,6 @@ use std::time::{Duration, Instant};
 use tracing::error;
 
 use std::{future::Future, ops::Deref, pin::Pin, sync::Arc};
-use tokio::sync::watch::{self};
 use tokio::time::timeout;
 
 #[derive(Clone)]
@@ -40,12 +37,16 @@ impl Deref for ItsiHttpService {
 }
 
 pub struct ItsiHttpServiceInner {
-    pub sender: async_channel::Sender<RequestJob>,
-    pub nonblocking_sender: async_channel::Sender<RequestJob>,
-    pub server_params: Arc<ServerParams>,
-    pub listener: Arc<ListenerInfo>,
+    pub acceptor_args: Arc<AcceptorArgs>,
     pub addr: String,
-    pub shutdown_channel: watch::Receiver<RunningPhase>,
+}
+
+impl Deref for ItsiHttpServiceInner {
+    type Target = Arc<AcceptorArgs>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.acceptor_args
+    }
 }
 
 #[derive(Clone)]
@@ -178,26 +179,30 @@ impl Service<Request<Incoming>> for ItsiHttpService {
     type Future = Pin<Box<dyn Future<Output = itsi_error::Result<HttpResponse>> + Send>>;
 
     fn call(&self, req: Request<Incoming>) -> Self::Future {
-        let params = self.server_params.clone();
         let self_clone = self.clone();
         let mut req = req.limit();
         let accept: ResponseFormat = req.accept().into();
-        let accept_clone = accept.clone();
         let is_single_mode = self.server_params.workers == 1;
 
         let request_timeout = self.server_params.request_timeout;
         let is_ruby_request = Arc::new(AtomicBool::new(false));
         let irr_clone = is_ruby_request.clone();
+
+        let token_preference = self.server_params.itsi_server_token_preference;
+
         let service_future = async move {
+            let middleware_stack = self_clone
+                .server_params
+                .middleware
+                .get()
+                .unwrap()
+                .stack_for(&req)
+                .unwrap();
+            let (stack, matching_pattern) = middleware_stack;
             let mut resp: Option<HttpResponse> = None;
-            let (stack, matching_pattern) = params.middleware.get().unwrap().stack_for(&req)?;
 
-            let mut context = HttpRequestContext::new(
-                self_clone,
-                matching_pattern,
-                accept_clone.clone(),
-                irr_clone,
-            );
+            let mut context =
+                HttpRequestContext::new(self_clone.clone(), matching_pattern, accept, irr_clone);
             let mut depth = 0;
 
             for (index, elm) in stack.iter().enumerate() {
@@ -217,14 +222,14 @@ impl Service<Request<Incoming>> for ItsiHttpService {
 
             let mut resp = match resp {
                 Some(r) => r,
-                None => return Ok(NOT_FOUND_RESPONSE.to_http_response(accept_clone).await),
+                None => return Ok(NOT_FOUND_RESPONSE.to_http_response(accept).await),
             };
 
             for elm in stack.iter().rev().skip(stack.len() - depth - 1) {
                 resp = elm.after(resp, &mut context).await;
             }
 
-            match params.itsi_server_token_preference {
+            match token_preference {
                 ItsiServerTokenPreference::Version => {
                     resp.headers_mut().insert("Server", SERVER_TOKEN_VERSION);
                 }
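
Note: expressions like self.server_params keep compiling throughout this file after the fields move into AcceptorArgs because Rust's field access auto-derefs through the whole Deref chain (ItsiHttpServiceInner -> Arc<AcceptorArgs> -> AcceptorArgs). A self-contained sketch of that delegation with stand-in types:

use std::{ops::Deref, sync::Arc};

struct ServerParams {
    workers: usize,
}

struct AcceptorArgs {
    server_params: Arc<ServerParams>,
}

struct Inner {
    acceptor_args: Arc<AcceptorArgs>,
}

impl Deref for Inner {
    type Target = Arc<AcceptorArgs>;
    fn deref(&self) -> &Self::Target {
        &self.acceptor_args
    }
}

fn main() {
    let inner = Inner {
        acceptor_args: Arc::new(AcceptorArgs {
            server_params: Arc::new(ServerParams { workers: 1 }),
        }),
    };
    // No server_params field on Inner: the compiler derefs to
    // Arc<AcceptorArgs>, then to AcceptorArgs, and finds it there.
    println!("workers = {}", inner.server_params.workers);
}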