itsi-server 0.2.15 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +75 -73
  3. data/exe/itsi +6 -1
  4. data/ext/itsi_acme/Cargo.toml +1 -1
  5. data/ext/itsi_scheduler/Cargo.toml +1 -1
  6. data/ext/itsi_server/Cargo.lock +1 -1
  7. data/ext/itsi_server/Cargo.toml +3 -1
  8. data/ext/itsi_server/extconf.rb +3 -1
  9. data/ext/itsi_server/src/lib.rs +7 -1
  10. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  11. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +6 -6
  12. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  13. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +71 -42
  14. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  15. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
  16. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +32 -6
  17. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  18. data/ext/itsi_server/src/server/binds/listener.rs +49 -8
  19. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  20. data/ext/itsi_server/src/server/http_message_types.rs +143 -10
  21. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  22. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -58
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +6 -9
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +27 -42
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
  31. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
  32. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +8 -11
  33. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
  34. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  35. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
  36. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  37. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +13 -6
  38. data/ext/itsi_server/src/server/mod.rs +1 -0
  39. data/ext/itsi_server/src/server/process_worker.rs +5 -5
  40. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +100 -0
  41. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
  42. data/ext/itsi_server/src/server/serve_strategy/mod.rs +1 -0
  43. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +166 -206
  44. data/ext/itsi_server/src/server/signal.rs +37 -9
  45. data/ext/itsi_server/src/server/thread_worker.rs +92 -70
  46. data/ext/itsi_server/src/services/itsi_http_service.rs +67 -62
  47. data/ext/itsi_server/src/services/mime_types.rs +185 -183
  48. data/ext/itsi_server/src/services/rate_limiter.rs +16 -34
  49. data/ext/itsi_server/src/services/static_file_server.rs +35 -60
  50. data/lib/itsi/http_request.rb +31 -39
  51. data/lib/itsi/http_response.rb +5 -0
  52. data/lib/itsi/rack_env_pool.rb +59 -0
  53. data/lib/itsi/server/config/config_helpers.rb +1 -2
  54. data/lib/itsi/server/config/dsl.rb +5 -4
  55. data/lib/itsi/server/config/middleware/etag.md +3 -7
  56. data/lib/itsi/server/config/middleware/etag.rb +2 -4
  57. data/lib/itsi/server/config/middleware/proxy.rb +1 -1
  58. data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
  59. data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
  60. data/lib/itsi/server/config/options/include.rb +5 -2
  61. data/lib/itsi/server/config/options/listen_backlog.rb +1 -1
  62. data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
  63. data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
  64. data/lib/itsi/server/config/options/send_buffer_size.md +15 -0
  65. data/lib/itsi/server/config/options/send_buffer_size.rb +19 -0
  66. data/lib/itsi/server/config/options/writev.md +25 -0
  67. data/lib/itsi/server/config/options/writev.rb +19 -0
  68. data/lib/itsi/server/config.rb +43 -31
  69. data/lib/itsi/server/default_config/Itsi.rb +1 -4
  70. data/lib/itsi/server/grpc/grpc_call.rb +2 -0
  71. data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
  72. data/lib/itsi/server/rack/handler/itsi.rb +3 -1
  73. data/lib/itsi/server/rack_interface.rb +17 -12
  74. data/lib/itsi/server/route_tester.rb +1 -1
  75. data/lib/itsi/server/scheduler_interface.rb +2 -0
  76. data/lib/itsi/server/version.rb +1 -1
  77. data/lib/itsi/server.rb +1 -0
  78. data/lib/ruby_lsp/itsi/addon.rb +12 -13
  79. metadata +10 -1
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs:

@@ -1,17 +1,18 @@
 use crate::{
     ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
     server::{
-        binds::listener::ListenerInfo,
-        io_stream::IoStream,
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
-        signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
+        serve_strategy::acceptor::{Acceptor, AcceptorArgs},
+        signal::{
+            send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime,
+            SHUTDOWN_REQUESTED,
+        },
         thread_worker::{build_thread_workers, ThreadWorker},
     },
-    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
 };
 use hyper_util::{
-    rt::{TokioExecutor, TokioIo, TokioTimer},
+    rt::{TokioExecutor, TokioTimer},
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
@@ -22,34 +23,29 @@ use itsi_tracing::{debug, error, info};
 use magnus::{value::ReprValue, Value};
 use nix::unistd::Pid;
 use parking_lot::RwLock;
+use std::sync::Arc;
 use std::{
     collections::HashMap,
-    pin::Pin,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
+    sync::atomic::{AtomicBool, Ordering},
     thread::sleep,
     time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{
-        broadcast,
-        watch::{self},
-    },
+    sync::watch::{self},
     task::JoinSet,
 };
 use tracing::instrument;

 pub struct SingleMode {
+    pub worker_id: usize,
     pub executor: Builder<TokioExecutor>,
     pub server_config: Arc<ItsiServerConfig>,
-    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     pub restart_requested: AtomicBool,
     pub status: RwLock<HashMap<u8, (u64, u64)>>,
 }

+#[derive(PartialEq, Debug)]
 pub enum RunningPhase {
     Running,
     ShutdownPending,
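The new `#[derive(PartialEq, Debug)]` on `RunningPhase` lets phase values carried over the `watch` channel be compared and logged directly. A minimal sketch of that usage (assumed; the comparison site is not part of this excerpt):

use tokio::sync::watch;

#[derive(PartialEq, Debug)]
enum RunningPhase { Running, ShutdownPending, Shutdown }

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(RunningPhase::Running);
    tx.send(RunningPhase::ShutdownPending).unwrap();
    rx.changed().await.unwrap();
    // Comparisons like this need PartialEq; Debug covers logging.
    assert_eq!(*rx.borrow(), RunningPhase::ShutdownPending);
}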
@@ -58,17 +54,45 @@

 impl SingleMode {
     #[instrument(parent=None, skip_all)]
-    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+    pub fn new(server_config: Arc<ItsiServerConfig>, worker_id: usize) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
+        let executor = {
+            let mut executor = Builder::new(TokioExecutor::new());
+            let server_params = server_config.server_params.read();
+            let mut http1_executor = executor.http1();
+
+            http1_executor
+                .header_read_timeout(server_params.header_read_timeout)
+                .pipeline_flush(server_params.pipeline_flush)
+                .timer(TokioTimer::new());
+
+            if let Some(writev) = server_params.writev {
+                http1_executor.writev(writev);
+            }
+
+            executor
+                .http2()
+                .max_concurrent_streams(server_params.max_concurrent_streams)
+                .max_local_error_reset_streams(server_params.max_local_error_reset_streams)
+                .max_header_list_size(server_params.max_header_list_size)
+                .max_send_buf_size(server_params.max_send_buf_size)
+                .enable_connect_protocol();
+            executor
+        };
+
         Ok(Self {
-            executor: Builder::new(TokioExecutor::new()),
+            worker_id,
+            executor,
             server_config,
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
             restart_requested: AtomicBool::new(false),
             status: RwLock::new(HashMap::new()),
         })
     }

+    pub fn is_zero_worker(&self) -> bool {
+        self.worker_id == 0
+    }
+
     pub fn build_runtime(&self) -> Runtime {
         let mut builder: RuntimeBuilder = if self
             .server_config
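With this release the hyper connection builder is assembled once per worker in `new`, rather than configured per connection in `serve_connection` (deleted below). The new `pipeline_flush` and `writev` options pass through to hyper's HTTP/1 connection settings. A standalone sketch of the same builder setup, with placeholder values standing in for itsi's `server_params`:

use std::time::Duration;
use hyper_util::{rt::{TokioExecutor, TokioTimer}, server::conn::auto::Builder};

fn connection_builder() -> Builder<TokioExecutor> {
    let mut builder = Builder::new(TokioExecutor::new());
    builder
        .http1()
        .header_read_timeout(Duration::from_secs(5))
        .pipeline_flush(true) // batch pipelined HTTP/1.1 responses into a single flush
        .writev(true)         // allow vectored writes instead of flattening buffers
        .timer(TokioTimer::new());
    builder
}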
@@ -82,7 +106,11 @@ impl SingleMode {
         };
         builder
             .thread_name("itsi-server-accept-loop")
-            .thread_stack_size(3 * 1024 * 1024)
+            .thread_stack_size(512 * 1024)
+            .max_blocking_threads(4)
+            .event_interval(16)
+            .global_queue_interval(64)
+            .max_io_events_per_tick(256)
             .enable_all()
             .build()
             .expect("Failed to build Tokio runtime")
@@ -90,7 +118,7 @@

     pub fn stop(&self) -> Result<()> {
         SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
-        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+        send_lifecycle_event(LifecycleEvent::Shutdown);
         Ok(())
     }

@@ -169,7 +197,7 @@
             .unwrap();
         let receiver = self.clone();
         monitor_runtime.block_on({
-            let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+            let mut lifecycle_rx = subscribe_runtime_to_signals();
             let receiver = receiver.clone();
             let thread_workers = thread_workers.clone();
             async move {
@@ -188,18 +216,19 @@
                         }
                         lifecycle_event = lifecycle_rx.recv() => {
                             match lifecycle_event {
-                                Ok(LifecycleEvent::Restart) => {
+                                Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => {
                                     receiver.restart().await.ok();
                                 }
-                                Ok(LifecycleEvent::Reload) => {
-                                    receiver.reload().await.ok();
-                                }
                                 Ok(LifecycleEvent::Shutdown) => {
                                     break;
                                 }
                                 Ok(LifecycleEvent::PrintInfo) => {
                                     receiver.print_info(thread_workers.clone()).await.ok();
                                 }
+                                Err(e) => {
+                                    debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e);
+                                    break;
+                                }
                                 _ => {}
                             }
                         }
@@ -214,24 +243,26 @@

     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let mut listener_task_set = JoinSet::new();
-        let runtime = self.build_runtime();
-
-        let (thread_workers, job_sender, nonblocking_sender) =
-            build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
-                .inspect_err(|e| {
-                    if let Some(err_val) = e.value() {
-                        print_rb_backtrace(err_val);
-                    }
-                })?;
+        let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers(
+            self.server_config.server_params.read().clone(),
+            self.worker_id,
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;

+        let worker_count = thread_workers.len();
         info!(
-            threads = thread_workers.len(),
+            threads = worker_count,
             binds = format!("{:?}", self.server_config.server_params.read().binds)
         );

+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+        let is_zero_worker = self.is_zero_worker();
         if monitor_thread.is_none() {
             error!("Failed to start monitor thread");
             return Err(ItsiError::new("Failed to start monitor thread"));
@@ -240,118 +271,127 @@
         if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
             return Ok(());
         }
-        let result = runtime.block_on(
-            async {
-                let server_params = self.server_config.server_params.read().clone();
-                if let Err(err) = server_params.initialize_middleware().await {
-                    error!("Failed to initialize middleware: {}", err);
-                    return Err(ItsiError::new("Failed to initialize middleware"))
-                }
-                let tokio_listeners = server_params.listeners.lock()
-                    .drain(..)
-                    .map(|list| {
-                        Arc::new(list.into_tokio_listener())
-                    })
-                    .collect::<Vec<_>>();
-
-                for listener in tokio_listeners.iter() {
-                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-
-                    let listener_info = Arc::new(listener.listener_info());
-                    let self_ref = self.clone();
-                    let listener = listener.clone();
-                    let shutdown_sender = shutdown_sender.clone();
-                    let job_sender = job_sender.clone();
-                    let nonblocking_sender = nonblocking_sender.clone();
-                    let workers_clone = thread_workers.clone();
-                    let listener_clone = listener.clone();
-                    let mut shutdown_receiver = shutdown_sender.subscribe();
-                    let shutdown_receiver_clone = shutdown_receiver.clone();
-                    listener_task_set.spawn(async move {
-                        listener_clone.spawn_state_task(shutdown_receiver_clone).await;
-                    });
-
-                    listener_task_set.spawn(async move {
-                        let strategy_clone = self_ref.clone();
-                        let mut acceptor_task_set = JoinSet::new();
+        let runtime = self.build_runtime();
+        let result = runtime.block_on(async {
+            let mut listener_task_set = JoinSet::new();
+            let server_params = self.server_config.server_params.read().clone();
+            if let Err(err) = server_params.initialize_middleware().await {
+                error!("Failed to initialize middleware: {}", err);
+                return Err(ItsiError::new("Failed to initialize middleware"));
+            }
+            let tokio_listeners = server_params
+                .listeners
+                .lock()
+                .drain(..)
+                .map(|list| Arc::new(list.into_tokio_listener(is_zero_worker)))
+                .collect::<Vec<_>>();
+
+            tokio_listeners.iter().cloned().for_each(|listener| {
+                let shutdown_sender = shutdown_sender.clone();
+                let job_sender = job_sender.clone();
+                let nonblocking_sender = nonblocking_sender.clone();
+
+                let mut lifecycle_rx = subscribe_runtime_to_signals();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let mut acceptor = Acceptor {
+                    acceptor_args: Arc::new(AcceptorArgs {
+                        strategy: self.clone(),
+                        listener_info: listener.listener_info(),
+                        shutdown_receiver: shutdown_sender.subscribe(),
+                        job_sender: job_sender.clone(),
+                        nonblocking_sender: nonblocking_sender.clone(),
+                        server_params: server_params.clone(),
+                    }),
+                    join_set: JoinSet::new(),
+                };
+
+                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                let acme_task_listener_clone = listener.clone();
+
+                let mut after_accept_wait: Option<Duration> = None::<Duration>;
+
+                if cfg!(target_os = "macos") {
+                    after_accept_wait = if server_params.workers > 1 {
+                        Some(Duration::from_nanos(10 * server_params.workers as u64))
+                    } else {
+                        None
+                    };
+                };
+
+                listener_task_set.spawn(async move {
+                    acme_task_listener_clone
+                        .spawn_acme_event_task(shutdown_rx_for_acme_task)
+                        .await;
+                });
+
+                listener_task_set.spawn(async move {
                     loop {
+                        // Process any pending signals before select
                         tokio::select! {
-                            accept_result = listener.accept() => match accept_result {
-                                Ok(accept_result) => {
-                                    let strategy = strategy_clone.clone();
-                                    let listener_info = listener_info.clone();
-                                    let shutdown_receiver = shutdown_receiver.clone();
-                                    let job_sender = job_sender.clone();
-                                    let nonblocking_sender = nonblocking_sender.clone();
-                                    acceptor_task_set.spawn(async move {
-                                        strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
-                                    });
-                                },
-                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            accept_result = listener.accept() => {
+                                match accept_result {
+                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                }
+                                if cfg!(target_os = "macos") {
+                                    if let Some(after_accept_wait) = after_accept_wait{
+                                        tokio::time::sleep(after_accept_wait).await;
+                                    }
+                                }
                             },
                             _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                Ok(LifecycleEvent::Shutdown) => {
-                                    debug!("Received lifecycle event: {:?}", lifecycle_event);
-                                    shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
-                                    tokio::time::sleep(Duration::from_millis(25)).await;
-                                    for _i in 0..workers_clone.len() {
-                                        job_sender.send(RequestJob::Shutdown).await.unwrap();
-                                        nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
-                                    }
+                                debug!("Shutdown requested via receiver");
                                 break;
-                                },
-                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                                _ => {}
-                            }
+                            },
+                            lifecycle_event = lifecycle_rx.recv() => {
+                                match lifecycle_event {
+                                    Ok(LifecycleEvent::Shutdown) => {
+                                        debug!("Received LifecycleEvent::Shutdown");
+                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                        break;
+                                    },
+                                    Err(e) => {
+                                        debug!("Lifecycle channel closed: {:?}, exiting accept loop", e);
+                                        break
+                                    },
+                                    _ => ()
+                                }
+                            }
                         }
                     }
-
-                    let deadline = Instant::now()
-                        + Duration::from_secs_f64(self_ref.server_config.server_params.read().shutdown_timeout);
-                    tokio::select! {
-                        _ = async {
-                            while let Some(_res) = acceptor_task_set.join_next().await {}
-                        } => {},
-                        _ = tokio::time::sleep_until(tokio::time::Instant::from_std(deadline)) => {},
-                    }
+                    acceptor.join().await;
                 });
+            });

-            }
-
-            if self.is_single_mode() {
+            if self.is_single_mode() {
                 self.invoke_hook("after_start");
-            }
-
-            while let Some(_res) = listener_task_set.join_next().await {}
+            }

-            // Explicitly drop all listeners to ensure file descriptors are released
-            drop(tokio_listeners);
+            while let Some(_res) = listener_task_set.join_next().await {}
+            drop(tokio_listeners);

-            Ok::<(), ItsiError>(())
-        });
+            Ok::<(), ItsiError>(())
+        });

         debug!("Single mode runtime exited.");

+        for _i in 0..thread_workers.len() {
+            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
+            nonblocking_sender
+                .send_blocking(RequestJob::Shutdown)
+                .unwrap();
+        }
         if result.is_err() {
-            for _i in 0..thread_workers.len() {
-                job_sender.send_blocking(RequestJob::Shutdown).unwrap();
-                nonblocking_sender
-                    .send_blocking(RequestJob::Shutdown)
-                    .unwrap();
-            }
-            self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+            send_lifecycle_event(LifecycleEvent::Shutdown);
         }

         shutdown_sender.send(RunningPhase::Shutdown).ok();
-        let deadline = Instant::now()
-            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
-
         runtime.shutdown_timeout(Duration::from_millis(100));
+        unsubscribe_runtime();

         debug!("Shutdown timeout finished.");
+
+        let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
         loop {
             if thread_workers
                 .iter()
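Connection hand-off now goes through the new `Acceptor` (see `serve_strategy/acceptor.rs`, added in this release but not shown in this excerpt), and the loop exits when either the watch channel changes phase or the lifecycle broadcast closes. A self-contained miniature of that select/drain pattern, using a bare `TcpListener` and string events in place of itsi's types:

use std::time::Duration;
use tokio::{net::TcpListener, sync::{broadcast, watch}, task::JoinSet};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let (phase_tx, mut phase_rx) = watch::channel("running");
    let (life_tx, mut life_rx) = broadcast::channel::<&'static str>(5);
    let mut join_set = JoinSet::new();

    // Simulate a Shutdown lifecycle event arriving shortly after startup.
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(100)).await;
        life_tx.send("shutdown").ok();
    });

    loop {
        tokio::select! {
            accepted = listener.accept() => match accepted {
                // Each accepted connection becomes a task, so a slow client
                // never stalls the accept loop (the Acceptor's join_set role).
                Ok((stream, addr)) => { join_set.spawn(async move { drop(stream); println!("served {addr}"); }); }
                Err(e) => eprintln!("accept failed: {e:?}"),
            },
            // Another loop broadcasting a phase change also ends this one.
            _ = phase_rx.changed() => break,
            event = life_rx.recv() => match event {
                Ok("shutdown") => { phase_tx.send("shutdown_pending").ok(); break; }
                Err(_) => break, // lifecycle channel closed: exit the accept loop
                _ => {}
            },
        }
    }
    // Equivalent of `acceptor.join().await`: drain in-flight connection tasks.
    while join_set.join_next().await.is_some() {}
    Ok(())
}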
@@ -381,86 +421,6 @@ impl SingleMode {
         self.server_config.server_params.read().workers == 1
     }

-    pub(crate) async fn serve_connection(
-        &self,
-        stream: IoStream,
-        job_sender: async_channel::Sender<RequestJob>,
-        nonblocking_sender: async_channel::Sender<RequestJob>,
-        listener: Arc<ListenerInfo>,
-        shutdown_channel: watch::Receiver<RunningPhase>,
-    ) {
-        let addr = stream.addr();
-        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
-        let executor = self.executor.clone();
-        let mut shutdown_channel_clone = shutdown_channel.clone();
-        let mut executor = executor.clone();
-        let mut binding = executor.http1();
-        let shutdown_channel = shutdown_channel_clone.clone();
-
-        let service = ItsiHttpService {
-            inner: Arc::new(ItsiHttpServiceInner {
-                sender: job_sender.clone(),
-                nonblocking_sender: nonblocking_sender.clone(),
-                server_params: self.server_config.server_params.read().clone(),
-                listener,
-                addr: addr.to_string(),
-                shutdown_channel: shutdown_channel.clone(),
-            }),
-        };
-        let mut serve = Box::pin(
-            binding
-                .timer(TokioTimer::new())
-                .header_read_timeout(self.server_config.server_params.read().header_read_timeout)
-                .serve_connection_with_upgrades(io, service),
-        );
-
-        tokio::select! {
-            // Await the connection finishing naturally.
-            res = &mut serve => {
-                match res{
-                    Ok(()) => {
-                        debug!("Connection closed normally")
-                    },
-                    Err(res) => {
-                        debug!("Connection closed abruptly: {:?}", res)
-                    }
-                }
-                serve.as_mut().graceful_shutdown();
-            },
-            // A lifecycle event triggers shutdown.
-            _ = shutdown_channel_clone.changed() => {
-                // Initiate graceful shutdown.
-                serve.as_mut().graceful_shutdown();
-
-                // Now await the connection to finish shutting down.
-                if let Err(e) = serve.await {
-                    debug!("Connection shutdown error: {:?}", e);
-                }
-            }
-        }
-    }
-
-    /// Attempts to reload the config "live"
-    /// Not that when running in single mode this will not unload
-    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
-    pub async fn reload(&self) -> Result<()> {
-        if !self.server_config.check_config().await {
-            return Ok(());
-        }
-        let should_reexec = self.server_config.clone().reload(false)?;
-        if should_reexec {
-            if self.is_single_mode() {
-                self.invoke_hook("before_restart");
-            }
-            self.server_config.dup_fds()?;
-            self.server_config.reload_exec()?;
-        }
-        self.restart_requested.store(true, Ordering::SeqCst);
-        self.stop()?;
-        self.server_config.server_params.read().preload_ruby()?;
-        Ok(())
-    }
-
     pub fn invoke_hook(&self, hook_name: &str) {
         if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
             call_with_gvl(|_| hook.call::<_, Value>(()).ok());
data/ext/itsi_server/src/server/signal.rs:

@@ -1,22 +1,50 @@
-use std::sync::{
-    atomic::{AtomicBool, AtomicI8},
-    LazyLock,
+use std::{
+    collections::VecDeque,
+    sync::atomic::{AtomicBool, AtomicI8},
 };

 use nix::libc::{self, sighandler_t};
-use tokio::sync::{self, broadcast};
+use parking_lot::Mutex;
+use tokio::sync::broadcast;

 use super::lifecycle_event::LifecycleEvent;

 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
 pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
-pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
-    broadcast::Sender<LifecycleEvent>,
-    broadcast::Receiver<LifecycleEvent>,
-)> = LazyLock::new(|| sync::broadcast::channel(5));
+pub static SIGNAL_HANDLER_CHANNEL: Mutex<Option<broadcast::Sender<LifecycleEvent>>> =
+    Mutex::new(None);
+
+pub static PENDING_QUEUE: Mutex<VecDeque<LifecycleEvent>> = Mutex::new(VecDeque::new());
+
+pub fn subscribe_runtime_to_signals() -> broadcast::Receiver<LifecycleEvent> {
+    let mut guard = SIGNAL_HANDLER_CHANNEL.lock();
+    if let Some(sender) = guard.as_ref() {
+        return sender.subscribe();
+    }
+    let (sender, receiver) = broadcast::channel(5);
+    let sender_clone = sender.clone();
+    std::thread::spawn(move || {
+        std::thread::sleep(std::time::Duration::from_millis(50));
+        for event in PENDING_QUEUE.lock().drain(..) {
+            sender_clone.send(event).ok();
+        }
+    });
+
+    guard.replace(sender);
+
+    receiver
+}
+
+pub fn unsubscribe_runtime() {
+    SIGNAL_HANDLER_CHANNEL.lock().take();
+}

 pub fn send_lifecycle_event(event: LifecycleEvent) {
-    SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
+    if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() {
+        sender.send(event).ok();
+    } else {
+        PENDING_QUEUE.lock().push_back(event);
+    }
 }

 fn receive_signal(signum: i32, _: sighandler_t) {
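The rework above replaces the process-wide `LazyLock` broadcast pair with a lazily created, replaceable sender plus a pending queue, so lifecycle events raised before any runtime has subscribed (for example, a signal arriving mid-restart) are buffered instead of dropped. A runnable stand-in demonstrating that buffering behavior (names mirror `signal.rs`, but this is not the crate's code):

use std::{collections::VecDeque, sync::Mutex, thread, time::Duration};
use tokio::sync::broadcast;

// Events sent before any runtime has subscribed are queued, then flushed
// onto the broadcast channel once a subscriber exists.
static SENDER: Mutex<Option<broadcast::Sender<&'static str>>> = Mutex::new(None);
static PENDING: Mutex<VecDeque<&'static str>> = Mutex::new(VecDeque::new());

fn send_event(event: &'static str) {
    match SENDER.lock().unwrap().as_ref() {
        Some(tx) => { tx.send(event).ok(); }
        None => PENDING.lock().unwrap().push_back(event), // no runtime yet: buffer it
    }
}

fn subscribe() -> broadcast::Receiver<&'static str> {
    let mut guard = SENDER.lock().unwrap();
    if let Some(tx) = guard.as_ref() {
        return tx.subscribe();
    }
    let (tx, rx) = broadcast::channel(5);
    let tx2 = tx.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50)); // give the subscriber time to attach
        for event in PENDING.lock().unwrap().drain(..) {
            tx2.send(event).ok();
        }
    });
    guard.replace(tx);
    rx
}

#[tokio::main]
async fn main() {
    send_event("shutdown");   // raised before any subscriber: queued
    let mut rx = subscribe(); // first subscription flushes the queue
    assert_eq!(rx.recv().await.unwrap(), "shutdown");
}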