itsi-scheduler 0.2.16 → 0.2.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +1 -1
  3. data/ext/itsi_acme/Cargo.toml +1 -1
  4. data/ext/itsi_scheduler/Cargo.toml +1 -1
  5. data/ext/itsi_server/Cargo.toml +3 -1
  6. data/ext/itsi_server/src/lib.rs +6 -1
  7. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  8. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +4 -4
  9. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  10. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +64 -33
  11. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  12. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +422 -110
  13. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +62 -15
  14. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  15. data/ext/itsi_server/src/server/binds/listener.rs +45 -7
  16. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  17. data/ext/itsi_server/src/server/http_message_types.rs +142 -9
  18. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  19. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  20. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  21. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  22. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -56
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +5 -7
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +5 -5
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +7 -10
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +4 -6
  31. data/ext/itsi_server/src/server/mod.rs +1 -0
  32. data/ext/itsi_server/src/server/process_worker.rs +3 -4
  33. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +16 -12
  34. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +83 -31
  35. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +166 -142
  36. data/ext/itsi_server/src/server/signal.rs +37 -9
  37. data/ext/itsi_server/src/server/thread_worker.rs +84 -69
  38. data/ext/itsi_server/src/services/itsi_http_service.rs +43 -43
  39. data/ext/itsi_server/src/services/static_file_server.rs +28 -47
  40. data/lib/itsi/scheduler/version.rb +1 -1
  41. metadata +2 -1
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs

@@ -1,5 +1,5 @@
 use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig;
-use crate::server::signal::SIGNAL_HANDLER_CHANNEL;
+use crate::server::signal::{subscribe_runtime_to_signals, unsubscribe_runtime};
 use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker};
 use itsi_error::{ItsiError, Result};
 use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread};
@@ -7,31 +7,32 @@ use itsi_tracing::{error, info, warn};
 use magnus::Value;
 use nix::{libc::exit, unistd::Pid};
 
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::{
-    sync::{atomic::AtomicUsize, Arc},
+    sync::Arc,
     time::{Duration, Instant},
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{broadcast, watch, Mutex},
+    sync::{watch, Mutex},
     time::{self, sleep},
 };
 use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
     pub server_config: Arc<ItsiServerConfig>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
-    pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
 
-static WORKER_ID: AtomicUsize = AtomicUsize::new(0);
 static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
     parking_lot::Mutex::new(None);
 
+static RELOAD_IN_PROGRESS: AtomicBool = AtomicBool::new(false);
+
 impl ClusterMode {
     pub fn new(server_config: Arc<ItsiServerConfig>) -> Self {
         let process_workers = (0..server_config.server_params.read().workers)
-            .map(|_| ProcessWorker {
-                worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+            .map(|id| ProcessWorker {
+                worker_id: id as usize,
                 ..Default::default()
             })
             .collect();
@@ -39,7 +40,6 @@ impl ClusterMode {
         Self {
             server_config,
             process_workers: parking_lot::Mutex::new(process_workers),
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
         }
     }
 
@@ -60,6 +60,26 @@ impl ClusterMode {
         }
     }
 
+    fn next_worker_id(&self) -> usize {
+        let mut ids: Vec<usize> = self
+            .process_workers
+            .lock()
+            .iter()
+            .map(|w| w.worker_id)
+            .collect();
+        self.next_available_id_in(&mut ids)
+    }
+
+    fn next_available_id_in(&self, list: &mut [usize]) -> usize {
+        list.sort_unstable();
+        for (expected, &id) in list.iter().enumerate() {
+            if id != expected {
+                return expected;
+            }
+        }
+        list.len()
+    }
+
     #[allow(clippy::await_holding_lock)]
     pub async fn handle_lifecycle_event(
         self: Arc<Self>,
@@ -80,9 +100,11 @@ impl ClusterMode {
             LifecycleEvent::Restart => {
                 if self.server_config.check_config().await {
                     self.invoke_hook("before_restart");
+                    self.server_config.stop_watcher()?;
                     self.server_config.dup_fds()?;
                     self.shutdown().await.ok();
                     info!("Shutdown complete. Calling reload exec");
+
                     self.server_config.reload_exec()?;
                 }
                 Ok(())
@@ -91,46 +113,65 @@ impl ClusterMode {
                 if !self.server_config.check_config().await {
                     return Ok(());
                 }
+
                 let should_reexec = self.server_config.clone().reload(true)?;
+
                 if should_reexec {
+                    self.server_config.stop_watcher()?;
                     self.server_config.dup_fds()?;
                     self.shutdown().await.ok();
                     self.server_config.reload_exec()?;
                 }
-                let mut workers_to_load = self.server_config.server_params.read().workers;
-                let mut next_workers = Vec::new();
-                for worker in self.process_workers.lock().drain(..) {
-                    if workers_to_load == 0 {
-                        worker.graceful_shutdown(self.clone()).await
-                    } else {
-                        workers_to_load -= 1;
-                        worker.reboot(self.clone()).await?;
-                        next_workers.push(worker);
-                    }
+
+                if RELOAD_IN_PROGRESS
+                    .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+                    .is_err()
+                {
+                    warn!("Reload already in progress, ignoring request");
+                    return Ok(());
                 }
-                self.process_workers.lock().extend(next_workers);
-                while workers_to_load > 0 {
-                    let mut workers = self.process_workers.lock();
+                let workers_to_load = self.server_config.server_params.read().workers;
+                let mut next_workers = Vec::new();
+                let mut old_workers = self.process_workers.lock().drain(..).collect::<Vec<_>>();
+
+                // Spawn new workers
+                for i in 0..workers_to_load {
                     let worker = ProcessWorker {
-                        worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                        worker_id: i as usize,
                         ..Default::default()
                     };
                     let worker_clone = worker.clone();
                    let self_clone = self.clone();
-                    create_ruby_thread(move || {
-                        call_without_gvl(move || {
-                            worker_clone.boot(self_clone).ok();
-                        })
+
+                    call_with_gvl(|_| {
+                        create_ruby_thread(move || {
+                            call_without_gvl(move || match worker_clone.boot(self_clone) {
+                                Err(err) => error!("Worker boot failed {:?}", err),
+                                _ => {}
+                            })
+                        });
                     });
-                    workers.push(worker);
-                    workers_to_load -= 1
+
+                    next_workers.push(worker);
+
+                    if let Some(old) = old_workers.pop() {
+                        old.graceful_shutdown(self.clone()).await;
+                    }
+                }
+
+                for worker in old_workers {
+                    worker.graceful_shutdown(self.clone()).await;
                 }
+
+                self.process_workers.lock().extend(next_workers);
+                RELOAD_IN_PROGRESS.store(false, Ordering::SeqCst);
+
                 Ok(())
             }
             LifecycleEvent::IncreaseWorkers => {
                 let mut workers = self.process_workers.lock();
                 let worker = ProcessWorker {
-                    worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                    worker_id: self.next_worker_id(),
                     ..Default::default()
                 };
                 let worker_clone = worker.clone();
@@ -171,6 +212,10 @@ impl ClusterMode {
                 unsafe { exit(0) };
             }
             LifecycleEvent::ChildTerminated => {
+                if RELOAD_IN_PROGRESS.load(Ordering::SeqCst) {
+                    warn!("Reload already in progress, ignoring child signal");
+                    return Ok(());
+                }
                 CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
                     i.send(()).ok();
                 });
@@ -275,6 +320,7 @@ impl ClusterMode {
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
         self.invoke_hook("before_fork");
+
         self.process_workers
             .lock()
             .iter()
@@ -283,10 +329,11 @@ impl ClusterMode {
         let (sender, mut receiver) = watch::channel(());
         *CHILD_SIGNAL_SENDER.lock() = Some(sender);
 
-        let mut lifecycle_rx = self.lifecycle_channel.subscribe();
         let self_ref = self.clone();
 
         self.build_runtime().block_on(async {
+            let mut lifecycle_rx = subscribe_runtime_to_signals();
+
             let self_ref = self_ref.clone();
            let memory_check_duration = if self_ref.server_config.server_params.read().worker_memory_limit.is_some(){
                time::Duration::from_secs(15)
@@ -338,11 +385,16 @@ impl ClusterMode {
                        }
 
                    },
-                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                    Err(e) => {
+                        debug!("Lifecycle channel closed: {:?}, exiting cluster monitor loop", e);
+                        break
+                    },
                 }
            }
        }
        });
+
+        unsubscribe_runtime();
        self.server_config
            .server_params
            .write()
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs

@@ -4,7 +4,10 @@ use crate::{
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
         serve_strategy::acceptor::{Acceptor, AcceptorArgs},
-        signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
+        signal::{
+            send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime,
+            SHUTDOWN_REQUESTED,
+        },
         thread_worker::{build_thread_workers, ThreadWorker},
     },
 };
@@ -29,22 +32,20 @@ use std::{
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{
-        broadcast,
-        watch::{self},
-    },
+    sync::watch::{self},
     task::JoinSet,
 };
 use tracing::instrument;
 
 pub struct SingleMode {
+    pub worker_id: usize,
     pub executor: Builder<TokioExecutor>,
     pub server_config: Arc<ItsiServerConfig>,
-    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     pub restart_requested: AtomicBool,
     pub status: RwLock<HashMap<u8, (u64, u64)>>,
 }
 
+#[derive(PartialEq, Debug)]
 pub enum RunningPhase {
     Running,
     ShutdownPending,
@@ -53,31 +54,45 @@ pub enum RunningPhase {
 
 impl SingleMode {
     #[instrument(parent=None, skip_all)]
-    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+    pub fn new(server_config: Arc<ItsiServerConfig>, worker_id: usize) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
-        let mut executor = Builder::new(TokioExecutor::new());
-        executor
-            .http1()
-            .header_read_timeout(server_config.server_params.read().header_read_timeout)
-            .writev(true)
-            .timer(TokioTimer::new());
-        executor
-            .http2()
-            .max_concurrent_streams(100)
-            .max_local_error_reset_streams(100)
-            .enable_connect_protocol()
-            .max_header_list_size(10 * 1024 * 1024)
-            .max_send_buf_size(16 * 1024 * 1024);
+        let executor = {
+            let mut executor = Builder::new(TokioExecutor::new());
+            let server_params = server_config.server_params.read();
+            let mut http1_executor = executor.http1();
+
+            http1_executor
+                .header_read_timeout(server_params.header_read_timeout)
+                .pipeline_flush(server_params.pipeline_flush)
+                .timer(TokioTimer::new());
+
+            if let Some(writev) = server_params.writev {
+                http1_executor.writev(writev);
+            }
+
+            executor
+                .http2()
+                .max_concurrent_streams(server_params.max_concurrent_streams)
+                .max_local_error_reset_streams(server_params.max_local_error_reset_streams)
+                .max_header_list_size(server_params.max_header_list_size)
+                .max_send_buf_size(server_params.max_send_buf_size)
+                .enable_connect_protocol();
+            executor
+        };
 
         Ok(Self {
+            worker_id,
             executor,
             server_config,
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
             restart_requested: AtomicBool::new(false),
             status: RwLock::new(HashMap::new()),
         })
     }
 
+    pub fn is_zero_worker(&self) -> bool {
+        self.worker_id == 0
+    }
+
     pub fn build_runtime(&self) -> Runtime {
         let mut builder: RuntimeBuilder = if self
             .server_config
@@ -103,7 +118,7 @@ impl SingleMode {
 
     pub fn stop(&self) -> Result<()> {
         SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
-        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+        send_lifecycle_event(LifecycleEvent::Shutdown);
         Ok(())
     }
 
@@ -182,7 +197,7 @@ impl SingleMode {
             .unwrap();
         let receiver = self.clone();
         monitor_runtime.block_on({
-            let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+            let mut lifecycle_rx = subscribe_runtime_to_signals();
             let receiver = receiver.clone();
             let thread_workers = thread_workers.clone();
             async move {
@@ -201,18 +216,19 @@ impl SingleMode {
                     }
                     lifecycle_event = lifecycle_rx.recv() => {
                         match lifecycle_event {
-                            Ok(LifecycleEvent::Restart) => {
+                            Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => {
                                 receiver.restart().await.ok();
                             }
-                            Ok(LifecycleEvent::Reload) => {
-                                receiver.reload().await.ok();
-                            }
                             Ok(LifecycleEvent::Shutdown) => {
                                 break;
                             }
                             Ok(LifecycleEvent::PrintInfo) => {
                                 receiver.print_info(thread_workers.clone()).await.ok();
                             }
+                            Err(e) => {
+                                debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e);
+                                break;
+                            }
                             _ => {}
                         }
                     }
@@ -227,13 +243,15 @@ impl SingleMode {
 
     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let (thread_workers, job_sender, nonblocking_sender) =
-            build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
-                .inspect_err(|e| {
-                    if let Some(err_val) = e.value() {
-                        print_rb_backtrace(err_val);
-                    }
-                })?;
+        let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers(
+            self.server_config.server_params.read().clone(),
+            self.worker_id,
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
 
         let worker_count = thread_workers.len();
         info!(
@@ -244,6 +262,14 @@ impl SingleMode {
         let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+
+        // If we're on Linux with reuse_port enabled, we can use
+        // kernel level load balancing across processes sharing a port.
+        // To take advantage of this, these forks will rebind to the same port upon boot.
+        // Worker 0 is special (this one just inherits the bind from the master process).
+        let is_zero_worker = self.is_zero_worker();
+        let should_rebind = !is_zero_worker && self.server_config.use_reuse_port_load_balancing();
+
         if monitor_thread.is_none() {
             error!("Failed to start monitor thread");
             return Err(ItsiError::new("Failed to start monitor thread"));
@@ -253,106 +279,123 @@ impl SingleMode {
             return Ok(());
         }
         let runtime = self.build_runtime();
-        let result = runtime.block_on(
-            async {
-                let mut listener_task_set = JoinSet::new();
-                let server_params = self.server_config.server_params.read().clone();
-                if let Err(err) = server_params.initialize_middleware().await {
-                    error!("Failed to initialize middleware: {}", err);
-                    return Err(ItsiError::new("Failed to initialize middleware"))
-                }
-                let tokio_listeners = server_params.listeners.lock()
-                    .drain(..)
-                    .map(|list| {
-                        Arc::new(list.into_tokio_listener())
-                    })
-                    .collect::<Vec<_>>();
-
-                tokio_listeners.iter().cloned().for_each(|listener| {
-                    let shutdown_sender = shutdown_sender.clone();
-                    let job_sender = job_sender.clone();
-                    let nonblocking_sender = nonblocking_sender.clone();
-
-                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-                    let mut shutdown_receiver = shutdown_sender.subscribe();
-                    let mut acceptor = Acceptor{
-                        acceptor_args: Arc::new(
-                            AcceptorArgs{
-                                strategy: self.clone(),
-                                listener_info: listener.listener_info(),
-                                shutdown_receiver: shutdown_sender.subscribe(),
-                                job_sender: job_sender.clone(),
-                                nonblocking_sender: nonblocking_sender.clone(),
-                                server_params: server_params.clone()
-                            }
-                        ),
-                        join_set: JoinSet::new()
-                    };
-
-                    let shutdown_rx_for_acme_task = shutdown_receiver.clone();
-                    let acme_task_listener_clone = listener.clone();
-                    listener_task_set.spawn(async move {
-                        acme_task_listener_clone.spawn_acme_event_task(shutdown_rx_for_acme_task).await;
-                    });
-
-                    listener_task_set.spawn(async move {
-                        loop {
-                            tokio::select! {
-                                accept_result = listener.accept() => {
-                                    match accept_result {
-                                        Ok(accepted) => acceptor.serve_connection(accepted).await,
-                                        Err(e) => debug!("Listener.accept failed: {:?}", e)
-                                    }
-                                },
-                                _ = shutdown_receiver.changed() => {
-                                    debug!("Shutdown requested via receiver");
-                                    break;
-                                },
-                                lifecycle_event = lifecycle_rx.recv() => {
-                                    match lifecycle_event {
-                                        Ok(LifecycleEvent::Shutdown) => {
-                                            debug!("Received LifecycleEvent::Shutdown");
-                                            let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
-                                            for _ in 0..worker_count {
-                                                let _ = job_sender.send(RequestJob::Shutdown).await;
-                                                let _ = nonblocking_sender.send(RequestJob::Shutdown).await;
-                                            }
-                                            break;
-                                        },
-                                        Err(e) => error!("Error receiving lifecycle event: {:?}", e),
-                                        _ => ()
+        let result = runtime.block_on(async {
+            let mut listener_task_set = JoinSet::new();
+            let server_params = self.server_config.server_params.read().clone();
+            if let Err(err) = server_params.initialize_middleware().await {
+                error!("Failed to initialize middleware: {}", err);
+                return Err(ItsiError::new("Failed to initialize middleware"));
+            }
+            let tokio_listeners = server_params
+                .listeners
+                .lock()
+                .drain(..)
+                .map(|list| Arc::new(list.into_tokio_listener(should_rebind)))
+                .collect::<Vec<_>>();
+
+            tokio_listeners.iter().cloned().for_each(|listener| {
+                let shutdown_sender = shutdown_sender.clone();
+                let job_sender = job_sender.clone();
+                let nonblocking_sender = nonblocking_sender.clone();
+
+                let mut lifecycle_rx = subscribe_runtime_to_signals();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let mut acceptor = Acceptor {
+                    acceptor_args: Arc::new(AcceptorArgs {
+                        strategy: self.clone(),
+                        listener_info: listener.listener_info(),
+                        shutdown_receiver: shutdown_sender.subscribe(),
+                        job_sender: job_sender.clone(),
+                        nonblocking_sender: nonblocking_sender.clone(),
+                        server_params: server_params.clone(),
+                    }),
+                    join_set: JoinSet::new(),
+                };
+
+                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                let acme_task_listener_clone = listener.clone();
+
+                let mut after_accept_wait: Option<Duration> = None::<Duration>;
+
+                if cfg!(target_os = "macos") {
+                    after_accept_wait = if server_params.workers > 1 && !(server_params.socket_opts.reuse_port && server_params.socket_opts.reuse_address) {
+                        Some(Duration::from_nanos(10 * server_params.workers as u64))
+                    } else {
+                        None
+                    };
+                };
+
+                listener_task_set.spawn(async move {
+                    acme_task_listener_clone
+                        .spawn_acme_event_task(shutdown_rx_for_acme_task)
+                        .await;
+                });
+
+                listener_task_set.spawn(async move {
+                    loop {
+                        // Process any pending signals before select
+                        tokio::select! {
+                            accept_result = listener.accept() => {
+                                match accept_result {
+                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                }
+                                if cfg!(target_os = "macos") {
+                                    if let Some(after_accept_wait) = after_accept_wait{
+                                        tokio::time::sleep(after_accept_wait).await;
                                     }
-                                }
-                            }
-                        }
-                        acceptor.join().await;
-                    });
-                });
-
-                if self.is_single_mode() {
+                                }
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                debug!("Shutdown requested via receiver");
+                                break;
+                            },
+                            lifecycle_event = lifecycle_rx.recv() => {
+                                match lifecycle_event {
+                                    Ok(LifecycleEvent::Shutdown) => {
+                                        debug!("Received LifecycleEvent::Shutdown");
+                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                        break;
+                                    },
+                                    Err(e) => {
+                                        debug!("Lifecycle channel closed: {:?}, exiting accept loop", e);
+                                        break
+                                    },
+                                    _ => ()
+                                }
+                            }
+                        }
+                    }
+                    acceptor.join().await;
+                });
+            });
+
+            if self.is_single_mode() {
                 self.invoke_hook("after_start");
-                }
+            }
 
-                while let Some(_res) = listener_task_set.join_next().await {}
-                drop(tokio_listeners);
+            while let Some(_res) = listener_task_set.join_next().await {}
+            drop(tokio_listeners);
 
-                Ok::<(), ItsiError>(())
-            });
+            Ok::<(), ItsiError>(())
+        });
 
         debug!("Single mode runtime exited.");
 
+        for _i in 0..thread_workers.len() {
+            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
+            nonblocking_sender
+                .send_blocking(RequestJob::Shutdown)
+                .unwrap();
+        }
         if result.is_err() {
-            for _i in 0..thread_workers.len() {
-                job_sender.send_blocking(RequestJob::Shutdown).unwrap();
-                nonblocking_sender
-                    .send_blocking(RequestJob::Shutdown)
-                    .unwrap();
-            }
-            self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+            send_lifecycle_event(LifecycleEvent::Shutdown);
         }
 
         shutdown_sender.send(RunningPhase::Shutdown).ok();
         runtime.shutdown_timeout(Duration::from_millis(100));
+        unsubscribe_runtime();
+
         debug!("Shutdown timeout finished.");
 
         let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
@@ -384,26 +427,6 @@ impl SingleMode {
     pub fn is_single_mode(&self) -> bool {
         self.server_config.server_params.read().workers == 1
     }
-    /// Attempts to reload the config "live"
-    /// Not that when running in single mode this will not unload
-    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
-    pub async fn reload(&self) -> Result<()> {
-        if !self.server_config.check_config().await {
-            return Ok(());
-        }
-        let should_reexec = self.server_config.clone().reload(false)?;
-        if should_reexec {
-            if self.is_single_mode() {
-                self.invoke_hook("before_restart");
-            }
-            self.server_config.dup_fds()?;
-            self.server_config.reload_exec()?;
-        }
-        self.restart_requested.store(true, Ordering::SeqCst);
-        self.stop()?;
-        self.server_config.server_params.read().preload_ruby()?;
-        Ok(())
-    }
 
     pub fn invoke_hook(&self, hook_name: &str) {
         if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
@@ -418,6 +441,7 @@ impl SingleMode {
         if self.is_single_mode() {
             self.invoke_hook("before_restart");
         }
+        self.server_config.stop_watcher()?;
        self.server_config.dup_fds()?;
        self.server_config.reload_exec()?;
        Ok(())