itsi-server 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -243,20 +243,20 @@ impl std::fmt::Display for SockAddr {
   }
 
 impl Listener {
-    pub fn to_tokio_listener(&self) -> TokioListener {
+    pub fn into_tokio_listener(self) -> TokioListener {
         match self {
-            Listener::Tcp(listener) => TokioListener::Tcp(
-                TokioTcpListener::from_std(TcpListener::try_clone(listener).unwrap()).unwrap(),
-            ),
+            Listener::Tcp(listener) => {
+                TokioListener::Tcp(TokioTcpListener::from_std(listener).unwrap())
+            }
             Listener::TcpTls((listener, acceptor)) => TokioListener::TcpTls(
-                TokioTcpListener::from_std(TcpListener::try_clone(listener).unwrap()).unwrap(),
+                TokioTcpListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
-            Listener::Unix(listener) => TokioListener::Unix(
-                TokioUnixListener::from_std(UnixListener::try_clone(listener).unwrap()).unwrap(),
-            ),
+            Listener::Unix(listener) => {
+                TokioListener::Unix(TokioUnixListener::from_std(listener).unwrap())
+            }
             Listener::UnixTls((listener, acceptor)) => TokioListener::UnixTls(
-                TokioUnixListener::from_std(UnixListener::try_clone(listener).unwrap()).unwrap(),
+                TokioUnixListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
         }
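This hunk switches `to_tokio_listener(&self)` to a consuming `into_tokio_listener(self)`, which removes the `try_clone` (and the duplicated file descriptor) on every conversion. A minimal, self-contained sketch of that by-value pattern, using plain std/tokio TCP types rather than itsi's enums; note that `from_std` expects the socket to already be in non-blocking mode:

```rust
use std::net::TcpListener as StdTcpListener;
use tokio::net::TcpListener as TokioTcpListener;

// Convert by value: no try_clone, no second fd; the std listener is consumed.
fn into_tokio(listener: StdTcpListener) -> std::io::Result<TokioTcpListener> {
    // from_std does not flip this itself; a blocking fd would stall the reactor.
    listener.set_nonblocking(true)?;
    TokioTcpListener::from_std(listener)
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let std_listener = StdTcpListener::bind("127.0.0.1:0")?;
    let listener = into_tokio(std_listener)?;
    println!("listening on {}", listener.local_addr()?);
    Ok(())
}
```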
@@ -53,8 +53,8 @@ impl ProcessWorker {
             }
             *self.child_pid.lock() = None;
         }
-
-        match call_with_gvl(|_ruby| fork(cluster_template.server.after_fork.lock().clone())) {
+        match call_with_gvl(|_ruby| fork(cluster_template.server.hooks.get("after_fork").cloned()))
+        {
             Some(pid) => {
                 *self.child_pid.lock() = Some(Pid::from_raw(pid));
             }
@@ -67,7 +67,7 @@ impl ProcessWorker {
         }
         match SingleMode::new(
             cluster_template.server.clone(),
-            cluster_template.listeners.clone(),
+            cluster_template.listeners.lock().drain(..).collect(),
             cluster_template.lifecycle_channel.clone(),
         ) {
             Ok(single_mode) => {
@@ -83,6 +83,13 @@ impl ProcessWorker {
         Ok(())
     }
 
+    pub fn pid(&self) -> i32 {
+        if let Some(pid) = *self.child_pid.lock() {
+            return pid.as_raw();
+        }
+        0
+    }
+
     pub(crate) fn memory_usage(&self) -> Option<u64> {
         if let Some(pid) = *self.child_pid.lock() {
             let s = System::new_all();
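The new `pid()` accessor returns the raw child pid, falling back to 0 when no child is running, so hooks such as `after_memory_threshold_reached` can be handed a pid. For context, a hedged sketch of the per-pid lookup that `memory_usage` performs via the sysinfo crate; it assumes sysinfo 0.30+, where the Ext traits are gone and `memory()` reports bytes:

```rust
use sysinfo::{Pid, System};

// Look up a process's resident memory by pid, as memory_usage does above.
fn memory_usage(pid: u32) -> Option<u64> {
    let s = System::new_all(); // snapshot of all processes
    s.process(Pid::from_u32(pid)).map(|p| p.memory()) // bytes in sysinfo 0.30+
}

fn main() {
    let me = std::process::id();
    println!("rss: {:?} bytes", memory_usage(me));
}
```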
@@ -3,8 +3,11 @@ use crate::server::{
     process_worker::ProcessWorker,
 };
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{call_without_gvl, create_ruby_thread};
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
+};
 use itsi_tracing::{error, info, warn};
+use magnus::Value;
 use nix::{
     libc::{self, exit},
     unistd::Pid,
@@ -19,9 +22,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners: Arc<Vec<Arc<Listener>>>,
+    pub listeners: parking_lot::Mutex<Vec<Listener>>,
     pub server: Arc<Server>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
@@ -34,12 +37,9 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
 impl ClusterMode {
     pub fn new(
         server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
         let process_workers = (0..server.workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
@@ -48,7 +48,7 @@ impl ClusterMode {
             .collect();
 
         Self {
-            listeners,
+            listeners: parking_lot::Mutex::new(listeners),
             server,
             process_workers: parking_lot::Mutex::new(process_workers),
             lifecycle_channel,
@@ -152,7 +152,7 @@ impl ClusterMode {
 
         tokio::select! {
             _ = monitor_handle => {
-                info!("All children exited early, exit normally")
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -191,6 +191,9 @@ impl ClusterMode {
     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        if let Some(proc) = self.server.hooks.get("before_fork") {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
         self.process_workers
             .lock()
             .iter()
@@ -228,6 +231,9 @@ impl ClusterMode {
                 if let Some(current_mem_usage) = largest_worker.memory_usage() {
                     if current_mem_usage > memory_limit {
                         largest_worker.reboot(self_ref.clone()).await.ok();
+                        if let Some(hook) = self_ref.server.hooks.get("after_memory_threshold_reached") {
+                            call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok());
+                        }
                     }
                 }
             }
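These hunks replace dedicated hook fields (`before_fork`, `after_fork`) with lookups in a named-hook table, invoked under the GVL only when registered. A self-contained sketch of that pattern, with plain Rust closures standing in for the stored Ruby procs (the `Hook`/`Hooks` aliases and `run_hook` are illustrative, not the crate's real types):

```rust
use std::collections::HashMap;

// Stand-in for the stored Ruby procs; itsi invokes the real ones under the
// GVL via call_with_gvl / call_proc_and_log_errors.
type Hook = Box<dyn Fn(Option<i32>) + Send + Sync>;
type Hooks = HashMap<String, Hook>;

fn run_hook(hooks: &Hooks, name: &str, pid: Option<i32>) {
    // Unregistered hooks are simply skipped, mirroring the `if let Some(...)` guards.
    if let Some(hook) = hooks.get(name) {
        hook(pid);
    }
}

fn main() {
    let mut hooks: Hooks = HashMap::new();
    hooks.insert(
        "after_memory_threshold_reached".into(),
        Box::new(|pid| println!("memory threshold reached in worker {:?}", pid)),
    );
    run_hook(&hooks, "before_fork", None); // no-op: not registered
    run_hook(&hooks, "after_memory_threshold_reached", Some(1234));
}
```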
@@ -15,10 +15,13 @@ use hyper_util::{
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::print_rb_backtrace;
 use itsi_tracing::{debug, error, info};
 use nix::unistd::Pid;
+use parking_lot::Mutex;
 use std::{
     num::NonZeroU8,
+    panic,
     pin::Pin,
     sync::Arc,
     time::{Duration, Instant},
@@ -37,7 +40,7 @@ pub struct SingleMode {
     pub executor: Builder<TokioExecutor>,
     pub server: Arc<Server>,
     pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
+    pub(crate) listeners: Mutex<Vec<Listener>>,
     pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
     pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -52,18 +55,24 @@ impl SingleMode {
     #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
     pub(crate) fn new(
         server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Result<Self> {
         let (thread_workers, sender) = build_thread_workers(
+            server.clone(),
             Pid::this(),
             NonZeroU8::try_from(server.threads).unwrap(),
             server.app.clone(),
             server.scheduler_class.clone(),
-        )?;
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
         Ok(Self {
             executor: Builder::new(TokioExecutor::new()),
-            listeners,
+            listeners: Mutex::new(listeners),
             server,
             sender,
             thread_workers,
@@ -83,82 +92,81 @@ impl SingleMode {
     }
 
     pub fn stop(&self) -> Result<()> {
-        self.lifecycle_channel
-            .send(LifecycleEvent::Shutdown)
-            .expect("Failed to send shutdown event");
+        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
         Ok(())
     }
 
     #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         let mut listener_task_set = JoinSet::new();
-        let self_ref = Arc::new(self);
-        let runtime = self_ref.build_runtime();
+        let runtime = self.build_runtime();
 
         runtime.block_on(async {
-            let tokio_listeners = self_ref
-                .listeners
-                .iter()
-                .map(|list| Arc::new(list.to_tokio_listener()))
-                .collect::<Vec<_>>();
-            let (shutdown_sender, _) = watch::channel::<RunningPhase>(RunningPhase::Running);
-            for listener in tokio_listeners.iter() {
-                let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
-                let listener_info = Arc::new(listener.listener_info());
-                let self_ref = self_ref.clone();
-                let listener = listener.clone();
-                let shutdown_sender = shutdown_sender.clone();
-
+            let tokio_listeners = self
+                .listeners.lock()
+                .drain(..)
+                .map(|list| {
+                    Arc::new(list.into_tokio_listener())
+                })
+                .collect::<Vec<_>>();
+            let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+            for listener in tokio_listeners.iter() {
+                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+                let listener_info = Arc::new(listener.listener_info());
+                let self_ref = self.clone();
+                let listener = listener.clone();
+                let shutdown_sender = shutdown_sender.clone();
 
-                let listener_clone = listener.clone();
-                let mut shutdown_receiver = shutdown_sender.clone().subscribe();
-                let shutdown_receiver_clone = shutdown_receiver.clone();
-                listener_task_set.spawn(async move {
-                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
-                });
+                let listener_clone = listener.clone();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let shutdown_receiver_clone = shutdown_receiver.clone();
+                listener_task_set.spawn(async move {
+                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                });
 
-                listener_task_set.spawn(async move {
-                    let strategy = self_ref.clone();
-                    loop {
-                        tokio::select! {
-                            accept_result = listener.accept() => match accept_result {
-                                Ok(accept_result) => {
-                                    match strategy.serve_connection(accept_result, listener_info.clone(), shutdown_receiver.clone()).await {
-                                        Ok(_) => {
-                                            debug!("Connection accepted and served");
+                listener_task_set.spawn(async move {
+                    let strategy_clone = self_ref.clone();
+                    let mut acceptor_task_set = JoinSet::new();
+                    loop {
+                        tokio::select! {
+                            accept_result = listener.accept() => match accept_result {
+                                Ok(accept_result) => {
+                                    let strategy = strategy_clone.clone();
+                                    let listener_info = listener_info.clone();
+                                    let shutdown_receiver = shutdown_receiver.clone();
+                                    acceptor_task_set.spawn(async move {
+                                        strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
+                                    });
                                 },
-                                        Err(e) => error!("Error in serve_connection {:?}", e)
-                                    }
-                                },
-                                Err(e) => debug!("Listener.accept failed {:?}", e),
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
-                                Ok(lifecycle_event) => {
-                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await {
-                                        match e {
-                                            ItsiError::Break() => break,
-                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                        }
+                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                break;
                             }
+                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
+                                Ok(lifecycle_event) => {
+                                    if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await {
+                                        match e {
+                                            ItsiError::Break() => break,
+                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
+                                        }
+                                    }
 
-                                },
-                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                            }
+                                },
+                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                            }
+                        }
                     }
-                }
-            });
+                    while let Some(_res) = acceptor_task_set.join_next().await {}
+                });
 
-            }
+            }
 
-            while let Some(_res) = listener_task_set.join_next().await {}
+            while let Some(_res) = listener_task_set.join_next().await {}
 
-        });
+        });
         runtime.shutdown_timeout(Duration::from_millis(100));
-
-        info!("Runtime has shut down");
+        debug!("Runtime has shut down");
         Ok(())
     }
 
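The reworked accept loop no longer serves connections inline: each accepted connection is spawned onto an `acceptor_task_set`, and that set is drained after the loop breaks, so shutdown waits for in-flight connections. A reduced sketch of that shape (toy serve body and a demo shutdown timer; the real loop also selects on lifecycle events):

```rust
use tokio::{net::TcpListener, sync::watch, task::JoinSet};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let (shutdown_tx, mut shutdown_rx) = watch::channel(false);

    // Fire the shutdown signal shortly after startup so this demo terminates.
    tokio::spawn(async move {
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        shutdown_tx.send(true).ok();
    });

    let mut conns = JoinSet::new();
    loop {
        tokio::select! {
            accepted = listener.accept() => {
                if let Ok((_stream, addr)) = accepted {
                    // Serve concurrently instead of inline, as the new loop does.
                    conns.spawn(async move { println!("served {addr}") });
                }
            }
            _ = shutdown_rx.changed() => break, // stop accepting
        }
    }
    // Drain in-flight connections before returning.
    while conns.join_next().await.is_some() {}
    Ok(())
}
```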
@@ -167,62 +175,60 @@ impl SingleMode {
         stream: IoStream,
         listener: Arc<ListenerInfo>,
         shutdown_channel: watch::Receiver<RunningPhase>,
-    ) -> Result<()> {
+    ) {
         let sender_clone = self.sender.clone();
         let addr = stream.addr();
         let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
         let server = self.server.clone();
         let executor = self.executor.clone();
         let mut shutdown_channel_clone = shutdown_channel.clone();
-        tokio::spawn(async move {
-            let server = server.clone();
-            let mut executor = executor.clone();
-            let mut binding = executor.http1();
-            let shutdown_channel = shutdown_channel_clone.clone();
-            let mut serve = Box::pin(
-                binding
-                    .timer(TokioTimer::new())
-                    .header_read_timeout(Duration::from_secs(1))
-                    .serve_connection_with_upgrades(
-                        io,
-                        service_fn(move |hyper_request: Request<Incoming>| {
-                            ItsiRequest::process_request(
-                                hyper_request,
-                                sender_clone.clone(),
-                                server.clone(),
-                                listener.clone(),
-                                addr.clone(),
-                                shutdown_channel.clone(),
-                            )
-                        }),
-                    ),
-            );
+        let server = server.clone();
+        let mut executor = executor.clone();
+        let mut binding = executor.http1();
+        let shutdown_channel = shutdown_channel_clone.clone();
+        let mut serve = Box::pin(
+            binding
+                .timer(TokioTimer::new())
+                .header_read_timeout(Duration::from_secs(1))
+                .serve_connection_with_upgrades(
+                    io,
+                    service_fn(move |hyper_request: Request<Incoming>| {
+                        ItsiRequest::process_request(
+                            hyper_request,
+                            sender_clone.clone(),
+                            server.clone(),
+                            listener.clone(),
+                            addr.clone(),
+                            shutdown_channel.clone(),
+                        )
+                    }),
+                ),
+        );
 
-        tokio::select! {
-            // Await the connection finishing naturally.
-            res = &mut serve => {
-                match res {
-                    Ok(()) => {
-                        debug!("Connection closed normally")
-                    },
-                    Err(res) => {
-                        debug!("Connection finished with error: {:?}", res)
-                    }
-                }
-                serve.as_mut().graceful_shutdown();
-            },
-            // A lifecycle event triggers shutdown.
-            _ = shutdown_channel_clone.changed() => {
-                // Initiate graceful shutdown.
-                serve.as_mut().graceful_shutdown();
-                // Now await the connection to finish shutting down.
-                if let Err(e) = serve.await {
-                    debug!("Connection shutdown error: {:?}", e);
+        tokio::select! {
+            // Await the connection finishing naturally.
+            res = &mut serve => {
+                match res {
+                    Ok(()) => {
+                        debug!("Connection closed normally")
+                    },
+                    Err(res) => {
+                        debug!("Connection finished with error: {:?}", res)
                     }
                 }
+                serve.as_mut().graceful_shutdown();
+            },
+            // A lifecycle event triggers shutdown.
+            _ = shutdown_channel_clone.changed() => {
+                // Initiate graceful shutdown.
+                serve.as_mut().graceful_shutdown();
+
+                // Now await the connection to finish shutting down.
+                if let Err(e) = serve.await {
+                    debug!("Connection shutdown error: {:?}", e);
+                }
             }
-        });
-        Ok(())
+        }
     }
 
     pub async fn handle_lifecycle_event(
@@ -232,13 +238,20 @@ impl SingleMode {
     ) -> Result<()> {
         info!("Handling lifecycle event: {:?}", lifecycle_event);
         if let LifecycleEvent::Shutdown = lifecycle_event {
+            // 1. Stop accepting new connections.
             shutdown_sender.send(RunningPhase::ShutdownPending).ok();
-            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            // 2. Break out of work queues.
             for worker in &*self.thread_workers {
                 worker.request_shutdown().await;
             }
+
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            // 3. Wait for all threads to finish.
+            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
             while Instant::now() < deadline {
-                tokio::time::sleep(Duration::from_millis(50)).await;
                 let alive_threads = self
                     .thread_workers
                     .iter()
  .iter()
@@ -250,7 +263,7 @@ impl SingleMode {
             tokio::time::sleep(Duration::from_millis(200)).await;
         }
 
-        info!("Sending shutdown signal");
+        // 4. Force shutdown any stragglers.
         shutdown_sender.send(RunningPhase::Shutdown).ok();
         self.thread_workers.iter().for_each(|worker| {
             worker.poll_shutdown(deadline);
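Shutdown is now staged rather than immediate: flip the watch channel to `ShutdownPending`, ask workers to drain, poll until the deadline, then broadcast the final `Shutdown` phase. A compressed sketch of that sequencing, with a dummy worker count standing in for itsi's live-thread poll:

```rust
use std::time::{Duration, Instant};
use tokio::sync::watch;

#[derive(Clone, Debug)]
enum RunningPhase { Running, ShutdownPending, Shutdown }

#[tokio::main]
async fn main() {
    let (phase_tx, _phase_rx) = watch::channel(RunningPhase::Running);

    // 1. Stop accepting new connections.
    phase_tx.send(RunningPhase::ShutdownPending).ok();

    // 2./3. Grace period: poll until workers drain or the deadline passes.
    let deadline = Instant::now() + Duration::from_secs_f64(1.0);
    let mut alive_threads = 3; // stand-in for counting live thread workers
    while Instant::now() < deadline && alive_threads > 0 {
        alive_threads -= 1; // pretend one worker finishes per poll
        tokio::time::sleep(Duration::from_millis(200)).await;
    }

    // 4. Force shutdown any stragglers.
    phase_tx.send(RunningPhase::Shutdown).ok();
}
```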
@@ -11,10 +11,7 @@ pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
 )> = LazyLock::new(|| sync::broadcast::channel(5));
 
 pub fn send_shutdown_event() {
-    SIGNAL_HANDLER_CHANNEL
-        .0
-        .send(LifecycleEvent::Shutdown)
-        .expect("Failed to send shutdown event");
+    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
 }
 
 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);