itsi 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +11 -2
  3. data/Rakefile +5 -2
  4. data/crates/itsi_rb_helpers/src/lib.rs +27 -4
  5. data/crates/itsi_server/Cargo.toml +4 -1
  6. data/crates/itsi_server/src/lib.rs +69 -1
  7. data/crates/itsi_server/src/request/itsi_request.rs +2 -9
  8. data/crates/itsi_server/src/response/itsi_response.rs +2 -2
  9. data/crates/itsi_server/src/server/bind.rs +16 -12
  10. data/crates/itsi_server/src/server/itsi_server.rs +43 -49
  11. data/crates/itsi_server/src/server/listener.rs +9 -9
  12. data/crates/itsi_server/src/server/process_worker.rs +10 -3
  13. data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  14. data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +124 -111
  15. data/crates/itsi_server/src/server/signal.rs +1 -4
  16. data/crates/itsi_server/src/server/thread_worker.rs +52 -20
  17. data/crates/itsi_server/src/server/tls.rs +1 -1
  18. data/gems/scheduler/ext/itsi_rb_helpers/src/lib.rs +27 -4
  19. data/gems/scheduler/ext/itsi_server/Cargo.toml +4 -1
  20. data/gems/scheduler/ext/itsi_server/src/lib.rs +69 -1
  21. data/gems/scheduler/ext/itsi_server/src/request/itsi_request.rs +2 -9
  22. data/gems/scheduler/ext/itsi_server/src/response/itsi_response.rs +2 -2
  23. data/gems/scheduler/ext/itsi_server/src/server/bind.rs +16 -12
  24. data/gems/scheduler/ext/itsi_server/src/server/itsi_server.rs +43 -49
  25. data/gems/scheduler/ext/itsi_server/src/server/listener.rs +9 -9
  26. data/gems/scheduler/ext/itsi_server/src/server/process_worker.rs +10 -3
  27. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  28. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/single_mode.rs +124 -111
  29. data/gems/scheduler/ext/itsi_server/src/server/signal.rs +1 -4
  30. data/gems/scheduler/ext/itsi_server/src/server/thread_worker.rs +52 -20
  31. data/gems/scheduler/ext/itsi_server/src/server/tls.rs +1 -1
  32. data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
  33. data/gems/server/Cargo.lock +11 -2
  34. data/gems/server/exe/itsi +53 -23
  35. data/gems/server/ext/itsi_rb_helpers/src/lib.rs +27 -4
  36. data/gems/server/ext/itsi_server/Cargo.toml +4 -1
  37. data/gems/server/ext/itsi_server/src/lib.rs +69 -1
  38. data/gems/server/ext/itsi_server/src/request/itsi_request.rs +2 -9
  39. data/gems/server/ext/itsi_server/src/response/itsi_response.rs +2 -2
  40. data/gems/server/ext/itsi_server/src/server/bind.rs +16 -12
  41. data/gems/server/ext/itsi_server/src/server/itsi_server.rs +43 -49
  42. data/gems/server/ext/itsi_server/src/server/listener.rs +9 -9
  43. data/gems/server/ext/itsi_server/src/server/process_worker.rs +10 -3
  44. data/gems/server/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  45. data/gems/server/ext/itsi_server/src/server/serve_strategy/single_mode.rs +124 -111
  46. data/gems/server/ext/itsi_server/src/server/signal.rs +1 -4
  47. data/gems/server/ext/itsi_server/src/server/thread_worker.rs +52 -20
  48. data/gems/server/ext/itsi_server/src/server/tls.rs +1 -1
  49. data/gems/server/lib/itsi/server/Itsi.rb +127 -0
  50. data/gems/server/lib/itsi/server/config.rb +36 -0
  51. data/gems/server/lib/itsi/server/options_dsl.rb +401 -0
  52. data/gems/server/lib/itsi/server/rack/handler/itsi.rb +18 -6
  53. data/gems/server/lib/itsi/server/rack_interface.rb +1 -5
  54. data/gems/server/lib/itsi/server/signal_trap.rb +0 -1
  55. data/gems/server/lib/itsi/server/version.rb +1 -1
  56. data/gems/server/lib/itsi/server.rb +7 -3
  57. data/gems/server/test/helpers/test_helper.rb +7 -5
  58. data/gems/server/test/test_itsi_server.rb +21 -2
  59. data/lib/itsi/version.rb +1 -1
  60. data/location_dsl.rb +381 -0
  61. data/sandbox/itsi_itsi_file/Itsi.rb +119 -0
  62. data/sandbox/itsi_sandbox_async/Gemfile +1 -1
  63. data/sandbox/itsi_sandbox_rack/Gemfile.lock +2 -2
  64. data/sandbox/itsi_sandbox_rails/Gemfile.lock +2 -2
  65. data/tasks.txt +27 -4
  66. metadata +14 -9
@@ -3,8 +3,11 @@ use crate::server::{
     process_worker::ProcessWorker,
 };
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{call_without_gvl, create_ruby_thread};
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
+};
 use itsi_tracing::{error, info, warn};
+use magnus::Value;
 use nix::{
     libc::{self, exit},
     unistd::Pid,
@@ -19,9 +22,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners: Arc<Vec<Arc<Listener>>>,
+    pub listeners: parking_lot::Mutex<Vec<Listener>>,
     pub server: Arc<Server>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
@@ -34,12 +37,9 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
 impl ClusterMode {
     pub fn new(
         server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
         let process_workers = (0..server.workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
@@ -48,7 +48,7 @@ impl ClusterMode {
             .collect();
 
         Self {
-            listeners,
+            listeners: parking_lot::Mutex::new(listeners),
             server,
             process_workers: parking_lot::Mutex::new(process_workers),
             lifecycle_channel,
@@ -152,7 +152,7 @@ impl ClusterMode {
 
         tokio::select! {
             _ = monitor_handle => {
-                info!("All children exited early, exit normally")
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -191,6 +191,9 @@ impl ClusterMode {
     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        if let Some(proc) = self.server.hooks.get("before_fork") {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
         self.process_workers
             .lock()
             .iter()
@@ -228,6 +231,9 @@ impl ClusterMode {
             if let Some(current_mem_usage) = largest_worker.memory_usage(){
                 if current_mem_usage > memory_limit {
                     largest_worker.reboot(self_ref.clone()).await.ok();
+                    if let Some(hook) = self_ref.server.hooks.get("after_memory_threshold_reached") {
+                        call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok() );
+                    }
                 }
             }
         }
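Note: the two hooks added above ("before_fork" and "after_memory_threshold_reached") are invoked the same way: look up an optional Ruby proc registered on the server, re-enter the GVL via itsi_rb_helpers::call_with_gvl, call the proc, and log rather than propagate any Ruby exception. Below is a minimal sketch of that calling pattern written against magnus directly; the fire_hook helper and its signature are illustrative and not part of this diff:

    use magnus::{block::Proc, Error, Value};

    // Call an optional lifecycle hook and swallow (but report) Ruby exceptions.
    // Must run on a Ruby thread with the GVL held (call_with_gvl in the diff).
    fn fire_hook(hook: Option<Proc>, worker_pid: Option<i32>) {
        let Some(hook) = hook else { return };
        let result: Result<Value, Error> = match worker_pid {
            // "after_memory_threshold_reached" receives the rebooted worker's pid
            Some(pid) => hook.call((pid,)),
            // "before_fork" takes no arguments
            None => hook.call(()),
        };
        if let Err(err) = result {
            eprintln!("lifecycle hook raised: {err:?}");
        }
    }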
@@ -15,10 +15,13 @@ use hyper_util::{
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::print_rb_backtrace;
 use itsi_tracing::{debug, error, info};
 use nix::unistd::Pid;
+use parking_lot::Mutex;
 use std::{
     num::NonZeroU8,
+    panic,
     pin::Pin,
     sync::Arc,
     time::{Duration, Instant},
@@ -37,7 +40,7 @@ pub struct SingleMode {
     pub executor: Builder<TokioExecutor>,
     pub server: Arc<Server>,
     pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
+    pub(crate) listeners: Mutex<Vec<Listener>>,
     pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
     pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -52,18 +55,24 @@ impl SingleMode {
     #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
     pub(crate) fn new(
         server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Result<Self> {
         let (thread_workers, sender) = build_thread_workers(
+            server.clone(),
             Pid::this(),
             NonZeroU8::try_from(server.threads).unwrap(),
             server.app.clone(),
             server.scheduler_class.clone(),
-        )?;
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
         Ok(Self {
             executor: Builder::new(TokioExecutor::new()),
-            listeners,
+            listeners: Mutex::new(listeners),
             server,
             sender,
             thread_workers,
@@ -83,82 +92,81 @@ impl SingleMode {
     }
 
     pub fn stop(&self) -> Result<()> {
-        self.lifecycle_channel
-            .send(LifecycleEvent::Shutdown)
-            .expect("Failed to send shutdown event");
+        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
         Ok(())
     }
 
     #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         let mut listener_task_set = JoinSet::new();
-        let self_ref = Arc::new(self);
-        let runtime = self_ref.build_runtime();
+        let runtime = self.build_runtime();
 
         runtime.block_on(async {
-            let tokio_listeners = self_ref
-                .listeners
-                .iter()
-                .map(|list| Arc::new(list.to_tokio_listener()))
-                .collect::<Vec<_>>();
-            let (shutdown_sender, _) = watch::channel::<RunningPhase>(RunningPhase::Running);
-            for listener in tokio_listeners.iter() {
-                let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
-                let listener_info = Arc::new(listener.listener_info());
-                let self_ref = self_ref.clone();
-                let listener = listener.clone();
-                let shutdown_sender = shutdown_sender.clone();
-
+            let tokio_listeners = self
+                .listeners.lock()
+                .drain(..)
+                .map(|list| {
+                    Arc::new(list.into_tokio_listener())
+                })
+                .collect::<Vec<_>>();
+            let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+            for listener in tokio_listeners.iter() {
+                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+                let listener_info = Arc::new(listener.listener_info());
+                let self_ref = self.clone();
+                let listener = listener.clone();
+                let shutdown_sender = shutdown_sender.clone();
 
-                let listener_clone = listener.clone();
-                let mut shutdown_receiver = shutdown_sender.clone().subscribe();
-                let shutdown_receiver_clone = shutdown_receiver.clone();
-                listener_task_set.spawn(async move {
-                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
-                });
+                let listener_clone = listener.clone();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let shutdown_receiver_clone = shutdown_receiver.clone();
+                listener_task_set.spawn(async move {
+                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                });
 
-                listener_task_set.spawn(async move {
-                    let strategy = self_ref.clone();
-                    loop {
-                        tokio::select! {
-                            accept_result = listener.accept() => match accept_result {
-                                Ok(accept_result) => {
-                                    match strategy.serve_connection(accept_result, listener_info.clone(), shutdown_receiver.clone()).await {
-                                        Ok(_) => {
-                                            debug!("Connection accepted and served");
+                listener_task_set.spawn(async move {
+                    let strategy_clone = self_ref.clone();
+                    let mut acceptor_task_set = JoinSet::new();
+                    loop {
+                        tokio::select! {
+                            accept_result = listener.accept() => match accept_result {
+                                Ok(accept_result) => {
+                                    let strategy = strategy_clone.clone();
+                                    let listener_info = listener_info.clone();
+                                    let shutdown_receiver = shutdown_receiver.clone();
+                                    acceptor_task_set.spawn(async move {
+                                        strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
+                                    });
                                 },
-                                        Err(e) => error!("Error in serve_connection {:?}", e)
-                                    }
-                                },
-                                Err(e) => debug!("Listener.accept failed {:?}", e),
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                Ok(lifecycle_event) => {
-                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
-                                        match e {
-                                            ItsiError::Break() => break,
-                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                        }
+                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                break;
                             }
+                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+                                Ok(lifecycle_event) => {
+                                    if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
+                                        match e {
+                                            ItsiError::Break() => break,
+                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
+                                        }
+                                    }
 
-                                },
-                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                            }
+                                },
+                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                            }
+                        }
                     }
-                }
-                });
+                    while let Some(_res) = acceptor_task_set.join_next().await {}
+                });
 
-            }
+            }
 
-            while let Some(_res) = listener_task_set.join_next().await {}
+            while let Some(_res) = listener_task_set.join_next().await {}
 
-        });
+        });
         runtime.shutdown_timeout(Duration::from_millis(100));
-
-        info!("Runtime has shut down");
+        debug!("Runtime has shut down");
         Ok(())
     }
 
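Note: the rewritten accept loop above no longer serves a connection inline; each accepted connection is spawned onto a per-listener JoinSet and, once the loop breaks on shutdown, that set is drained so in-flight connections finish before the listener task returns. A small compile-level sketch of the same spawn-then-drain shape with plain TCP (serve_n, the fixed connection count, and the hello payload are illustrative only, not itsi code):

    use tokio::{io::AsyncWriteExt, net::TcpListener, task::JoinSet};

    // Accept `n` connections, serve each on its own task, then wait for all of
    // them to finish before returning.
    async fn serve_n(listener: TcpListener, n: usize) -> std::io::Result<()> {
        let mut connections = JoinSet::new();
        for _ in 0..n {
            let (mut stream, peer) = listener.accept().await?;
            connections.spawn(async move {
                let _ = stream.write_all(b"hello\n").await;
                println!("served {peer}");
            });
        }
        // Equivalent of `while let Some(_res) = acceptor_task_set.join_next().await {}` above.
        while connections.join_next().await.is_some() {}
        Ok(())
    }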
@@ -167,62 +175,60 @@ impl SingleMode {
         stream: IoStream,
         listener: Arc<ListenerInfo>,
         shutdown_channel: watch::Receiver<RunningPhase>,
-    ) -> Result<()> {
+    ) {
         let sender_clone = self.sender.clone();
         let addr = stream.addr();
         let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
         let server = self.server.clone();
         let executor = self.executor.clone();
         let mut shutdown_channel_clone = shutdown_channel.clone();
-        tokio::spawn(async move {
-            let server = server.clone();
-            let mut executor = executor.clone();
-            let mut binding = executor.http1();
-            let shutdown_channel = shutdown_channel_clone.clone();
-            let mut serve = Box::pin(
-                binding
-                    .timer(TokioTimer::new())
-                    .header_read_timeout(Duration::from_secs(1))
-                    .serve_connection_with_upgrades(
-                        io,
-                        service_fn(move |hyper_request: Request<Incoming>| {
-                            ItsiRequest::process_request(
-                                hyper_request,
-                                sender_clone.clone(),
-                                server.clone(),
-                                listener.clone(),
-                                addr.clone(),
-                                shutdown_channel.clone(),
-                            )
-                        }),
-                    ),
-            );
+        let server = server.clone();
+        let mut executor = executor.clone();
+        let mut binding = executor.http1();
+        let shutdown_channel = shutdown_channel_clone.clone();
+        let mut serve = Box::pin(
+            binding
+                .timer(TokioTimer::new())
+                .header_read_timeout(Duration::from_secs(1))
+                .serve_connection_with_upgrades(
+                    io,
+                    service_fn(move |hyper_request: Request<Incoming>| {
+                        ItsiRequest::process_request(
+                            hyper_request,
+                            sender_clone.clone(),
+                            server.clone(),
+                            listener.clone(),
+                            addr.clone(),
+                            shutdown_channel.clone(),
+                        )
+                    }),
+                ),
+        );
 
-            tokio::select! {
-                // Await the connection finishing naturally.
-                res = &mut serve => {
-                    match res{
-                        Ok(()) => {
-                            debug!("Connection closed normally")
-                        },
-                        Err(res) => {
-                            debug!("Connection finished with error: {:?}", res)
-                        }
-                    }
-                    serve.as_mut().graceful_shutdown();
-                },
-                // A lifecycle event triggers shutdown.
-                _ = shutdown_channel_clone.changed() => {
-                    // Initiate graceful shutdown.
-                    serve.as_mut().graceful_shutdown();
-                    // Now await the connection to finish shutting down.
-                    if let Err(e) = serve.await {
-                        debug!("Connection shutdown error: {:?}", e);
+        tokio::select! {
+            // Await the connection finishing naturally.
+            res = &mut serve => {
+                match res{
+                    Ok(()) => {
+                        debug!("Connection closed normally")
+                    },
+                    Err(res) => {
+                        debug!("Connection finished with error: {:?}", res)
                     }
                 }
+                serve.as_mut().graceful_shutdown();
+            },
+            // A lifecycle event triggers shutdown.
+            _ = shutdown_channel_clone.changed() => {
+                // Initiate graceful shutdown.
+                serve.as_mut().graceful_shutdown();
+
+                // Now await the connection to finish shutting down.
+                if let Err(e) = serve.await {
+                    debug!("Connection shutdown error: {:?}", e);
+                }
             }
-        });
-        Ok(())
+        }
     }
 
     pub async fn handle_lifecycle_event(
@@ -232,13 +238,20 @@ impl SingleMode {
     ) -> Result<()> {
         info!("Handling lifecycle event: {:?}", lifecycle_event);
         if let LifecycleEvent::Shutdown = lifecycle_event {
+            //1. Stop accepting new connections.
             shutdown_sender.send(RunningPhase::ShutdownPending).ok();
-            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //2. Break out of work queues.
             for worker in &*self.thread_workers {
                 worker.request_shutdown().await;
             }
+
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //3. Wait for all threads to finish.
+            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
             while Instant::now() < deadline {
-                tokio::time::sleep(Duration::from_millis(50)).await;
                 let alive_threads = self
                     .thread_workers
                     .iter()
@@ -250,7 +263,7 @@ impl SingleMode {
                 tokio::time::sleep(Duration::from_millis(200)).await;
             }
 
-            info!("Sending shutdown signal");
+            //4. Force shutdown any stragglers
             shutdown_sender.send(RunningPhase::Shutdown).ok();
             self.thread_workers.iter().for_each(|worker| {
                 worker.poll_shutdown(deadline);
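Note: shutdown is now staged, as the numbered comments above spell out: stop accepting new connections (RunningPhase::ShutdownPending), ask each worker to drain its queue, poll until either every worker thread has exited or the configured shutdown_timeout deadline passes, then force the stragglers down (RunningPhase::Shutdown). A small sketch of the deadline-polling step in isolation; the function name and the alive_workers closure are illustrative, not itsi's API:

    use std::time::{Duration, Instant};

    // Poll a liveness check until all workers are gone or the window closes.
    // Returns true if everything exited in time; the caller escalates otherwise.
    async fn wait_for_workers(deadline: Instant, mut alive_workers: impl FnMut() -> usize) -> bool {
        while Instant::now() < deadline {
            if alive_workers() == 0 {
                return true;
            }
            tokio::time::sleep(Duration::from_millis(200)).await;
        }
        false
    }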
@@ -11,10 +11,7 @@ pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
 )> = LazyLock::new(|| sync::broadcast::channel(5));
 
 pub fn send_shutdown_event() {
-    SIGNAL_HANDLER_CHANNEL
-        .0
-        .send(LifecycleEvent::Shutdown)
-        .expect("Failed to send shutdown event");
+    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
 }
 
 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
@@ -1,4 +1,4 @@
-use super::itsi_server::RequestJob;
+use super::itsi_server::{RequestJob, Server};
 use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapVal, HeapValue,
@@ -24,6 +24,7 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 pub struct ThreadWorker {
+    pub server: Arc<Server>,
     pub id: String,
     pub app: Opaque<Value>,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -48,8 +49,9 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 
 pub struct TerminateWakerSignal(bool);
 
-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+#[instrument(name = "Boot", parent=None, skip(server, threads, app, pid, scheduler_class))]
 pub fn build_thread_workers(
+    server: Arc<Server>,
     pid: Pid,
     threads: NonZeroU8,
     app: HeapVal,
@@ -65,6 +67,7 @@ pub fn build_thread_workers(
         .map(|id| {
             info!(pid = pid.as_raw(), id, "Thread");
             ThreadWorker::new(
+                server.clone(),
                 format!("{:?}#{:?}", pid, id),
                 app,
                 receiver_ref.clone(),
@@ -83,10 +86,7 @@ pub fn load_app(
     scheduler_class: Option<String>,
 ) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
     call_with_gvl(|ruby| {
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
+        let app = Opaque::from(app.funcall::<_, _, Value>(*ID_CALL, ())?);
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -100,6 +100,7 @@ pub fn load_app(
 }
 impl ThreadWorker {
     pub fn new(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -107,6 +108,7 @@ impl ThreadWorker {
         scheduler_class: Option<Opaque<Value>>,
     ) -> Result<Self> {
         let mut worker = Self {
+            server,
             id,
             app,
             receiver,
@@ -125,7 +127,7 @@ impl ThreadWorker {
             Ok(_) => {}
             Err(err) => error!("Failed to send shutdown request: {}", err),
         };
-        info!("Requesting shutdown");
+        debug!("Requesting shutdown");
     }
 
     #[instrument(skip(self, deadline), fields(id = self.id))]
@@ -140,7 +142,7 @@ impl ThreadWorker {
             if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
                 return true;
             }
-            info!("Thread has shut down");
+            debug!("Thread has shut down");
         }
         self.thread.write().take();
 
@@ -154,17 +156,23 @@ impl ThreadWorker {
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let server = self.server.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
-                        {
+                        if let Err(err) = Self::fiber_accept_loop(
+                            server,
+                            id,
+                            app,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-                        Self::accept_loop(id, app, receiver, terminated);
+                        Self::accept_loop(server, id, app, receiver, terminated);
                     }
                 })
                 .into(),
@@ -180,6 +188,7 @@ impl ThreadWorker {
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -243,10 +252,15 @@ impl ThreadWorker {
                 }
 
                 let yield_result = if receiver.is_empty() {
+                    let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                        idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                        idle_counter == 0
+                    } else {
+                        false
+                    };
                     waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                    idle_counter = (idle_counter + 1) % 100;
                     call_with_gvl(|ruby| {
-                        if idle_counter == 0 {
+                        if should_gc {
                             ruby.gc_start();
                         }
                         scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
@@ -264,6 +278,8 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn fiber_accept_loop(
+        server: Arc<Server>,
+
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -273,10 +289,16 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let server = ruby.get_inner(&ITSI_SERVER);
-        let scheduler_proc =
-            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
-        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = Self::build_scheduler_proc(
+            app,
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            server.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -337,21 +359,31 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let server = ruby.get_inner(&ITSI_SERVER);
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let mut idle_counter = 0;
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = server.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        ruby.gc_start();
+                    }
+                };
+            }
             match receiver.recv_blocking() {
                 Ok(RequestJob::ProcessRequest(request)) => {
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby, server, app).ok();
+                        request.process(&ruby, server_class, app).ok();
                     })
                 }
                 Ok(RequestJob::Shutdown) => {
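Note: the new oob_gc_responses_threshold setting drives the GC calls added in both worker loops above: while the request queue is empty, an idle counter is advanced modulo the threshold and a GC pass is started each time it wraps to zero, so out-of-band GC only runs between requests. A minimal sketch of that trigger on its own; the function and parameter names are illustrative, not itsi's API:

    // `threshold` is assumed non-zero when set, as in the diff above.
    fn maybe_oob_gc(idle_counter: &mut u64, threshold: Option<u64>, queue_empty: bool, run_gc: impl FnOnce()) {
        if !queue_empty {
            return; // never pause for GC while requests are waiting
        }
        if let Some(threshold) = threshold {
            *idle_counter = (*idle_counter + 1) % threshold;
            if *idle_counter == 0 {
                run_gc(); // in the diff this is ruby.gc_start() under the GVL
            }
        }
    }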
@@ -63,7 +63,7 @@ pub fn configure_tls(
             .map(|s| s.to_string())
             .or_else(|| (*ITSI_ACME_CONTACT_EMAIL).as_ref().ok().map(|s| s.to_string()))
             .ok_or_else(|| itsi_error::ItsiError::ArgumentError(
-              "acme_cert query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
+              "acme_email query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
             ))?;
 
         let acme_config = AcmeConfig::new(domains)