itsi 0.1.8 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +11 -2
  3. data/Rakefile +6 -2
  4. data/crates/itsi_rb_helpers/src/lib.rs +27 -4
  5. data/crates/itsi_server/Cargo.toml +4 -1
  6. data/crates/itsi_server/src/lib.rs +74 -1
  7. data/crates/itsi_server/src/request/itsi_request.rs +32 -11
  8. data/crates/itsi_server/src/response/itsi_response.rs +14 -4
  9. data/crates/itsi_server/src/server/bind.rs +16 -12
  10. data/crates/itsi_server/src/server/itsi_server.rs +146 -95
  11. data/crates/itsi_server/src/server/listener.rs +10 -10
  12. data/crates/itsi_server/src/server/process_worker.rs +10 -3
  13. data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  14. data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
  15. data/crates/itsi_server/src/server/signal.rs +4 -0
  16. data/crates/itsi_server/src/server/thread_worker.rs +55 -24
  17. data/crates/itsi_server/src/server/tls.rs +11 -8
  18. data/crates/itsi_tracing/src/lib.rs +18 -1
  19. data/gems/scheduler/Cargo.lock +12 -12
  20. data/gems/scheduler/ext/itsi_rb_helpers/src/lib.rs +27 -4
  21. data/gems/scheduler/ext/itsi_server/Cargo.toml +4 -1
  22. data/gems/scheduler/ext/itsi_server/src/lib.rs +74 -1
  23. data/gems/scheduler/ext/itsi_server/src/request/itsi_request.rs +32 -11
  24. data/gems/scheduler/ext/itsi_server/src/response/itsi_response.rs +14 -4
  25. data/gems/scheduler/ext/itsi_server/src/server/bind.rs +16 -12
  26. data/gems/scheduler/ext/itsi_server/src/server/itsi_server.rs +146 -95
  27. data/gems/scheduler/ext/itsi_server/src/server/listener.rs +10 -10
  28. data/gems/scheduler/ext/itsi_server/src/server/process_worker.rs +10 -3
  29. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  30. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
  31. data/gems/scheduler/ext/itsi_server/src/server/signal.rs +4 -0
  32. data/gems/scheduler/ext/itsi_server/src/server/thread_worker.rs +55 -24
  33. data/gems/scheduler/ext/itsi_server/src/server/tls.rs +11 -8
  34. data/gems/scheduler/ext/itsi_tracing/src/lib.rs +18 -1
  35. data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
  36. data/gems/scheduler/test/test_address_resolve.rb +0 -1
  37. data/gems/scheduler/test/test_file_io.rb +0 -1
  38. data/gems/scheduler/test/test_kernel_sleep.rb +3 -4
  39. data/gems/server/Cargo.lock +11 -2
  40. data/gems/server/Rakefile +8 -1
  41. data/gems/server/exe/itsi +53 -23
  42. data/gems/server/ext/itsi_rb_helpers/src/lib.rs +27 -4
  43. data/gems/server/ext/itsi_server/Cargo.toml +4 -1
  44. data/gems/server/ext/itsi_server/src/lib.rs +74 -1
  45. data/gems/server/ext/itsi_server/src/request/itsi_request.rs +32 -11
  46. data/gems/server/ext/itsi_server/src/response/itsi_response.rs +14 -4
  47. data/gems/server/ext/itsi_server/src/server/bind.rs +16 -12
  48. data/gems/server/ext/itsi_server/src/server/itsi_server.rs +146 -95
  49. data/gems/server/ext/itsi_server/src/server/listener.rs +10 -10
  50. data/gems/server/ext/itsi_server/src/server/process_worker.rs +10 -3
  51. data/gems/server/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
  52. data/gems/server/ext/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
  53. data/gems/server/ext/itsi_server/src/server/signal.rs +4 -0
  54. data/gems/server/ext/itsi_server/src/server/thread_worker.rs +55 -24
  55. data/gems/server/ext/itsi_server/src/server/tls.rs +11 -8
  56. data/gems/server/ext/itsi_tracing/src/lib.rs +18 -1
  57. data/gems/server/lib/itsi/request.rb +29 -21
  58. data/gems/server/lib/itsi/server/Itsi.rb +127 -0
  59. data/gems/server/lib/itsi/server/config.rb +36 -0
  60. data/gems/server/lib/itsi/server/options_dsl.rb +401 -0
  61. data/gems/server/lib/itsi/server/rack/handler/itsi.rb +18 -7
  62. data/gems/server/lib/itsi/server/rack_interface.rb +75 -0
  63. data/gems/server/lib/itsi/server/scheduler_interface.rb +21 -0
  64. data/gems/server/lib/itsi/server/signal_trap.rb +23 -0
  65. data/gems/server/lib/itsi/server/version.rb +1 -1
  66. data/gems/server/lib/itsi/server.rb +71 -101
  67. data/gems/server/test/helpers/test_helper.rb +30 -0
  68. data/gems/server/test/test_itsi_server.rb +294 -3
  69. data/lib/itsi/version.rb +1 -1
  70. data/location_dsl.rb +381 -0
  71. data/sandbox/deploy/main.tf +1 -0
  72. data/sandbox/itsi_itsi_file/Itsi.rb +119 -0
  73. data/sandbox/itsi_sandbox_async/Gemfile +1 -1
  74. data/sandbox/itsi_sandbox_rack/Gemfile.lock +2 -2
  75. data/sandbox/itsi_sandbox_rails/Gemfile.lock +2 -2
  76. data/tasks.txt +25 -8
  77. metadata +21 -14
  78. data/gems/server/lib/itsi/signals.rb +0 -23
  79. data/gems/server/test/test_helper.rb +0 -7
  80. /data/gems/server/lib/itsi/{index.html.erb → index.html} +0 -0
@@ -15,17 +15,23 @@ use hyper_util::{
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::print_rb_backtrace;
 use itsi_tracing::{debug, error, info};
 use nix::unistd::Pid;
+use parking_lot::Mutex;
 use std::{
     num::NonZeroU8,
+    panic,
     pin::Pin,
     sync::Arc,
     time::{Duration, Instant},
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::broadcast,
+    sync::{
+        broadcast,
+        watch::{self, Sender},
+    },
     task::JoinSet,
 };
 use tracing::instrument;
@@ -34,7 +40,7 @@ pub struct SingleMode {
     pub executor: Builder<TokioExecutor>,
     pub server: Arc<Server>,
     pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
+    pub(crate) listeners: Mutex<Vec<Listener>>,
     pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
     pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -49,18 +55,24 @@ impl SingleMode {
     #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
     pub(crate) fn new(
         server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Result<Self> {
         let (thread_workers, sender) = build_thread_workers(
+            server.clone(),
             Pid::this(),
             NonZeroU8::try_from(server.threads).unwrap(),
-            server.app,
+            server.app.clone(),
             server.scheduler_class.clone(),
-        )?;
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
         Ok(Self {
             executor: Builder::new(TokioExecutor::new()),
-            listeners,
+            listeners: Mutex::new(listeners),
             server,
             sender,
             thread_workers,
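The `.inspect_err(...)` added above logs the Ruby backtrace as a side effect before the error is propagated. A minimal std-only sketch of that pattern (illustrative names, not itsi's API):

    use std::fs;
    use std::io;

    // Peek at the error (here: print it) without consuming it, then keep
    // propagating it unchanged — the same shape as the hunk above, which
    // calls print_rb_backtrace before returning the error to the caller.
    fn load_config() -> io::Result<String> {
        fs::read_to_string("config.toml").inspect_err(|e| {
            eprintln!("failed to read config: {e}");
        })
    }

    fn main() {
        if let Err(e) = load_config() {
            eprintln!("startup aborted: {e}");
        }
    }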
@@ -80,76 +92,81 @@ impl SingleMode {
     }
 
     pub fn stop(&self) -> Result<()> {
+        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
         Ok(())
     }
 
     #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         let mut listener_task_set = JoinSet::new();
-        let self_ref = Arc::new(self);
-        let runtime = self_ref.build_runtime();
+        let runtime = self.build_runtime();
 
         runtime.block_on(async {
-            let tokio_listeners = self_ref
-                .listeners
-                .iter()
-                .map(|list| Arc::new(list.to_tokio_listener()))
-                .collect::<Vec<_>>();
-            for listener in tokio_listeners.iter() {
-                let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
-                let listener_info = Arc::new(listener.listener_info());
-                let self_ref = self_ref.clone();
-                let listener = listener.clone();
-                let (shutdown_sender, mut shutdown_receiver) = tokio::sync::watch::channel::<RunningPhase>(RunningPhase::Running);
-                let listener_clone = listener.clone();
+            let tokio_listeners = self
+                .listeners.lock()
+                .drain(..)
+                .map(|list| {
+                    Arc::new(list.into_tokio_listener())
+                })
+                .collect::<Vec<_>>();
+            let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+            for listener in tokio_listeners.iter() {
+                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+                let listener_info = Arc::new(listener.listener_info());
+                let self_ref = self.clone();
+                let listener = listener.clone();
+                let shutdown_sender = shutdown_sender.clone();
 
-                let shutdown_receiver_clone = shutdown_receiver.clone();
-                listener_task_set.spawn(async move {
-                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
-                });
+                let listener_clone = listener.clone();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let shutdown_receiver_clone = shutdown_receiver.clone();
+                listener_task_set.spawn(async move {
+                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                });
 
-                listener_task_set.spawn(async move {
-                    let strategy = self_ref.clone();
-                    loop {
-                        tokio::select! {
-                            accept_result = listener.accept() => match accept_result {
-                                Ok(accept_result) => {
-                                    match strategy.serve_connection(accept_result, listener_info.clone(), shutdown_receiver.clone()).await {
-                                        Ok(_) => {
-                                            debug!("Connection accepted and served");
+                listener_task_set.spawn(async move {
+                    let strategy_clone = self_ref.clone();
+                    let mut acceptor_task_set = JoinSet::new();
+                    loop {
+                        tokio::select! {
+                            accept_result = listener.accept() => match accept_result {
+                                Ok(accept_result) => {
+                                    let strategy = strategy_clone.clone();
+                                    let listener_info = listener_info.clone();
+                                    let shutdown_receiver = shutdown_receiver.clone();
+                                    acceptor_task_set.spawn(async move {
+                                        strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
+                                    });
                                 },
-                                        Err(e) => error!("Error in serve_connection {:?}", e)
-                                    }
-                                },
-                                Err(e) => debug!("Listener.accept failed {:?}", e),
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                Ok(lifecycle_event) => {
-                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
-                                        match e {
-                                            ItsiError::Break() => break,
-                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                        }
+                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                break;
                             }
+                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+                                Ok(lifecycle_event) => {
+                                    if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
+                                        match e {
+                                            ItsiError::Break() => break,
+                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
+                                        }
+                                    }
 
-                                },
-                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                            }
+                                },
+                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                            }
+                        }
                     }
-                }
-                });
+                    while let Some(_res) = acceptor_task_set.join_next().await {}
+                });
 
-            }
+            }
 
-            while let Some(_res) = listener_task_set.join_next().await {}
+            while let Some(_res) = listener_task_set.join_next().await {}
 
-        });
+        });
         runtime.shutdown_timeout(Duration::from_millis(100));
-
-        info!("Runtime has shut down");
+        debug!("Runtime has shut down");
         Ok(())
     }
 
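A minimal tokio sketch (assumed names, not itsi's code) of the pattern this hunk introduces: each accepted connection is spawned onto a per-listener JoinSet so a slow connection never blocks the accept loop, and the set is drained on shutdown.

    use std::time::Duration;
    use tokio::{net::TcpListener, sync::watch, task::JoinSet};

    async fn accept_loop(listener: TcpListener, mut shutdown_rx: watch::Receiver<bool>) {
        let mut connections = JoinSet::new();
        loop {
            tokio::select! {
                accepted = listener.accept() => match accepted {
                    Ok((stream, peer)) => {
                        // One task per connection, mirroring acceptor_task_set.spawn above.
                        connections.spawn(async move {
                            let _ = stream.readable().await;
                            println!("served {peer}");
                        });
                    }
                    Err(e) => eprintln!("accept failed: {e}"),
                },
                _ = shutdown_rx.changed() => break,
            }
        }
        // Drain outstanding connection tasks before the listener task returns.
        while connections.join_next().await.is_some() {}
    }

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let (shutdown_tx, shutdown_rx) = watch::channel(false);
        let server = tokio::spawn(accept_loop(listener, shutdown_rx));
        tokio::time::sleep(Duration::from_millis(100)).await;
        shutdown_tx.send(true).ok();
        server.await.unwrap();
        Ok(())
    }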
@@ -157,80 +174,84 @@ impl SingleMode {
         &self,
         stream: IoStream,
         listener: Arc<ListenerInfo>,
-        shutdown_channel: tokio::sync::watch::Receiver<RunningPhase>,
-    ) -> Result<()> {
+        shutdown_channel: watch::Receiver<RunningPhase>,
+    ) {
         let sender_clone = self.sender.clone();
         let addr = stream.addr();
         let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
         let server = self.server.clone();
         let executor = self.executor.clone();
         let mut shutdown_channel_clone = shutdown_channel.clone();
-        tokio::spawn(async move {
-            let server = server.clone();
-            let mut executor = executor.clone();
-            let mut binding = executor.http1();
-            let shutdown_channel = shutdown_channel_clone.clone();
-            let mut serve = Box::pin(
-                binding
-                    .timer(TokioTimer::new())
-                    .header_read_timeout(Duration::from_secs(1))
-                    .serve_connection_with_upgrades(
-                        io,
-                        service_fn(move |hyper_request: Request<Incoming>| {
-                            ItsiRequest::process_request(
-                                hyper_request,
-                                sender_clone.clone(),
-                                server.clone(),
-                                listener.clone(),
-                                addr.clone(),
-                                shutdown_channel.clone(),
-                            )
-                        }),
-                    ),
-            );
+        let server = server.clone();
+        let mut executor = executor.clone();
+        let mut binding = executor.http1();
+        let shutdown_channel = shutdown_channel_clone.clone();
+        let mut serve = Box::pin(
+            binding
+                .timer(TokioTimer::new())
+                .header_read_timeout(Duration::from_secs(1))
+                .serve_connection_with_upgrades(
+                    io,
+                    service_fn(move |hyper_request: Request<Incoming>| {
+                        ItsiRequest::process_request(
+                            hyper_request,
+                            sender_clone.clone(),
+                            server.clone(),
+                            listener.clone(),
+                            addr.clone(),
+                            shutdown_channel.clone(),
+                        )
+                    }),
+                ),
+        );
 
-            tokio::select! {
-                // Await the connection finishing naturally.
-                res = &mut serve => {
-                    match res{
-                        Ok(()) => {
-                            debug!("Connection closed normally")
-                        },
-                        Err(res) => {
-                            debug!("Connection finished with error: {:?}", res)
-                        }
-                    }
-                    serve.as_mut().graceful_shutdown();
-                },
-                // A lifecycle event triggers shutdown.
-                _ = shutdown_channel_clone.changed() => {
-                    // Initiate graceful shutdown.
-                    serve.as_mut().graceful_shutdown();
-                    // Now await the connection to finish shutting down.
-                    if let Err(e) = serve.await {
-                        debug!("Connection shutdown error: {:?}", e);
+        tokio::select! {
+            // Await the connection finishing naturally.
+            res = &mut serve => {
+                match res{
+                    Ok(()) => {
+                        debug!("Connection closed normally")
+                    },
+                    Err(res) => {
+                        debug!("Connection finished with error: {:?}", res)
                    }
                }
+                serve.as_mut().graceful_shutdown();
+            },
+            // A lifecycle event triggers shutdown.
+            _ = shutdown_channel_clone.changed() => {
+                // Initiate graceful shutdown.
+                serve.as_mut().graceful_shutdown();
+
+                // Now await the connection to finish shutting down.
+                if let Err(e) = serve.await {
+                    debug!("Connection shutdown error: {:?}", e);
+                }
            }
-        });
-        Ok(())
+        }
     }
 
     pub async fn handle_lifecycle_event(
         &self,
         lifecycle_event: LifecycleEvent,
-        shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
+        shutdown_sender: Sender<RunningPhase>,
     ) -> Result<()> {
+        info!("Handling lifecycle event: {:?}", lifecycle_event);
         if let LifecycleEvent::Shutdown = lifecycle_event {
-            shutdown_sender
-                .send(RunningPhase::ShutdownPending)
-                .expect("Failed to send shutdown pending signal");
-            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            //1. Stop accepting new connections.
+            shutdown_sender.send(RunningPhase::ShutdownPending).ok();
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //2. Break out of work queues.
             for worker in &*self.thread_workers {
                 worker.request_shutdown().await;
             }
+
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //3. Wait for all threads to finish.
+            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
             while Instant::now() < deadline {
-                tokio::time::sleep(Duration::from_millis(50)).await;
                 let alive_threads = self
                     .thread_workers
                     .iter()
@@ -242,10 +263,8 @@ impl SingleMode {
                 tokio::time::sleep(Duration::from_millis(200)).await;
             }
 
-            info!("Sending shutdown signal");
-            shutdown_sender
-                .send(RunningPhase::Shutdown)
-                .expect("Failed to send shutdown signal");
+            //4. Force shutdown any stragglers
+            shutdown_sender.send(RunningPhase::Shutdown).ok();
             self.thread_workers.iter().for_each(|worker| {
                 worker.poll_shutdown(deadline);
             });
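A condensed sketch of the two-phase signal the RunningPhase watch channel carries in the hunks above (stand-in, self-contained types rather than itsi's): receivers first observe ShutdownPending (stop accepting, drain queues), then Shutdown (force-close stragglers).

    use std::time::Duration;
    use tokio::sync::watch;

    #[derive(Clone, Debug, PartialEq)]
    enum RunningPhase {
        Running,
        ShutdownPending,
        Shutdown,
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = watch::channel(RunningPhase::Running);

        let worker = tokio::spawn(async move {
            while rx.changed().await.is_ok() {
                match rx.borrow().clone() {
                    RunningPhase::ShutdownPending => println!("draining in-flight work"),
                    RunningPhase::Shutdown => {
                        println!("forcing shutdown");
                        break;
                    }
                    RunningPhase::Running => {}
                }
            }
        });

        // Phase 1, then (after a grace period) phase 2 — ignoring send errors,
        // as the diff above does with `.ok()`.
        tx.send(RunningPhase::ShutdownPending).ok();
        tokio::time::sleep(Duration::from_millis(10)).await;
        tx.send(RunningPhase::Shutdown).ok();
        worker.await.unwrap();
    }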
@@ -10,6 +10,10 @@ pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
     broadcast::Receiver<LifecycleEvent>,
 )> = LazyLock::new(|| sync::broadcast::channel(5));
 
+pub fn send_shutdown_event() {
+    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
+}
+
 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
 fn receive_signal(signum: i32, _: sighandler_t) {
     SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
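A small sketch (stand-in types, not the real SIGNAL_HANDLER_CHANNEL) of how a broadcast channel fans one Shutdown event out to every subscriber, which is what send_shutdown_event relies on:

    use tokio::sync::broadcast;

    #[derive(Clone, Debug)]
    enum LifecycleEvent {
        Shutdown,
    }

    #[tokio::main]
    async fn main() {
        let (tx, _initial_rx) = broadcast::channel::<LifecycleEvent>(5);

        let mut handles = Vec::new();
        for id in 0..3 {
            let mut rx = tx.subscribe();
            handles.push(tokio::spawn(async move {
                if let Ok(LifecycleEvent::Shutdown) = rx.recv().await {
                    println!("subscriber {id} shutting down");
                }
            }));
        }

        // Like send_shutdown_event(): a send error (no receivers) is ignored.
        tx.send(LifecycleEvent::Shutdown).ok();
        for handle in handles {
            handle.await.unwrap();
        }
    }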
@@ -1,7 +1,7 @@
-use super::itsi_server::RequestJob;
+use super::itsi_server::{RequestJob, Server};
 use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
 use itsi_rb_helpers::{
-    call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
+    call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapVal, HeapValue,
 };
 use itsi_tracing::{debug, error, info, warn};
 use magnus::{
@@ -24,6 +24,7 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 pub struct ThreadWorker {
+    pub server: Arc<Server>,
     pub id: String,
     pub app: Opaque<Value>,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -48,11 +49,12 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 
 pub struct TerminateWakerSignal(bool);
 
-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+#[instrument(name = "Boot", parent=None, skip(server, threads, app, pid, scheduler_class))]
 pub fn build_thread_workers(
+    server: Arc<Server>,
     pid: Pid,
     threads: NonZeroU8,
-    app: Opaque<Value>,
+    app: HeapVal,
     scheduler_class: Option<String>,
 ) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
     let (sender, receiver) = async_channel::bounded(20);
@@ -65,6 +67,7 @@ pub fn build_thread_workers(
         .map(|id| {
             info!(pid = pid.as_raw(), id, "Thread");
             ThreadWorker::new(
+                server.clone(),
                 format!("{:?}#{:?}", pid, id),
                 app,
                 receiver_ref.clone(),
@@ -79,15 +82,11 @@ pub fn build_thread_workers(
 }
 
 pub fn load_app(
-    app: Opaque<Value>,
+    app: HeapVal,
     scheduler_class: Option<String>,
 ) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
     call_with_gvl(|ruby| {
-        let app = app.get_inner_with(&ruby);
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
+        let app = Opaque::from(app.funcall::<_, _, Value>(*ID_CALL, ())?);
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -101,6 +100,7 @@ pub fn load_app(
 }
 impl ThreadWorker {
     pub fn new(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -108,6 +108,7 @@ impl ThreadWorker {
         scheduler_class: Option<Opaque<Value>>,
     ) -> Result<Self> {
         let mut worker = Self {
+            server,
             id,
             app,
             receiver,
@@ -126,7 +127,7 @@ impl ThreadWorker {
             Ok(_) => {}
             Err(err) => error!("Failed to send shutdown request: {}", err),
         };
-        info!("Requesting shutdown");
+        debug!("Requesting shutdown");
     }
 
     #[instrument(skip(self, deadline), fields(id = self.id))]
@@ -141,7 +142,7 @@ impl ThreadWorker {
             if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
                 return true;
             }
-            info!("Thread has shut down");
+            debug!("Thread has shut down");
         }
         self.thread.write().take();
 
@@ -155,17 +156,23 @@ impl ThreadWorker {
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let server = self.server.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
-                        {
+                        if let Err(err) = Self::fiber_accept_loop(
+                            server,
+                            id,
+                            app,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-                        Self::accept_loop(id, app, receiver, terminated);
+                        Self::accept_loop(server, id, app, receiver, terminated);
                     }
                 })
                 .into(),
@@ -181,6 +188,7 @@ impl ThreadWorker {
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -244,10 +252,15 @@ impl ThreadWorker {
             }
 
             let yield_result = if receiver.is_empty() {
+                let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    idle_counter == 0
+                } else {
+                    false
+                };
                 waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                idle_counter = (idle_counter + 1) % 100;
                 call_with_gvl(|ruby| {
-                    if idle_counter == 0 {
+                    if should_gc {
                         ruby.gc_start();
                     }
                     scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
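The idle-counter arithmetic above, extracted into a tiny standalone sketch: with a configured threshold, the out-of-band GC fires once every N idle polls; with no threshold it never fires.

    fn should_gc(idle_counter: &mut u64, oob_gc_responses_threshold: Option<u64>) -> bool {
        if let Some(threshold) = oob_gc_responses_threshold {
            *idle_counter = (*idle_counter + 1) % threshold;
            *idle_counter == 0
        } else {
            false
        }
    }

    fn main() {
        let mut idle_counter = 0;
        let fired: Vec<bool> = (0..6).map(|_| should_gc(&mut idle_counter, Some(3))).collect();
        // Fires on every third idle poll: [false, false, true, false, false, true]
        println!("{fired:?}");
        // With no threshold configured, GC is never requested.
        assert!(!should_gc(&mut idle_counter, None));
    }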
@@ -265,6 +278,8 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn fiber_accept_loop(
+        server: Arc<Server>,
+
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -274,10 +289,16 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let server = ruby.get_inner(&ITSI_SERVER);
-        let scheduler_proc =
-            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
-        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = Self::build_scheduler_proc(
+            app,
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            server.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -338,21 +359,31 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let server = ruby.get_inner(&ITSI_SERVER);
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let mut idle_counter = 0;
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = server.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        ruby.gc_start();
+                    }
+                };
+            }
             match receiver.recv_blocking() {
                 Ok(RequestJob::ProcessRequest(request)) => {
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby, server, app).ok();
+                        request.process(&ruby, server_class, app).ok();
                     })
                 }
                 Ok(RequestJob::Shutdown) => {
@@ -48,23 +48,26 @@ pub fn configure_tls(
 ) -> Result<ItsiTlsAcceptor> {
     let domains = query_params
         .get("domains")
-        .map(|v| v.split(',').map(String::from).collect::<Vec<_>>());
+        .map(|v| v.split(',').map(String::from).collect::<Vec<_>>())
+        .or_else(|| query_params.get("domain").map(|v| vec![v.to_string()]));
 
-    if query_params.get("cert").is_some_and(|c| c == "auto") {
+    if query_params.get("cert").is_some_and(|c| c == "acme") {
         if let Some(domains) = domains {
             let directory_url = &*ITSI_ACME_DIRECTORY_URL;
             info!(
                 domains = format!("{:?}", domains),
                 directory_url, "Requesting acme cert"
             );
+            let acme_contact_email = query_params
+                .get("acme_email")
+                .map(|s| s.to_string())
+                .or_else(|| (*ITSI_ACME_CONTACT_EMAIL).as_ref().ok().map(|s| s.to_string()))
+                .ok_or_else(|| itsi_error::ItsiError::ArgumentError(
+                    "acme_email query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
+                ))?;
 
             let acme_config = AcmeConfig::new(domains)
-                .contact([format!("mailto:{}", (*ITSI_ACME_CONTACT_EMAIL).as_ref().map_err(|_| {
-                    itsi_error::ItsiError::ArgumentError(
-                        "ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate production certificates"
-                            .to_string(),
-                    )
-                })?)])
+                .contact([format!("mailto:{}", acme_contact_email)])
                 .cache(LockedDirCache::new(&*ITSI_ACME_CACHE_DIR))
                 .directory(directory_url);
 
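A std-only sketch of the new contact-email fallback above (the real code reads ITSI_ACME_CONTACT_EMAIL through a lazy static; here the environment is read directly for illustration): the acme_email query param wins, the environment variable is the fallback, and with neither present configuration fails.

    use std::collections::HashMap;
    use std::env;

    fn acme_contact_email(query_params: &HashMap<String, String>) -> Result<String, String> {
        query_params
            .get("acme_email")
            .map(|s| s.to_string())
            .or_else(|| env::var("ITSI_ACME_CONTACT_EMAIL").ok())
            .ok_or_else(|| {
                "acme_email query param or ITSI_ACME_CONTACT_EMAIL must be set".to_string()
            })
    }

    fn main() {
        let mut params = HashMap::new();
        params.insert("acme_email".to_string(), "admin@example.com".to_string());
        // Query param wins; with neither source present this returns Err.
        println!("{:?}", acme_contact_email(&params));
    }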
@@ -1,11 +1,13 @@
 use std::env;
 
 use atty::{Stream, is};
+use tracing::level_filters::LevelFilter;
 pub use tracing::{debug, error, info, trace, warn};
 pub use tracing_attributes::instrument; // Explicitly export from tracing-attributes
 use tracing_subscriber::{
-    EnvFilter,
+    EnvFilter, Layer,
     fmt::{self, format},
+    layer::SubscriberExt,
 };
 
 #[instrument]
@@ -39,3 +41,18 @@ pub fn init() {
             .init();
     }
 }
+
+pub fn run_silently<F, R>(f: F) -> R
+where
+    F: FnOnce() -> R,
+{
+    // Build a minimal subscriber that filters *everything* out
+    let no_op_subscriber =
+        tracing_subscriber::registry().with(fmt::layer().with_filter(LevelFilter::OFF));
+
+    // Turn that subscriber into a `Dispatch`
+    let no_op_dispatch = tracing::dispatcher::Dispatch::new(no_op_subscriber);
+
+    // Temporarily set `no_op_dispatch` as the *default* within this closure
+    tracing::dispatcher::with_default(&no_op_dispatch, f)
+}
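A usage sketch for the new run_silently helper (assuming the itsi_tracing crate shown above is available as a dependency): tracing output emitted inside the closure is dropped, while the closure's return value still reaches the caller.

    use itsi_tracing::{info, init, run_silently};

    fn main() {
        init();
        info!("visible");
        let answer = run_silently(|| {
            info!("suppressed while the no-op dispatcher is the default");
            42
        });
        info!(answer, "visible again");
    }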