itsi 0.1.0 → 0.1.3

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (164)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +524 -44
  3. data/Rakefile +22 -33
  4. data/crates/itsi_error/Cargo.toml +2 -0
  5. data/crates/itsi_error/src/from.rs +70 -0
  6. data/crates/itsi_error/src/lib.rs +10 -37
  7. data/crates/itsi_instrument_entry/Cargo.toml +15 -0
  8. data/crates/itsi_instrument_entry/src/lib.rs +31 -0
  9. data/crates/itsi_rb_helpers/Cargo.toml +2 -0
  10. data/crates/itsi_rb_helpers/src/heap_value.rs +121 -0
  11. data/crates/itsi_rb_helpers/src/lib.rs +90 -10
  12. data/crates/itsi_scheduler/Cargo.toml +9 -1
  13. data/crates/itsi_scheduler/extconf.rb +1 -1
  14. data/crates/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
  15. data/crates/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
  16. data/crates/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
  17. data/crates/itsi_scheduler/src/itsi_scheduler.rs +308 -0
  18. data/crates/itsi_scheduler/src/lib.rs +31 -10
  19. data/crates/itsi_server/Cargo.toml +14 -2
  20. data/crates/itsi_server/extconf.rb +1 -1
  21. data/crates/itsi_server/src/body_proxy/big_bytes.rs +104 -0
  22. data/crates/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
  23. data/crates/itsi_server/src/body_proxy/mod.rs +2 -0
  24. data/crates/itsi_server/src/lib.rs +58 -7
  25. data/crates/itsi_server/src/request/itsi_request.rs +238 -104
  26. data/crates/itsi_server/src/response/itsi_response.rs +347 -0
  27. data/crates/itsi_server/src/response/mod.rs +1 -0
  28. data/crates/itsi_server/src/server/bind.rs +50 -20
  29. data/crates/itsi_server/src/server/bind_protocol.rs +37 -0
  30. data/crates/itsi_server/src/server/io_stream.rs +104 -0
  31. data/crates/itsi_server/src/server/itsi_ca/itsi_ca.crt +11 -30
  32. data/crates/itsi_server/src/server/itsi_ca/itsi_ca.key +3 -50
  33. data/crates/itsi_server/src/server/itsi_server.rs +196 -134
  34. data/crates/itsi_server/src/server/lifecycle_event.rs +9 -0
  35. data/crates/itsi_server/src/server/listener.rs +184 -127
  36. data/crates/itsi_server/src/server/mod.rs +7 -1
  37. data/crates/itsi_server/src/server/process_worker.rs +196 -0
  38. data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +254 -0
  39. data/crates/itsi_server/src/server/serve_strategy/mod.rs +27 -0
  40. data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +241 -0
  41. data/crates/itsi_server/src/server/signal.rs +70 -0
  42. data/crates/itsi_server/src/server/thread_worker.rs +368 -0
  43. data/crates/itsi_server/src/server/tls.rs +42 -28
  44. data/crates/itsi_tracing/Cargo.toml +4 -0
  45. data/crates/itsi_tracing/src/lib.rs +36 -6
  46. data/gems/scheduler/Cargo.lock +219 -23
  47. data/gems/scheduler/Rakefile +7 -1
  48. data/gems/scheduler/ext/itsi_error/Cargo.toml +2 -0
  49. data/gems/scheduler/ext/itsi_error/src/from.rs +70 -0
  50. data/gems/scheduler/ext/itsi_error/src/lib.rs +10 -37
  51. data/gems/scheduler/ext/itsi_instrument_entry/Cargo.toml +15 -0
  52. data/gems/scheduler/ext/itsi_instrument_entry/src/lib.rs +31 -0
  53. data/gems/scheduler/ext/itsi_rb_helpers/Cargo.toml +2 -0
  54. data/gems/scheduler/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
  55. data/gems/scheduler/ext/itsi_rb_helpers/src/lib.rs +90 -10
  56. data/gems/scheduler/ext/itsi_scheduler/Cargo.toml +9 -1
  57. data/gems/scheduler/ext/itsi_scheduler/extconf.rb +1 -1
  58. data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
  59. data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
  60. data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
  61. data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
  62. data/gems/scheduler/ext/itsi_scheduler/src/lib.rs +31 -10
  63. data/gems/scheduler/ext/itsi_server/Cargo.toml +41 -0
  64. data/gems/scheduler/ext/itsi_server/extconf.rb +6 -0
  65. data/gems/scheduler/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
  66. data/gems/scheduler/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
  67. data/gems/scheduler/ext/itsi_server/src/body_proxy/mod.rs +2 -0
  68. data/gems/scheduler/ext/itsi_server/src/lib.rs +103 -0
  69. data/gems/scheduler/ext/itsi_server/src/request/itsi_request.rs +277 -0
  70. data/gems/scheduler/ext/itsi_server/src/request/mod.rs +1 -0
  71. data/gems/scheduler/ext/itsi_server/src/response/itsi_response.rs +347 -0
  72. data/gems/scheduler/ext/itsi_server/src/response/mod.rs +1 -0
  73. data/gems/scheduler/ext/itsi_server/src/server/bind.rs +168 -0
  74. data/gems/scheduler/ext/itsi_server/src/server/bind_protocol.rs +37 -0
  75. data/gems/scheduler/ext/itsi_server/src/server/io_stream.rs +104 -0
  76. data/gems/scheduler/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +13 -0
  77. data/gems/scheduler/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +5 -0
  78. data/gems/scheduler/ext/itsi_server/src/server/itsi_server.rs +244 -0
  79. data/gems/scheduler/ext/itsi_server/src/server/lifecycle_event.rs +9 -0
  80. data/gems/scheduler/ext/itsi_server/src/server/listener.rs +275 -0
  81. data/gems/scheduler/ext/itsi_server/src/server/mod.rs +11 -0
  82. data/gems/scheduler/ext/itsi_server/src/server/process_worker.rs +196 -0
  83. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +254 -0
  84. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
  85. data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/single_mode.rs +241 -0
  86. data/gems/scheduler/ext/itsi_server/src/server/signal.rs +70 -0
  87. data/gems/scheduler/ext/itsi_server/src/server/thread_worker.rs +368 -0
  88. data/gems/scheduler/ext/itsi_server/src/server/tls.rs +152 -0
  89. data/gems/scheduler/ext/itsi_tracing/Cargo.toml +4 -0
  90. data/gems/scheduler/ext/itsi_tracing/src/lib.rs +36 -6
  91. data/gems/scheduler/itsi-scheduler.gemspec +2 -3
  92. data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
  93. data/gems/scheduler/lib/itsi/scheduler.rb +137 -1
  94. data/gems/scheduler/test/helpers/test_helper.rb +24 -0
  95. data/gems/scheduler/test/test_active_record.rb +158 -0
  96. data/gems/scheduler/test/test_address_resolve.rb +23 -0
  97. data/gems/scheduler/test/test_block_unblock.rb +229 -0
  98. data/gems/scheduler/test/test_file_io.rb +193 -0
  99. data/gems/scheduler/test/test_itsi_scheduler.rb +24 -1
  100. data/gems/scheduler/test/test_kernel_sleep.rb +91 -0
  101. data/gems/scheduler/test/test_nested_fibers.rb +286 -0
  102. data/gems/scheduler/test/test_network_io.rb +274 -0
  103. data/gems/scheduler/test/test_process_wait.rb +26 -0
  104. data/gems/server/exe/itsi +88 -28
  105. data/gems/server/ext/itsi_error/Cargo.toml +2 -0
  106. data/gems/server/ext/itsi_error/src/from.rs +70 -0
  107. data/gems/server/ext/itsi_error/src/lib.rs +10 -37
  108. data/gems/server/ext/itsi_instrument_entry/Cargo.toml +15 -0
  109. data/gems/server/ext/itsi_instrument_entry/src/lib.rs +31 -0
  110. data/gems/server/ext/itsi_rb_helpers/Cargo.toml +2 -0
  111. data/gems/server/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
  112. data/gems/server/ext/itsi_rb_helpers/src/lib.rs +90 -10
  113. data/gems/server/ext/itsi_scheduler/Cargo.toml +24 -0
  114. data/gems/server/ext/itsi_scheduler/extconf.rb +6 -0
  115. data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
  116. data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
  117. data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
  118. data/gems/server/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
  119. data/gems/server/ext/itsi_scheduler/src/lib.rs +38 -0
  120. data/gems/server/ext/itsi_server/Cargo.toml +14 -2
  121. data/gems/server/ext/itsi_server/extconf.rb +1 -1
  122. data/gems/server/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
  123. data/gems/server/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
  124. data/gems/server/ext/itsi_server/src/body_proxy/mod.rs +2 -0
  125. data/gems/server/ext/itsi_server/src/lib.rs +58 -7
  126. data/gems/server/ext/itsi_server/src/request/itsi_request.rs +238 -104
  127. data/gems/server/ext/itsi_server/src/response/itsi_response.rs +347 -0
  128. data/gems/server/ext/itsi_server/src/response/mod.rs +1 -0
  129. data/gems/server/ext/itsi_server/src/server/bind.rs +50 -20
  130. data/gems/server/ext/itsi_server/src/server/bind_protocol.rs +37 -0
  131. data/gems/server/ext/itsi_server/src/server/io_stream.rs +104 -0
  132. data/gems/server/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +11 -30
  133. data/gems/server/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +3 -50
  134. data/gems/server/ext/itsi_server/src/server/itsi_server.rs +196 -134
  135. data/gems/server/ext/itsi_server/src/server/lifecycle_event.rs +9 -0
  136. data/gems/server/ext/itsi_server/src/server/listener.rs +184 -127
  137. data/gems/server/ext/itsi_server/src/server/mod.rs +7 -1
  138. data/gems/server/ext/itsi_server/src/server/process_worker.rs +196 -0
  139. data/gems/server/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +254 -0
  140. data/gems/server/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
  141. data/gems/server/ext/itsi_server/src/server/serve_strategy/single_mode.rs +241 -0
  142. data/gems/server/ext/itsi_server/src/server/signal.rs +70 -0
  143. data/gems/server/ext/itsi_server/src/server/thread_worker.rs +368 -0
  144. data/gems/server/ext/itsi_server/src/server/tls.rs +42 -28
  145. data/gems/server/ext/itsi_tracing/Cargo.toml +4 -0
  146. data/gems/server/ext/itsi_tracing/src/lib.rs +36 -6
  147. data/gems/server/itsi-server.gemspec +4 -5
  148. data/gems/server/lib/itsi/request.rb +30 -14
  149. data/gems/server/lib/itsi/server/rack/handler/itsi.rb +25 -0
  150. data/gems/server/lib/itsi/server/scheduler_mode.rb +6 -0
  151. data/gems/server/lib/itsi/server/version.rb +1 -1
  152. data/gems/server/lib/itsi/server.rb +82 -2
  153. data/gems/server/lib/itsi/signals.rb +23 -0
  154. data/gems/server/lib/itsi/stream_io.rb +38 -0
  155. data/gems/server/test/test_helper.rb +2 -0
  156. data/gems/server/test/test_itsi_server.rb +1 -1
  157. data/lib/itsi/version.rb +1 -1
  158. data/tasks.txt +18 -0
  159. metadata +102 -12
  160. data/crates/itsi_server/src/server/transfer_protocol.rs +0 -23
  161. data/crates/itsi_server/src/stream_writer/mod.rs +0 -21
  162. data/gems/scheduler/test/test_helper.rb +0 -6
  163. data/gems/server/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
  164. data/gems/server/ext/itsi_server/src/stream_writer/mod.rs +0 -21
@@ -0,0 +1,241 @@
+use crate::{
+    request::itsi_request::ItsiRequest,
+    server::{
+        io_stream::IoStream,
+        itsi_server::{RequestJob, Server},
+        lifecycle_event::LifecycleEvent,
+        listener::{Listener, TokioListener},
+        thread_worker::{build_thread_workers, ThreadWorker},
+    },
+};
+use http::Request;
+use hyper::{body::Incoming, service::service_fn};
+use hyper_util::{
+    rt::{TokioExecutor, TokioIo, TokioTimer},
+    server::conn::auto::Builder,
+};
+use itsi_error::{ItsiError, Result};
+use itsi_tracing::{debug, error, info};
+use nix::unistd::Pid;
+use std::{
+    num::NonZeroU8,
+    pin::Pin,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+use tokio::{
+    runtime::{Builder as RuntimeBuilder, Runtime},
+    sync::broadcast,
+    task::JoinSet,
+};
+use tracing::instrument;
+
+pub struct SingleMode {
+    pub executor: Builder<TokioExecutor>,
+    pub server: Arc<Server>,
+    pub sender: async_channel::Sender<RequestJob>,
+    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
+    pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
+    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+}
+
+pub enum RunningPhase {
+    Running,
+    ShutdownPending,
+    Shutdown,
+}
+
+impl SingleMode {
+    #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
+    pub(crate) fn new(
+        server: Arc<Server>,
+        listeners: Arc<Vec<Arc<Listener>>>,
+        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+    ) -> Result<Self> {
+        let (thread_workers, sender) = build_thread_workers(
+            Pid::this(),
+            NonZeroU8::try_from(server.threads).unwrap(),
+            server.app,
+            server.scheduler_class.clone(),
+        )?;
+        Ok(Self {
+            executor: Builder::new(TokioExecutor::new()),
+            listeners,
+            server,
+            sender,
+            thread_workers,
+            lifecycle_channel,
+        })
+    }
+
+    pub fn build_runtime(&self) -> Runtime {
+        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
+        builder
+            .thread_name("itsi-server-accept-loop")
+            .thread_stack_size(3 * 1024 * 1024)
+            .enable_io()
+            .enable_time()
+            .build()
+            .expect("Failed to build Tokio runtime")
+    }
+
+    pub fn stop(&self) -> Result<()> {
+        Ok(())
+    }
+
+    #[instrument(parent=None, skip(self))]
+    pub fn run(self: Arc<Self>) -> Result<()> {
+        let mut listener_task_set = JoinSet::new();
+        let self_ref = Arc::new(self);
+        self_ref.build_runtime().block_on(async {
+            for listener in self_ref.listeners.clone().iter() {
+                let listener = Arc::new(listener.to_tokio_listener());
+                let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
+                let self_ref = self_ref.clone();
+                let listener = listener.clone();
+                let (shutdown_sender, mut shutdown_receiver) = tokio::sync::watch::channel::<RunningPhase>(RunningPhase::Running);
+                listener_task_set.spawn(async move {
+                    let strategy = self_ref.clone();
+                    loop {
+                        tokio::select! {
+                            accept_result = listener.accept() => match accept_result {
+                                Ok(accept_result) => {
+                                    if let Err(e) = strategy.serve_connection(accept_result, listener.clone(), shutdown_receiver.clone()).await {
+                                        error!("Error in serve_connection {:?}", e)
+                                    }
+                                },
+                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                break;
+                            }
+                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
+                                Ok(lifecycle_event) => {
+                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await {
+                                        match e {
+                                            ItsiError::Break() => break,
+                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
+                                        }
+                                    }
+                                },
+                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                            }
+                        }
+                    }
+                    if let Ok(listener) = Arc::try_unwrap(listener) {
+                        listener.unbind();
+                    }
+                });
+            }
+
+            while let Some(_res) = listener_task_set.join_next().await {}
+        });
+
+        Ok(())
+    }
+
+    pub(crate) async fn serve_connection(
+        &self,
+        stream: IoStream,
+        listener: Arc<TokioListener>,
+        shutdown_channel: tokio::sync::watch::Receiver<RunningPhase>,
+    ) -> Result<()> {
+        let sender_clone = self.sender.clone();
+        let addr = stream.addr();
+        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
+        let server = self.server.clone();
+        let executor = self.executor.clone();
+        let mut shutdown_channel_clone = shutdown_channel.clone();
+        tokio::spawn(async move {
+            let server = server.clone();
+            let mut executor = executor.clone();
+            let mut binding = executor.http1();
+            let shutdown_channel = shutdown_channel_clone.clone();
+            let mut serve = Box::pin(
+                binding
+                    .timer(TokioTimer::new())
+                    .header_read_timeout(Duration::from_secs(1))
+                    .serve_connection_with_upgrades(
+                        io,
+                        service_fn(move |hyper_request: Request<Incoming>| {
+                            ItsiRequest::process_request(
+                                hyper_request,
+                                sender_clone.clone(),
+                                server.clone(),
+                                listener.clone(),
+                                addr.clone(),
+                                shutdown_channel.clone(),
+                            )
+                        }),
+                    ),
+            );
+
+            tokio::select! {
+                // Await the connection finishing naturally.
+                res = &mut serve => {
+                    match res {
+                        Ok(()) => {
+                            debug!("Connection closed normally")
+                        },
+                        Err(res) => {
+                            debug!("Connection finished with error: {:?}", res)
+                        }
+                    }
+                    serve.as_mut().graceful_shutdown();
+                },
+                // A lifecycle event triggers shutdown.
+                _ = shutdown_channel_clone.changed() => {
+                    // Initiate graceful shutdown.
+                    serve.as_mut().graceful_shutdown();
+                    // Now await the connection to finish shutting down.
+                    if let Err(e) = serve.await {
+                        debug!("Connection shutdown error: {:?}", e);
+                    }
+                }
+            }
+        });
+        Ok(())
+    }
+
+    pub async fn handle_lifecycle_event(
+        &self,
+        lifecycle_event: LifecycleEvent,
+        shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
+    ) -> Result<()> {
+        if let LifecycleEvent::Shutdown = lifecycle_event {
+            shutdown_sender
+                .send(RunningPhase::ShutdownPending)
+                .expect("Failed to send shutdown pending signal");
+            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            for worker in &*self.thread_workers {
+                worker.request_shutdown().await;
+            }
+            while Instant::now() < deadline {
+                tokio::time::sleep(Duration::from_millis(50)).await;
+                let alive_threads = self
+                    .thread_workers
+                    .iter()
+                    .filter(|worker| worker.poll_shutdown(deadline))
+                    .count();
+                if alive_threads == 0 {
+                    break;
+                }
+                tokio::time::sleep(Duration::from_millis(200)).await;
+            }
+
+            info!("Sending shutdown signal");
+            shutdown_sender
+                .send(RunningPhase::Shutdown)
+                .expect("Failed to send shutdown signal");
+            self.thread_workers.iter().for_each(|worker| {
+                worker.poll_shutdown(deadline);
+            });
+
+            return Err(ItsiError::Break());
+        }
+        Ok(())
+    }
+}
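The hunk above coordinates shutdown through a `tokio::sync::watch` channel carrying `RunningPhase`: `handle_lifecycle_event` moves the phase from `ShutdownPending` to `Shutdown`, and the accept loop and each connection task select on `changed()` to start a graceful shutdown. Below is a minimal, self-contained sketch of that handshake; everything except the `RunningPhase` enum (the sleep, the task body, the timings) is an illustrative stand-in, not the gem's code.

```rust
// Sketch only: the RunningPhase watch-channel handshake used by SingleMode.
use std::time::Duration;
use tokio::sync::watch;

#[derive(Clone, Copy, Debug)]
enum RunningPhase {
    Running,
    ShutdownPending,
    Shutdown,
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (shutdown_sender, shutdown_receiver) = watch::channel(RunningPhase::Running);

    // Stand-in for a per-connection task: finish naturally or react to a phase change.
    let mut conn_rx = shutdown_receiver.clone();
    let conn = tokio::spawn(async move {
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(60)) => { /* closed normally */ }
            _ = conn_rx.changed() => { /* begin graceful shutdown */ }
        }
    });

    // Stand-in for handle_lifecycle_event: announce the pending phase, give
    // workers time to drain, then announce the final phase.
    shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
    tokio::time::sleep(Duration::from_millis(50)).await;
    shutdown_sender.send(RunningPhase::Shutdown).unwrap();

    conn.await.unwrap();
}
```

A watch channel (rather than a broadcast channel) suits this monotonic state machine: late observers can always read the latest phase via `borrow()`, and intermediate phases they missed don't matter.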
@@ -0,0 +1,70 @@
+use std::sync::{atomic::AtomicI8, LazyLock};
+
+use nix::libc::{self, sighandler_t};
+use tokio::sync::{self, broadcast};
+
+use super::lifecycle_event::LifecycleEvent;
+
+pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
+    broadcast::Sender<LifecycleEvent>,
+    broadcast::Receiver<LifecycleEvent>,
+)> = LazyLock::new(|| sync::broadcast::channel(5));
+
+pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
+fn receive_signal(signum: i32, _: sighandler_t) {
+    SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
+    match signum {
+        libc::SIGTERM | libc::SIGINT => {
+            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
+            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
+                SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
+            } else {
+                // Not messing about. Force shutdown.
+                SIGNAL_HANDLER_CHANNEL
+                    .0
+                    .send(LifecycleEvent::ForceShutdown)
+                    .ok();
+            }
+        }
+        libc::SIGUSR1 => {
+            SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
+        }
+        libc::SIGTTIN => {
+            SIGNAL_HANDLER_CHANNEL
+                .0
+                .send(LifecycleEvent::IncreaseWorkers)
+                .ok();
+        }
+        libc::SIGTTOU => {
+            SIGNAL_HANDLER_CHANNEL
+                .0
+                .send(LifecycleEvent::DecreaseWorkers)
+                .ok();
+        }
+        _ => {}
+    }
+}
+
+pub fn reset_signal_handlers() -> bool {
+    SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
+    unsafe {
+        libc::signal(libc::SIGTERM, receive_signal as usize);
+        libc::signal(libc::SIGINT, receive_signal as usize);
+        libc::signal(libc::SIGUSR1, receive_signal as usize);
+        libc::signal(libc::SIGUSR2, receive_signal as usize);
+        libc::signal(libc::SIGTTIN, receive_signal as usize);
+        libc::signal(libc::SIGTTOU, receive_signal as usize);
+    }
+    true
+}
+
+pub fn clear_signal_handlers() {
+    unsafe {
+        libc::signal(libc::SIGTERM, libc::SIG_DFL);
+        libc::signal(libc::SIGINT, libc::SIG_DFL);
+        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
+        libc::signal(libc::SIGUSR2, libc::SIG_DFL);
+        libc::signal(libc::SIGTTIN, libc::SIG_DFL);
+        libc::signal(libc::SIGTTOU, libc::SIG_DFL);
+    }
+}
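This handler translates POSIX signals into `LifecycleEvent`s on a shared `tokio::sync::broadcast` channel, with a repeated SIGINT/SIGTERM escalating to `ForceShutdown`. A minimal sketch of the consuming side follows; the enum is repeated only so the snippet compiles on its own, and the subscriber is illustrative rather than the gem's API.

```rust
// Sketch only: a subscriber draining lifecycle events from a broadcast channel,
// the same shape as SIGNAL_HANDLER_CHANNEL.0.subscribe() in the server crates.
use tokio::sync::broadcast;

#[derive(Clone, Debug)]
enum LifecycleEvent {
    Shutdown,
    ForceShutdown,
    Restart,
    IncreaseWorkers,
    DecreaseWorkers,
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (sender, _root_receiver) = broadcast::channel::<LifecycleEvent>(5);
    let mut events = sender.subscribe();

    // In the gem this send happens inside the C signal handler.
    sender.send(LifecycleEvent::Shutdown).ok();

    match events.recv().await {
        Ok(LifecycleEvent::Shutdown) => { /* begin graceful shutdown */ }
        Ok(LifecycleEvent::ForceShutdown) => { /* exit immediately */ }
        Ok(other) => println!("lifecycle event: {:?}", other),
        Err(err) => eprintln!("lifecycle receiver closed or lagged: {:?}", err),
    }
}
```

Each serve strategy obtains its own receiver via `subscribe()`, so a single signal fans out to every listener task.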
@@ -0,0 +1,368 @@
+use super::itsi_server::RequestJob;
+use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
+use itsi_rb_helpers::{
+    call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
+};
+use itsi_tracing::{debug, error, info, warn};
+use magnus::{
+    error::Result,
+    value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
+    Module, RClass, Ruby, Thread, Value,
+};
+use nix::unistd::Pid;
+use parking_lot::{Mutex, RwLock};
+use std::{
+    num::NonZeroU8,
+    ops::Deref,
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc,
+    },
+    thread,
+    time::{Duration, Instant},
+};
+use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
+use tracing::instrument;
+pub struct ThreadWorker {
+    pub id: String,
+    pub app: Opaque<Value>,
+    pub receiver: Arc<async_channel::Receiver<RequestJob>>,
+    pub sender: async_channel::Sender<RequestJob>,
+    pub thread: RwLock<Option<HeapValue<Thread>>>,
+    pub terminated: Arc<AtomicBool>,
+    pub scheduler_class: Option<Opaque<Value>>,
+}
+
+static ID_CALL: LazyId = LazyId::new("call");
+static ID_ALIVE: LazyId = LazyId::new("alive?");
+static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
+static ID_SCHEDULE: LazyId = LazyId::new("schedule");
+static ID_BLOCK: LazyId = LazyId::new("block");
+static ID_YIELD: LazyId = LazyId::new("yield");
+static ID_CONST_GET: LazyId = LazyId::new("const_get");
+static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
+    ruby.module_kernel()
+        .const_get::<_, RClass>("Fiber")
+        .unwrap()
+});
+
+pub struct TerminateWakerSignal(bool);
+
+#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+pub fn build_thread_workers(
+    pid: Pid,
+    threads: NonZeroU8,
+    app: Opaque<Value>,
+    scheduler_class: Option<String>,
+) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
+    let (sender, receiver) = async_channel::bounded(20);
+    let receiver_ref = Arc::new(receiver);
+    let sender_ref = sender;
+    let (app, scheduler_class) = load_app(app, scheduler_class)?;
+    Ok((
+        Arc::new(
+            (1..=u8::from(threads))
+                .map(|id| {
+                    info!(pid = pid.as_raw(), id, "Thread");
+                    ThreadWorker::new(
+                        format!("{:?}#{:?}", pid, id),
+                        app,
+                        receiver_ref.clone(),
+                        sender_ref.clone(),
+                        scheduler_class,
+                    )
+                })
+                .collect::<Result<Vec<_>>>()?,
+        ),
+        sender_ref,
+    ))
+}
+
+pub fn load_app(
+    app: Opaque<Value>,
+    scheduler_class: Option<String>,
+) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
+    call_with_gvl(|ruby| {
+        let app = app.get_inner_with(&ruby);
+        let app = Opaque::from(
+            app.funcall::<_, _, Value>(*ID_CALL, ())
+                .expect("Couldn't load app"),
+        );
+        let scheduler_class = if let Some(scheduler_class) = scheduler_class {
+            Some(Opaque::from(
+                ruby.module_kernel()
+                    .funcall::<_, _, Value>(*ID_CONST_GET, (scheduler_class,))?,
+            ))
+        } else {
+            None
+        };
+        Ok((app, scheduler_class))
+    })
+}
+impl ThreadWorker {
+    pub fn new(
+        id: String,
+        app: Opaque<Value>,
+        receiver: Arc<async_channel::Receiver<RequestJob>>,
+        sender: async_channel::Sender<RequestJob>,
+        scheduler_class: Option<Opaque<Value>>,
+    ) -> Result<Self> {
+        let mut worker = Self {
+            id,
+            app,
+            receiver,
+            sender,
+            thread: RwLock::new(None),
+            terminated: Arc::new(AtomicBool::new(false)),
+            scheduler_class,
+        };
+        worker.run()?;
+        Ok(worker)
+    }
+
+    #[instrument(skip(self), fields(id = self.id))]
+    pub async fn request_shutdown(&self) {
+        match self.sender.send(RequestJob::Shutdown).await {
+            Ok(_) => {}
+            Err(err) => error!("Failed to send shutdown request: {}", err),
+        };
+        info!("Requesting shutdown");
+    }
+
+    #[instrument(skip(self, deadline), fields(id = self.id))]
+    pub fn poll_shutdown(&self, deadline: Instant) -> bool {
+        call_with_gvl(|_ruby| {
+            if let Some(thread) = self.thread.read().deref() {
+                if Instant::now() > deadline {
+                    warn!("Worker shutdown timed out. Killing thread");
+                    self.terminated.store(true, Ordering::SeqCst);
+                    kill_threads(vec![thread.as_value()]);
+                }
+                if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                    return true;
+                }
+                info!("Thread has shut down");
+            }
+            self.thread.write().take();
+
+            false
+        })
+    }
+
+    pub fn run(&mut self) -> Result<()> {
+        let id = self.id.clone();
+        let app = self.app;
+        let receiver = self.receiver.clone();
+        let terminated = self.terminated.clone();
+        let scheduler_class = self.scheduler_class;
+        call_with_gvl(|_| {
+            *self.thread.write() = Some(
+                create_ruby_thread(move || {
+                    if let Some(scheduler_class) = scheduler_class {
+                        if let Err(err) =
+                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
+                        {
+                            error!("Error in fiber_accept_loop: {:?}", err);
+                        }
+                    } else {
+                        Self::accept_loop(id, app, receiver, terminated);
+                    }
+                })
+                .into(),
+            );
+            Ok::<(), magnus::Error>(())
+        })?;
+        Ok(())
+    }
+
+    pub fn build_scheduler_proc(
+        app: Opaque<Value>,
+        leader: &Arc<Mutex<Option<RequestJob>>>,
+        receiver: &Arc<async_channel::Receiver<RequestJob>>,
+        terminated: &Arc<AtomicBool>,
+        waker_sender: &watch::Sender<TerminateWakerSignal>,
+    ) -> magnus::block::Proc {
+        let leader = leader.clone();
+        let receiver = receiver.clone();
+        let terminated = terminated.clone();
+        let waker_sender = waker_sender.clone();
+        Ruby::get().unwrap().proc_from_fn(move |ruby, _args, _blk| {
+            let scheduler = ruby
+                .get_inner(&CLASS_FIBER)
+                .funcall::<_, _, Value>(*ID_SCHEDULER, ())
+                .unwrap();
+            let server = ruby.get_inner(&ITSI_SERVER);
+            let thread_current = ruby.thread_current();
+            let leader_clone = leader.clone();
+            let receiver = receiver.clone();
+            let terminated = terminated.clone();
+            let waker_sender = waker_sender.clone();
+            let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);
+
+            static MAX_BATCH_SIZE: i32 = 25;
+            call_without_gvl(move || loop {
+                let mut idle_counter = 0;
+                if let Some(v) = leader_clone.lock().take() {
+                    match v {
+                        RequestJob::ProcessRequest(itsi_request) => {
+                            batch.push(RequestJob::ProcessRequest(itsi_request))
+                        }
+                        RequestJob::Shutdown => {
+                            waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                            break;
+                        }
+                    }
+                }
+                for _ in 0..MAX_BATCH_SIZE {
+                    if let Ok(req) = receiver.try_recv() {
+                        batch.push(req);
+                    } else {
+                        break;
+                    }
+                }
+
+                let shutdown_requested = call_with_gvl(|_| {
+                    for req in batch.drain(..) {
+                        match req {
+                            RequestJob::ProcessRequest(request) => {
+                                let response = request.response.clone();
+                                if let Err(err) =
+                                    server.funcall::<_, _, Value>(*ID_SCHEDULE, (app, request))
+                                {
+                                    ItsiRequest::internal_error(ruby, response, err)
+                                }
+                            }
+                            RequestJob::Shutdown => return true,
+                        }
+                    }
+                    false
+                });
+
+                if shutdown_requested || terminated.load(Ordering::Relaxed) {
+                    waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                    break;
+                }
+
+                let yield_result = if receiver.is_empty() {
+                    waker_sender.send(TerminateWakerSignal(false)).unwrap();
+                    idle_counter = (idle_counter + 1) % 100;
+                    call_with_gvl(|ruby| {
+                        if idle_counter == 0 {
+                            ruby.gc_start();
+                        }
+                        scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
+                    })
+                } else {
+                    call_with_gvl(|_| scheduler.funcall::<_, _, Value>(*ID_YIELD, ()))
+                };
+
+                if yield_result.is_err() {
+                    break;
+                }
+            })
+        })
+    }
+
+    #[instrument(skip_all, fields(thread_worker=id))]
+    pub fn fiber_accept_loop(
+        id: String,
+        app: Opaque<Value>,
+        receiver: Arc<async_channel::Receiver<RequestJob>>,
+        scheduler_class: Opaque<Value>,
+        terminated: Arc<AtomicBool>,
+    ) -> Result<()> {
+        let ruby = Ruby::get().unwrap();
+        let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
+        let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
+        let server = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc =
+            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
+        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
+            "start_scheduler_loop",
+            (scheduler_class, scheduler_proc),
+        )?;
+        Self::start_waker_thread(
+            scheduler.into(),
+            scheduler_fiber.into(),
+            leader,
+            receiver,
+            waker_receiver,
+        );
+        Ok(())
+    }
+
+    #[allow(clippy::await_holding_lock)]
+    pub fn start_waker_thread(
+        scheduler: Opaque<Value>,
+        scheduler_fiber: Opaque<Value>,
+        leader: Arc<Mutex<Option<RequestJob>>>,
+        receiver: Arc<async_channel::Receiver<RequestJob>>,
+        mut waker_receiver: watch::Receiver<TerminateWakerSignal>,
+    ) {
+        create_ruby_thread(move || {
+            let scheduler = scheduler.get_inner_with(&Ruby::get().unwrap());
+            let leader = leader.clone();
+            call_without_gvl(|| {
+                RuntimeBuilder::new_current_thread()
+                    .build()
+                    .expect("Failed to build Tokio runtime")
+                    .block_on(async {
+                        loop {
+                            waker_receiver.changed().await.ok();
+                            if waker_receiver.borrow().0 {
+                                break;
+                            }
+                            tokio::select! {
+                                _ = waker_receiver.changed() => {
+                                    if waker_receiver.borrow().0 {
+                                        break;
+                                    }
+                                },
+                                next_msg = receiver.recv() => {
+                                    *leader.lock() = next_msg.ok();
+                                    call_with_gvl(|_| {
+                                        scheduler
+                                            .funcall::<_, _, Value>(
+                                                "unblock",
+                                                (None::<u8>, scheduler_fiber),
+                                            )
+                                            .ok();
+                                    });
+                                }
+                            }
+                        }
+                    })
+            });
+        });
+    }
+
+    #[instrument(skip_all, fields(thread_worker=id))]
+    pub fn accept_loop(
+        id: String,
+        app: Opaque<Value>,
+        receiver: Arc<async_channel::Receiver<RequestJob>>,
+        terminated: Arc<AtomicBool>,
+    ) {
+        let ruby = Ruby::get().unwrap();
+        let server = ruby.get_inner(&ITSI_SERVER);
+        call_without_gvl(|| loop {
+            match receiver.recv_blocking() {
+                Ok(RequestJob::ProcessRequest(request)) => {
+                    if terminated.load(Ordering::Relaxed) {
+                        break;
+                    }
+                    call_with_gvl(|_ruby| {
+                        request.process(&ruby, server, app).ok();
+                    })
+                }
+                Ok(RequestJob::Shutdown) => {
+                    debug!("Shutting down thread worker");
+                    break;
+                }
+                Err(_) => {
+                    thread::sleep(Duration::from_micros(1));
+                }
+            }
+        });
+    }
+}
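The worker above drains a bounded `async_channel` of `RequestJob`s: the plain `accept_loop` blocks on `recv_blocking()` and exits when it sees a `Shutdown` job, while the fiber variant instead batches up to `MAX_BATCH_SIZE` jobs per wake-up of the Ruby scheduler. Below is a minimal sketch of that consumer shape, assuming an async-channel version that provides the blocking send/recv helpers (the diff itself only shows `recv_blocking`) and with a string standing in for `ItsiRequest`.

```rust
// Sketch only: the blocking accept_loop shape, using a plain OS thread and a
// string payload in place of ItsiRequest / the Ruby-side processing.
use std::thread;

enum RequestJob {
    ProcessRequest(String),
    Shutdown,
}

fn main() {
    let (sender, receiver) = async_channel::bounded::<RequestJob>(20);

    let worker = thread::spawn(move || loop {
        match receiver.recv_blocking() {
            Ok(RequestJob::ProcessRequest(payload)) => {
                // The real worker re-acquires the GVL here and calls the Rack app.
                println!("processing {payload}");
            }
            // A Shutdown job, or a closed channel, ends the worker.
            Ok(RequestJob::Shutdown) | Err(_) => break,
        }
    });

    sender
        .send_blocking(RequestJob::ProcessRequest("req-1".into()))
        .unwrap();
    sender.send_blocking(RequestJob::Shutdown).unwrap();
    worker.join().unwrap();
}
```

Because every worker shares one receiver, the channel itself load-balances requests across threads, and a single `Shutdown` job stops whichever worker happens to dequeue it.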