itsi-scheduler 0.1.5 → 0.1.14

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of itsi-scheduler might be problematic.

Files changed (112)
  1. checksums.yaml +4 -4
  2. data/CODE_OF_CONDUCT.md +7 -0
  3. data/Cargo.lock +83 -22
  4. data/README.md +5 -0
  5. data/_index.md +7 -0
  6. data/ext/itsi_error/src/from.rs +26 -29
  7. data/ext/itsi_error/src/lib.rs +10 -1
  8. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  9. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  10. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  11. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  12. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  13. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  14. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  15. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  16. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  22. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  23. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  24. data/ext/itsi_rb_helpers/src/lib.rs +59 -9
  25. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  26. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  27. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  28. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  29. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  30. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  31. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  32. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  33. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  34. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  35. data/ext/itsi_server/Cargo.lock +2956 -0
  36. data/ext/itsi_server/Cargo.toml +69 -26
  37. data/ext/itsi_server/src/env.rs +43 -0
  38. data/ext/itsi_server/src/lib.rs +81 -75
  39. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  40. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +22 -3
  41. data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
  42. data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
  43. data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
  44. data/ext/itsi_server/src/{request/itsi_request.rs → ruby_types/itsi_http_request.rs} +108 -103
  45. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +79 -38
  46. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  47. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
  48. data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
  49. data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
  50. data/ext/itsi_server/src/server/bind.rs +33 -20
  51. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  52. data/ext/itsi_server/src/server/cache_store.rs +74 -0
  53. data/ext/itsi_server/src/server/itsi_service.rs +172 -0
  54. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  55. data/ext/itsi_server/src/server/listener.rs +197 -106
  56. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
  57. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
  58. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
  59. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
  60. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +264 -0
  61. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
  62. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
  63. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
  64. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
  65. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
  66. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
  67. data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
  68. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
  69. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
  70. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  71. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
  72. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
  73. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
  74. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  75. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
  76. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
  77. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
  78. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
  79. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
  80. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  81. data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
  82. data/ext/itsi_server/src/server/mod.rs +8 -1
  83. data/ext/itsi_server/src/server/process_worker.rs +44 -11
  84. data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
  85. data/ext/itsi_server/src/server/request_job.rs +11 -0
  86. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +129 -46
  87. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  88. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +337 -163
  89. data/ext/itsi_server/src/server/signal.rs +25 -2
  90. data/ext/itsi_server/src/server/static_file_server.rs +984 -0
  91. data/ext/itsi_server/src/server/thread_worker.rs +164 -88
  92. data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +55 -17
  93. data/ext/itsi_server/src/server/tls.rs +104 -28
  94. data/ext/itsi_server/src/server/types.rs +43 -0
  95. data/ext/itsi_tracing/Cargo.toml +1 -0
  96. data/ext/itsi_tracing/src/lib.rs +222 -34
  97. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  98. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  99. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  100. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  101. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  102. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  103. data/lib/itsi/scheduler/version.rb +1 -1
  104. data/lib/itsi/scheduler.rb +2 -2
  105. metadata +79 -14
  106. data/ext/itsi_server/extconf.rb +0 -6
  107. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  108. data/ext/itsi_server/src/request/mod.rs +0 -1
  109. data/ext/itsi_server/src/response/mod.rs +0 -1
  110. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
  111. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
  112. data/ext/itsi_server/src/server/itsi_server.rs +0 -244
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs (+337 -163)

@@ -1,42 +1,54 @@
  use crate::{
- request::itsi_request::ItsiRequest,
+ ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
  server::{
  io_stream::IoStream,
- itsi_server::{RequestJob, Server},
+ itsi_service::{IstiServiceInner, ItsiService},
  lifecycle_event::LifecycleEvent,
- listener::{Listener, TokioListener},
+ listener::ListenerInfo,
+ request_job::RequestJob,
+ signal::SIGNAL_HANDLER_CHANNEL,
  thread_worker::{build_thread_workers, ThreadWorker},
  },
  };
- use http::Request;
- use hyper::{body::Incoming, service::service_fn};
  use hyper_util::{
  rt::{TokioExecutor, TokioIo, TokioTimer},
  server::conn::auto::Builder,
  };
  use itsi_error::{ItsiError, Result};
+ use itsi_rb_helpers::{
+ call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
+ };
  use itsi_tracing::{debug, error, info};
+ use magnus::value::ReprValue;
  use nix::unistd::Pid;
+ use parking_lot::RwLock;
  use std::{
+ collections::HashMap,
  num::NonZeroU8,
  pin::Pin,
- sync::Arc,
- time::{Duration, Instant},
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ thread::sleep,
+ time::{Duration, Instant, SystemTime, UNIX_EPOCH},
  };
  use tokio::{
  runtime::{Builder as RuntimeBuilder, Runtime},
- sync::broadcast,
+ sync::{
+ broadcast,
+ watch::{self},
+ },
  task::JoinSet,
  };
  use tracing::instrument;

  pub struct SingleMode {
  pub executor: Builder<TokioExecutor>,
- pub server: Arc<Server>,
- pub sender: async_channel::Sender<RequestJob>,
- pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
- pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
+ pub server_config: Arc<ItsiServerConfig>,
  pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+ pub restart_requested: AtomicBool,
+ pub status: RwLock<HashMap<u8, (u64, u64)>>,
  }

  pub enum RunningPhase {
@@ -46,30 +58,29 @@ pub enum RunningPhase {
  }

  impl SingleMode {
- #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
- pub(crate) fn new(
- server: Arc<Server>,
- listeners: Arc<Vec<Arc<Listener>>>,
- lifecycle_channel: broadcast::Sender<LifecycleEvent>,
- ) -> Result<Self> {
- let (thread_workers, sender) = build_thread_workers(
- Pid::this(),
- NonZeroU8::try_from(server.threads).unwrap(),
- server.app,
- server.scheduler_class.clone(),
- )?;
+ #[instrument(parent=None, skip_all)]
+ pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+ server_config.server_params.read().preload_ruby()?;
  Ok(Self {
  executor: Builder::new(TokioExecutor::new()),
- listeners,
- server,
- sender,
- thread_workers,
- lifecycle_channel,
+ server_config,
+ lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
+ restart_requested: AtomicBool::new(false),
+ status: RwLock::new(HashMap::new()),
  })
  }

  pub fn build_runtime(&self) -> Runtime {
- let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
+ let mut builder: RuntimeBuilder = if self
+ .server_config
+ .server_params
+ .read()
+ .multithreaded_reactor
+ {
+ RuntimeBuilder::new_multi_thread()
+ } else {
+ RuntimeBuilder::new_current_thread()
+ };
  builder
  .thread_name("itsi-server-accept-loop")
  .thread_stack_size(3 * 1024 * 1024)
@@ -80,168 +91,331 @@ impl SingleMode {
  }

  pub fn stop(&self) -> Result<()> {
+ self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
  Ok(())
  }

- #[instrument(parent=None, skip(self))]
+ pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
+ println!(" └─ Worker");
+ println!(
+ " - binds: {:?}",
+ self.server_config.server_params.read().binds
+ );
+
+ println!(
+ " ─ script_name: {:?}",
+ self.server_config.server_params.read().script_name
+ );
+ println!(
+ " ─ streaming body: {:?}",
+ self.server_config.server_params.read().streamable_body
+ );
+ println!(
+ " ─ multithreaded runtime: {:?}",
+ self.server_config
+ .server_params
+ .read()
+ .multithreaded_reactor
+ );
+ println!(
+ " ─ scheduler: {:?}",
+ self.server_config.server_params.read().scheduler_class
+ );
+ println!(
+ " ─ OOB GC Response threadhold: {:?}",
+ self.server_config
+ .server_params
+ .read()
+ .oob_gc_responses_threshold
+ );
+ for worker in thread_workers.iter() {
+ println!(" └─ - Thread : {:?}", worker.id);
+ println!(" - # Requests Processed: {:?}", worker.request_id);
+ println!(
+ " - Last Request Started: {:?} ago",
+ if worker.current_request_start.load(Ordering::Relaxed) == 0 {
+ Duration::from_secs(0)
+ } else {
+ SystemTime::now()
+ .duration_since(
+ UNIX_EPOCH
+ + Duration::from_secs(
+ worker.current_request_start.load(Ordering::Relaxed),
+ ),
+ )
+ .unwrap_or(Duration::from_secs(0))
+ }
+ );
+ call_with_gvl(|_| {
+ if let Some(thread) = worker.thread.read().as_ref() {
+ if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
+ println!(" - Backtrace:");
+ for line in backtrace {
+ println!(" - {}", line);
+ }
+ }
+ }
+ })
+ }
+
+ Ok(())
+ }
+
+ pub fn start_monitors(
+ self: Arc<Self>,
+ thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
+ ) -> magnus::Thread {
+ call_with_gvl(move |_| {
+ create_ruby_thread(move || {
+ call_without_gvl(move || {
+ let monitor_runtime = RuntimeBuilder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+ let receiver = self.clone();
+ monitor_runtime.block_on({
+ let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+ let receiver = receiver.clone();
+ let thread_workers = thread_workers.clone();
+ async move {
+ loop {
+ tokio::select! {
+ _ = tokio::time::sleep(Duration::from_secs(1)) => {
+ let mut status_lock = receiver.status.write();
+ thread_workers.iter().for_each(|worker| {
+ let worker_entry = status_lock.entry(worker.id);
+ let data = (
+ worker.request_id.load(Ordering::Relaxed),
+ worker.current_request_start.load(Ordering::Relaxed),
+ );
+ worker_entry.or_insert(data);
+ });
+ }
+ lifecycle_event = lifecycle_rx.recv() => {
+ match lifecycle_event {
+ Ok(LifecycleEvent::Restart) => {
+ receiver.restart().ok();
+ }
+ Ok(LifecycleEvent::Reload) => {
+ receiver.reload().ok();
+ }
+ Ok(LifecycleEvent::Shutdown) => {
+ break;
+ }
+ Ok(LifecycleEvent::PrintInfo) => {
+ receiver.print_info(thread_workers.clone()).await.ok();
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+ })
+ })
+ })
+ })
+ }
+
+ #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
  pub fn run(self: Arc<Self>) -> Result<()> {
  let mut listener_task_set = JoinSet::new();
- let self_ref = Arc::new(self);
- self_ref.build_runtime().block_on(async {
-
- for listener in self_ref.listeners.clone().iter() {
- let listener = Arc::new(listener.to_tokio_listener());
- let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
- let self_ref = self_ref.clone();
- let listener = listener.clone();
- let (shutdown_sender, mut shutdown_receiver) = tokio::sync::watch::channel::<RunningPhase>(RunningPhase::Running);
- let listener_clone = listener.clone();
-
- tokio::spawn(async move {
- listener_clone.spawn_state_task().await;
- });
-
- listener_task_set.spawn(async move {
- let strategy = self_ref.clone();
- loop {
- tokio::select! {
- accept_result = listener.accept() => match accept_result {
- Ok(accept_result) => {
- if let Err(e) = strategy.serve_connection(accept_result, listener.clone(), shutdown_receiver.clone()).await {
- error!("Error in serve_connection {:?}", e)
+ let runtime = self.build_runtime();
+
+ let (thread_workers, job_sender) = build_thread_workers(
+ self.server_config.server_params.read().clone(),
+ Pid::this(),
+ NonZeroU8::try_from(self.server_config.server_params.read().threads).unwrap(),
+ )
+ .inspect_err(|e| {
+ if let Some(err_val) = e.value() {
+ print_rb_backtrace(err_val);
+ }
+ })?;
+
+ info!(
+ pid = format!("{}", Pid::this()),
+ threads = thread_workers.len(),
+ binds = format!("{:?}", self.server_config.server_params.read().binds)
+ );
+
+ let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+ let thread = self.clone().start_monitors(thread_workers.clone());
+ runtime.block_on(
+ async {
+ let server_params = self.server_config.server_params.read().clone();
+ server_params.middleware.get().unwrap().initialize_layers().await?;
+ let tokio_listeners = server_params.listeners.lock()
+ .drain(..)
+ .map(|list| {
+ Arc::new(list.into_tokio_listener())
+ })
+ .collect::<Vec<_>>();
+
+ for listener in tokio_listeners.iter() {
+ let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+ let listener_info = Arc::new(listener.listener_info());
+ let self_ref = self.clone();
+ let listener = listener.clone();
+ let shutdown_sender = shutdown_sender.clone();
+ let job_sender = job_sender.clone();
+ let workers_clone = thread_workers.clone();
+ let listener_clone = listener.clone();
+ let mut shutdown_receiver = shutdown_sender.subscribe();
+ let shutdown_receiver_clone = shutdown_receiver.clone();
+ listener_task_set.spawn(async move {
+ listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+ });
+
+ listener_task_set.spawn(async move {
+ let strategy_clone = self_ref.clone();
+ let mut acceptor_task_set = JoinSet::new();
+ loop {
+ tokio::select! {
+ accept_result = listener.accept() => match accept_result {
+ Ok(accept_result) => {
+ let strategy = strategy_clone.clone();
+ let listener_info = listener_info.clone();
+ let shutdown_receiver = shutdown_receiver.clone();
+ let job_sender = job_sender.clone();
+ acceptor_task_set.spawn(async move {
+ strategy.serve_connection(accept_result, job_sender, listener_info, shutdown_receiver).await;
+ });
+ },
+ Err(e) => debug!("Listener.accept failed {:?}", e),
+ },
+ _ = shutdown_receiver.changed() => {
+ break;
  }
- },
- Err(e) => debug!("Listener.accept failed {:?}", e),
- },
- _ = shutdown_receiver.changed() => {
- break;
+ lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+ Ok(LifecycleEvent::Shutdown) => {
+ shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
+ // Tell any in-progress connections to stop accepting new requests
+ tokio::time::sleep(Duration::from_millis(25)).await;
+ // Tell workers to stop processing requests once they've flushed their buffers.
+ for _i in 0..workers_clone.len() {
+ job_sender.send(RequestJob::Shutdown).await.unwrap();
+ }
+ break;
+ },
+ Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+ _ => {}
+ }
  }
- lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
- Ok(lifecycle_event) => {
- if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
- match e {
- ItsiError::Break() => break,
- _ => error!("Error in handle_lifecycle_event {:?}", e)
- }
- }
-
- },
- Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
- }
  }
- }
- if let Ok(listener) = Arc::try_unwrap(listener){
- listener.unbind();
- }
- });
+ while let Some(_res) = acceptor_task_set.join_next().await {}
+ });
+
+ }
+
+ while let Some(_res) = listener_task_set.join_next().await {}
+
+ Ok::<(), ItsiError>(())
+ })?;

- }
+ shutdown_sender.send(RunningPhase::Shutdown).ok();
+ let deadline = Instant::now()
+ + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);

- while let Some(_res) = listener_task_set.join_next().await {}
- });
+ runtime.shutdown_timeout(Duration::from_millis(100));

+ loop {
+ if thread_workers
+ .iter()
+ .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
+ {
+ funcall_no_ret(thread, "join", ()).ok();
+ break;
+ }
+ sleep(Duration::from_millis(50));
+ }
+
+ if self.restart_requested.load(Ordering::SeqCst) {
+ self.restart_requested.store(false, Ordering::SeqCst);
+ info!("Worker restarting");
+ self.run()?;
+ }
+ debug!("Runtime has shut down");
  Ok(())
  }

  pub(crate) async fn serve_connection(
  &self,
  stream: IoStream,
- listener: Arc<TokioListener>,
- shutdown_channel: tokio::sync::watch::Receiver<RunningPhase>,
- ) -> Result<()> {
- let sender_clone = self.sender.clone();
+ job_sender: async_channel::Sender<RequestJob>,
+ listener: Arc<ListenerInfo>,
+ shutdown_channel: watch::Receiver<RunningPhase>,
+ ) {
  let addr = stream.addr();
  let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
- let server = self.server.clone();
  let executor = self.executor.clone();
  let mut shutdown_channel_clone = shutdown_channel.clone();
- tokio::spawn(async move {
- let server = server.clone();
- let mut executor = executor.clone();
- let mut binding = executor.http1();
- let shutdown_channel = shutdown_channel_clone.clone();
- let mut serve = Box::pin(
- binding
- .timer(TokioTimer::new())
- .header_read_timeout(Duration::from_secs(1))
- .serve_connection_with_upgrades(
- io,
- service_fn(move |hyper_request: Request<Incoming>| {
- ItsiRequest::process_request(
- hyper_request,
- sender_clone.clone(),
- server.clone(),
- listener.clone(),
- addr.clone(),
- shutdown_channel.clone(),
- )
- }),
- ),
- );
+ let mut executor = executor.clone();
+ let mut binding = executor.http1();
+ let shutdown_channel = shutdown_channel_clone.clone();

- tokio::select! {
- // Await the connection finishing naturally.
- res = &mut serve => {
- match res{
- Ok(()) => {
- debug!("Connection closed normally")
- },
- Err(res) => {
- debug!("Connection finished with error: {:?}", res)
- }
- }
- serve.as_mut().graceful_shutdown();
- },
- // A lifecycle event triggers shutdown.
- _ = shutdown_channel_clone.changed() => {
- // Initiate graceful shutdown.
- serve.as_mut().graceful_shutdown();
- // Now await the connection to finish shutting down.
- if let Err(e) = serve.await {
- debug!("Connection shutdown error: {:?}", e);
+ let service = ItsiService {
+ inner: Arc::new(IstiServiceInner {
+ sender: job_sender.clone(),
+ server_params: self.server_config.server_params.read().clone(),
+ listener,
+ addr: addr.to_string(),
+ shutdown_channel: shutdown_channel.clone(),
+ }),
+ };
+ let mut serve = Box::pin(
+ binding
+ .timer(TokioTimer::new()) // your existing timer
+ .header_read_timeout(Duration::from_secs(1))
+ .serve_connection_with_upgrades(io, service),
+ );
+
+ tokio::select! {
+ // Await the connection finishing naturally.
+ res = &mut serve => {
+ match res{
+ Ok(()) => {
+ debug!("Connection closed normally")
+ },
+ Err(res) => {
+ debug!("Connection closed abruptly: {:?}", res)
  }
  }
- }
- });
- Ok(())
- }
+ serve.as_mut().graceful_shutdown();
+ },
+ // A lifecycle event triggers shutdown.
+ _ = shutdown_channel_clone.changed() => {
+ // Initiate graceful shutdown.
+ info!("Starting graceful shutdown");
+ serve.as_mut().graceful_shutdown();

- pub async fn handle_lifecycle_event(
- &self,
- lifecycle_event: LifecycleEvent,
- shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
- ) -> Result<()> {
- if let LifecycleEvent::Shutdown = lifecycle_event {
- shutdown_sender
- .send(RunningPhase::ShutdownPending)
- .expect("Failed to send shutdown pending signal");
- let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
- for worker in &*self.thread_workers {
- worker.request_shutdown().await;
- }
- while Instant::now() < deadline {
- tokio::time::sleep(Duration::from_millis(50)).await;
- let alive_threads = self
- .thread_workers
- .iter()
- .filter(|worker| worker.poll_shutdown(deadline))
- .count();
- if alive_threads == 0 {
- break;
+ // Now await the connection to finish shutting down.
+ if let Err(e) = serve.await {
+ debug!("Connection shutdown error: {:?}", e);
  }
- tokio::time::sleep(Duration::from_millis(200)).await;
  }
+ }
+ }

- info!("Sending shutdown signal");
- shutdown_sender
- .send(RunningPhase::Shutdown)
- .expect("Failed to send shutdown signal");
- self.thread_workers.iter().for_each(|worker| {
- worker.poll_shutdown(deadline);
- });
-
- return Err(ItsiError::Break());
+ /// Attempts to reload the config "live"
+ /// Not that when running in single mode this will not unload
+ /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
+ pub fn reload(&self) -> Result<()> {
+ let should_reexec = self.server_config.clone().reload(false)?;
+ if should_reexec {
+ self.server_config.dup_fds()?;
+ self.server_config.reload_exec()?;
  }
+ self.restart_requested.store(true, Ordering::SeqCst);
+ self.stop()?;
+ self.server_config.server_params.read().preload_ruby()?;
+ Ok(())
+ }
+
+ /// Restart the server while keeping connections open.
+ pub fn restart(&self) -> Result<()> {
+ self.server_config.dup_fds()?;
+ self.server_config.reload_exec()?;
  Ok(())
  }
  }
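
Worth calling out from the hunk above: build_runtime now picks between Tokio's multi-threaded and current-thread schedulers based on the multithreaded_reactor server parameter, instead of always using a current-thread runtime. Below is a minimal, self-contained sketch of that selection pattern; the multithreaded flag, the "accept-loop" thread name, and the enable_all() call are illustrative assumptions, not the crate's exact configuration.

    use tokio::runtime::{Builder as RuntimeBuilder, Runtime};

    // Choose the Tokio scheduler at startup from a config flag, mirroring the
    // shape of SingleMode::build_runtime in the diff above.
    fn build_runtime(multithreaded: bool) -> Runtime {
        let mut builder = if multithreaded {
            RuntimeBuilder::new_multi_thread()
        } else {
            RuntimeBuilder::new_current_thread()
        };
        builder
            .thread_name("accept-loop")
            .thread_stack_size(3 * 1024 * 1024)
            .enable_all()
            .build()
            .expect("failed to build Tokio runtime")
    }

    fn main() {
        // Either variant exposes the same Runtime API to the caller.
        build_runtime(true).block_on(async {
            println!("running inside the selected runtime");
        });
    }

Either way the caller just gets a Runtime to block_on, so the rest of run() is unchanged by the flag.
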
data/ext/itsi_server/src/server/signal.rs (+25 -2)

@@ -10,6 +10,10 @@ pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
  broadcast::Receiver<LifecycleEvent>,
  )> = LazyLock::new(|| sync::broadcast::channel(5));

+ pub fn send_shutdown_event() {
+ SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
+ }
+
  pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
  fn receive_signal(signum: i32, _: sighandler_t) {
  SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
@@ -26,9 +30,18 @@ fn receive_signal(signum: i32, _: sighandler_t) {
  .ok();
  }
  }
+ libc::SIGUSR2 => {
+ SIGNAL_HANDLER_CHANNEL
+ .0
+ .send(LifecycleEvent::PrintInfo)
+ .ok();
+ }
  libc::SIGUSR1 => {
  SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
  }
+ libc::SIGHUP => {
+ SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Reload).ok();
+ }
  libc::SIGTTIN => {
  SIGNAL_HANDLER_CHANNEL
  .0
@@ -41,6 +54,12 @@ fn receive_signal(signum: i32, _: sighandler_t) {
  .send(LifecycleEvent::DecreaseWorkers)
  .ok();
  }
+ libc::SIGCHLD => {
+ SIGNAL_HANDLER_CHANNEL
+ .0
+ .send(LifecycleEvent::ChildTerminated)
+ .ok();
+ }
  _ => {}
  }
  }
@@ -50,10 +69,12 @@ pub fn reset_signal_handlers() -> bool {
  unsafe {
  libc::signal(libc::SIGTERM, receive_signal as usize);
  libc::signal(libc::SIGINT, receive_signal as usize);
- libc::signal(libc::SIGUSR1, receive_signal as usize);
  libc::signal(libc::SIGUSR2, receive_signal as usize);
+ libc::signal(libc::SIGUSR1, receive_signal as usize);
+ libc::signal(libc::SIGHUP, receive_signal as usize);
  libc::signal(libc::SIGTTIN, receive_signal as usize);
  libc::signal(libc::SIGTTOU, receive_signal as usize);
+ libc::signal(libc::SIGCHLD, receive_signal as usize);
  }
  true
  }
@@ -62,9 +83,11 @@ pub fn clear_signal_handlers() {
  unsafe {
  libc::signal(libc::SIGTERM, libc::SIG_DFL);
  libc::signal(libc::SIGINT, libc::SIG_DFL);
- libc::signal(libc::SIGUSR1, libc::SIG_DFL);
  libc::signal(libc::SIGUSR2, libc::SIG_DFL);
+ libc::signal(libc::SIGUSR1, libc::SIG_DFL);
+ libc::signal(libc::SIGHUP, libc::SIG_DFL);
  libc::signal(libc::SIGTTIN, libc::SIG_DFL);
  libc::signal(libc::SIGTTOU, libc::SIG_DFL);
+ libc::signal(libc::SIGCHLD, libc::SIG_DFL);
  }
  }
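
The signal changes all funnel into one pattern: a process-wide, lazily initialised broadcast channel onto which the signal handlers publish LifecycleEvents, and which long-lived tasks (such as the monitor loop added in single_mode.rs) subscribe to. Below is a minimal, self-contained sketch of that fan-out, with a reduced stand-in for the crate's LifecycleEvent enum and SIGNAL_HANDLER_CHANNEL static; it is not the crate's actual code.

    use std::sync::LazyLock;
    use tokio::sync::broadcast;

    // Stand-in for the crate's LifecycleEvent; only variants visible in the
    // diffs above are reproduced here.
    #[derive(Clone, Debug)]
    enum LifecycleEvent {
        Shutdown,
        Restart,
        Reload,
        PrintInfo,
    }

    // Process-wide channel, mirroring SIGNAL_HANDLER_CHANNEL: publishers clone
    // the sender half; each consumer calls .subscribe() for its own receiver.
    static LIFECYCLE_CHANNEL: LazyLock<(
        broadcast::Sender<LifecycleEvent>,
        broadcast::Receiver<LifecycleEvent>,
    )> = LazyLock::new(|| broadcast::channel(5));

    fn send_shutdown_event() {
        // Ignore the error case (no active subscribers), as the diff does.
        LIFECYCLE_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
    }

    #[tokio::main]
    async fn main() {
        let mut rx = LIFECYCLE_CHANNEL.0.subscribe();
        tokio::spawn(async {
            send_shutdown_event();
        });
        while let Ok(event) = rx.recv().await {
            println!("lifecycle event: {:?}", event);
            if matches!(event, LifecycleEvent::Shutdown) {
                break;
            }
        }
    }

Because every subscriber gets its own receiver, the same event (SIGHUP → Reload, SIGUSR2 → PrintInfo, and so on) can drive both the acceptor loops and the monitor thread without them sharing any other state.
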