itsi-server 0.1.11 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/CODE_OF_CONDUCT.md +7 -0
  4. data/Cargo.lock +1536 -45
  5. data/README.md +4 -0
  6. data/_index.md +6 -0
  7. data/exe/itsi +33 -74
  8. data/ext/itsi_error/src/lib.rs +9 -0
  9. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  10. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  11. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  12. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  13. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  14. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  15. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  16. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  22. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  23. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  24. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  25. data/ext/itsi_rb_helpers/src/lib.rs +34 -7
  26. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  27. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  28. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  29. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  30. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  31. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  32. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  33. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  34. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  35. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  36. data/ext/itsi_server/Cargo.toml +69 -30
  37. data/ext/itsi_server/src/lib.rs +79 -147
  38. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  39. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +22 -3
  40. data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
  41. data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
  42. data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
  43. data/ext/itsi_server/src/{request/itsi_request.rs → ruby_types/itsi_http_request.rs} +101 -117
  44. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +72 -41
  45. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  46. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
  47. data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
  48. data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
  49. data/ext/itsi_server/src/server/bind.rs +13 -5
  50. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  51. data/ext/itsi_server/src/server/cache_store.rs +74 -0
  52. data/ext/itsi_server/src/server/itsi_service.rs +172 -0
  53. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  54. data/ext/itsi_server/src/server/listener.rs +102 -2
  55. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
  56. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
  57. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
  58. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
  59. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +321 -0
  60. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
  61. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
  62. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
  63. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
  64. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
  65. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
  66. data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
  67. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
  68. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
  69. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  70. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
  71. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
  72. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
  73. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  74. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
  75. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
  76. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
  77. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
  78. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
  79. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  80. data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
  81. data/ext/itsi_server/src/server/mod.rs +8 -1
  82. data/ext/itsi_server/src/server/process_worker.rs +38 -12
  83. data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
  84. data/ext/itsi_server/src/server/request_job.rs +11 -0
  85. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +119 -42
  86. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  87. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +256 -111
  88. data/ext/itsi_server/src/server/signal.rs +19 -0
  89. data/ext/itsi_server/src/server/static_file_server.rs +984 -0
  90. data/ext/itsi_server/src/server/thread_worker.rs +139 -94
  91. data/ext/itsi_server/src/server/types.rs +43 -0
  92. data/ext/itsi_tracing/Cargo.toml +1 -0
  93. data/ext/itsi_tracing/src/lib.rs +216 -45
  94. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  95. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  96. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  97. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  98. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  99. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  100. data/lib/itsi/{request.rb → http_request.rb} +29 -5
  101. data/lib/itsi/http_response.rb +39 -0
  102. data/lib/itsi/server/Itsi.rb +11 -19
  103. data/lib/itsi/server/config/dsl.rb +506 -0
  104. data/lib/itsi/server/config.rb +103 -8
  105. data/lib/itsi/server/default_app/default_app.rb +38 -0
  106. data/lib/itsi/server/grpc_interface.rb +213 -0
  107. data/lib/itsi/server/rack/handler/itsi.rb +8 -17
  108. data/lib/itsi/server/rack_interface.rb +23 -4
  109. data/lib/itsi/server/scheduler_interface.rb +1 -1
  110. data/lib/itsi/server/scheduler_mode.rb +4 -0
  111. data/lib/itsi/server/signal_trap.rb +7 -1
  112. data/lib/itsi/server/version.rb +1 -1
  113. data/lib/itsi/server.rb +74 -63
  114. data/lib/itsi/standard_headers.rb +86 -0
  115. metadata +84 -15
  116. data/ext/itsi_scheduler/extconf.rb +0 -6
  117. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  118. data/ext/itsi_server/src/request/mod.rs +0 -1
  119. data/ext/itsi_server/src/response/mod.rs +0 -1
  120. data/ext/itsi_server/src/server/itsi_server.rs +0 -288
  121. data/lib/itsi/server/options_dsl.rb +0 -401
  122. data/lib/itsi/stream_io.rb +0 -38
  123. /data/lib/itsi/{index.html → server/default_app/index.html} +0 -0
@@ -1,36 +1,43 @@
1
1
  use crate::{
2
- request::itsi_request::ItsiRequest,
2
+ ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
3
3
  server::{
4
4
  io_stream::IoStream,
5
- itsi_server::{RequestJob, Server},
5
+ itsi_service::{IstiServiceInner, ItsiService},
6
6
  lifecycle_event::LifecycleEvent,
7
- listener::{Listener, ListenerInfo},
7
+ listener::ListenerInfo,
8
+ request_job::RequestJob,
9
+ signal::SIGNAL_HANDLER_CHANNEL,
8
10
  thread_worker::{build_thread_workers, ThreadWorker},
9
11
  },
10
12
  };
11
- use http::Request;
12
- use hyper::{body::Incoming, service::service_fn};
13
13
  use hyper_util::{
14
14
  rt::{TokioExecutor, TokioIo, TokioTimer},
15
15
  server::conn::auto::Builder,
16
16
  };
17
17
  use itsi_error::{ItsiError, Result};
18
- use itsi_rb_helpers::print_rb_backtrace;
18
+ use itsi_rb_helpers::{
19
+ call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
20
+ };
19
21
  use itsi_tracing::{debug, error, info};
22
+ use magnus::value::ReprValue;
20
23
  use nix::unistd::Pid;
21
- use parking_lot::Mutex;
24
+ use parking_lot::RwLock;
22
25
  use std::{
26
+ collections::HashMap,
23
27
  num::NonZeroU8,
24
- panic,
25
28
  pin::Pin,
26
- sync::Arc,
27
- time::{Duration, Instant},
29
+ sync::{
30
+ atomic::{AtomicBool, Ordering},
31
+ Arc,
32
+ },
33
+ thread::sleep,
34
+ time::{Duration, Instant, SystemTime, UNIX_EPOCH},
28
35
  };
29
36
  use tokio::{
30
37
  runtime::{Builder as RuntimeBuilder, Runtime},
31
38
  sync::{
32
39
  broadcast,
33
- watch::{self, Sender},
40
+ watch::{self},
34
41
  },
35
42
  task::JoinSet,
36
43
  };
@@ -38,11 +45,10 @@ use tracing::instrument;
38
45
 
39
46
  pub struct SingleMode {
40
47
  pub executor: Builder<TokioExecutor>,
41
- pub server: Arc<Server>,
42
- pub sender: async_channel::Sender<RequestJob>,
43
- pub(crate) listeners: Mutex<Vec<Listener>>,
44
- pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
48
+ pub server_config: Arc<ItsiServerConfig>,
45
49
  pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
50
+ pub restart_requested: AtomicBool,
51
+ pub status: RwLock<HashMap<u8, (u64, u64)>>,
46
52
  }
47
53
 
48
54
  pub enum RunningPhase {
@@ -52,36 +58,29 @@ pub enum RunningPhase {
52
58
  }
53
59
 
54
60
  impl SingleMode {
55
- #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
56
- pub(crate) fn new(
57
- server: Arc<Server>,
58
- listeners: Vec<Listener>,
59
- lifecycle_channel: broadcast::Sender<LifecycleEvent>,
60
- ) -> Result<Self> {
61
- let (thread_workers, sender) = build_thread_workers(
62
- server.clone(),
63
- Pid::this(),
64
- NonZeroU8::try_from(server.threads).unwrap(),
65
- server.app.clone(),
66
- server.scheduler_class.clone(),
67
- )
68
- .inspect_err(|e| {
69
- if let Some(err_val) = e.value() {
70
- print_rb_backtrace(err_val);
71
- }
72
- })?;
61
+ #[instrument(parent=None, skip_all)]
62
+ pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
63
+ server_config.server_params.read().preload_ruby()?;
73
64
  Ok(Self {
74
65
  executor: Builder::new(TokioExecutor::new()),
75
- listeners: Mutex::new(listeners),
76
- server,
77
- sender,
78
- thread_workers,
79
- lifecycle_channel,
66
+ server_config,
67
+ lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
68
+ restart_requested: AtomicBool::new(false),
69
+ status: RwLock::new(HashMap::new()),
80
70
  })
81
71
  }
82
72
 
83
73
  pub fn build_runtime(&self) -> Runtime {
84
- let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
74
+ let mut builder: RuntimeBuilder = if self
75
+ .server_config
76
+ .server_params
77
+ .read()
78
+ .multithreaded_reactor
79
+ {
80
+ RuntimeBuilder::new_multi_thread()
81
+ } else {
82
+ RuntimeBuilder::new_current_thread()
83
+ };
85
84
  builder
86
85
  .thread_name("itsi-server-accept-loop")
87
86
  .thread_stack_size(3 * 1024 * 1024)
@@ -96,27 +95,171 @@ impl SingleMode {
96
95
  Ok(())
97
96
  }
98
97
 
99
- #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
98
+ pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
99
+ println!(" └─ Worker");
100
+ println!(
101
+ " - binds: {:?}",
102
+ self.server_config.server_params.read().binds
103
+ );
104
+
105
+ println!(
106
+ " ─ script_name: {:?}",
107
+ self.server_config.server_params.read().script_name
108
+ );
109
+ println!(
110
+ " ─ streaming body: {:?}",
111
+ self.server_config.server_params.read().streamable_body
112
+ );
113
+ println!(
114
+ " ─ multithreaded runtime: {:?}",
115
+ self.server_config
116
+ .server_params
117
+ .read()
118
+ .multithreaded_reactor
119
+ );
120
+ println!(
121
+ " ─ scheduler: {:?}",
122
+ self.server_config.server_params.read().scheduler_class
123
+ );
124
+ println!(
125
+ " ─ OOB GC Response threadhold: {:?}",
126
+ self.server_config
127
+ .server_params
128
+ .read()
129
+ .oob_gc_responses_threshold
130
+ );
131
+ for worker in thread_workers.iter() {
132
+ println!(" └─ - Thread : {:?}", worker.id);
133
+ println!(" - # Requests Processed: {:?}", worker.request_id);
134
+ println!(
135
+ " - Last Request Started: {:?} ago",
136
+ if worker.current_request_start.load(Ordering::Relaxed) == 0 {
137
+ Duration::from_secs(0)
138
+ } else {
139
+ SystemTime::now()
140
+ .duration_since(
141
+ UNIX_EPOCH
142
+ + Duration::from_secs(
143
+ worker.current_request_start.load(Ordering::Relaxed),
144
+ ),
145
+ )
146
+ .unwrap_or(Duration::from_secs(0))
147
+ }
148
+ );
149
+ call_with_gvl(|_| {
150
+ if let Some(thread) = worker.thread.read().as_ref() {
151
+ if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
152
+ println!(" - Backtrace:");
153
+ for line in backtrace {
154
+ println!(" - {}", line);
155
+ }
156
+ }
157
+ }
158
+ })
159
+ }
160
+
161
+ Ok(())
162
+ }
163
+
164
+ pub fn start_monitors(
165
+ self: Arc<Self>,
166
+ thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
167
+ ) -> magnus::Thread {
168
+ call_with_gvl(move |_| {
169
+ create_ruby_thread(move || {
170
+ call_without_gvl(move || {
171
+ let monitor_runtime = RuntimeBuilder::new_current_thread()
172
+ .enable_time()
173
+ .build()
174
+ .unwrap();
175
+ let receiver = self.clone();
176
+ monitor_runtime.block_on({
177
+ let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
178
+ let receiver = receiver.clone();
179
+ let thread_workers = thread_workers.clone();
180
+ async move {
181
+ loop {
182
+ tokio::select! {
183
+ _ = tokio::time::sleep(Duration::from_secs(1)) => {
184
+ let mut status_lock = receiver.status.write();
185
+ thread_workers.iter().for_each(|worker| {
186
+ let worker_entry = status_lock.entry(worker.id);
187
+ let data = (
188
+ worker.request_id.load(Ordering::Relaxed),
189
+ worker.current_request_start.load(Ordering::Relaxed),
190
+ );
191
+ worker_entry.or_insert(data);
192
+ });
193
+ }
194
+ lifecycle_event = lifecycle_rx.recv() => {
195
+ match lifecycle_event {
196
+ Ok(LifecycleEvent::Restart) => {
197
+ receiver.restart().ok();
198
+ }
199
+ Ok(LifecycleEvent::Reload) => {
200
+ receiver.reload().ok();
201
+ }
202
+ Ok(LifecycleEvent::Shutdown) => {
203
+ break;
204
+ }
205
+ Ok(LifecycleEvent::PrintInfo) => {
206
+ receiver.print_info(thread_workers.clone()).await.ok();
207
+ }
208
+ _ => {}
209
+ }
210
+ }
211
+ }
212
+ }
213
+ }
214
+ })
215
+ })
216
+ })
217
+ })
218
+ }
219
+
220
+ #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
100
221
  pub fn run(self: Arc<Self>) -> Result<()> {
101
222
  let mut listener_task_set = JoinSet::new();
102
223
  let runtime = self.build_runtime();
103
224
 
104
- runtime.block_on(async {
105
- let tokio_listeners = self
106
- .listeners.lock()
225
+ let (thread_workers, job_sender) = build_thread_workers(
226
+ self.server_config.server_params.read().clone(),
227
+ Pid::this(),
228
+ NonZeroU8::try_from(self.server_config.server_params.read().threads).unwrap(),
229
+ )
230
+ .inspect_err(|e| {
231
+ if let Some(err_val) = e.value() {
232
+ print_rb_backtrace(err_val);
233
+ }
234
+ })?;
235
+
236
+ info!(
237
+ pid = format!("{}", Pid::this()),
238
+ threads = thread_workers.len(),
239
+ binds = format!("{:?}", self.server_config.server_params.read().binds)
240
+ );
241
+
242
+ let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
243
+ let thread = self.clone().start_monitors(thread_workers.clone());
244
+ runtime.block_on(
245
+ async {
246
+ let server_params = self.server_config.server_params.read().clone();
247
+ server_params.middleware.get().unwrap().initialize_layers().await?;
248
+ let tokio_listeners = server_params.listeners.lock()
107
249
  .drain(..)
108
250
  .map(|list| {
109
251
  Arc::new(list.into_tokio_listener())
110
252
  })
111
253
  .collect::<Vec<_>>();
112
- let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
254
+
113
255
  for listener in tokio_listeners.iter() {
114
256
  let mut lifecycle_rx = self.lifecycle_channel.subscribe();
115
257
  let listener_info = Arc::new(listener.listener_info());
116
258
  let self_ref = self.clone();
117
259
  let listener = listener.clone();
118
260
  let shutdown_sender = shutdown_sender.clone();
119
-
261
+ let job_sender = job_sender.clone();
262
+ let workers_clone = thread_workers.clone();
120
263
  let listener_clone = listener.clone();
121
264
  let mut shutdown_receiver = shutdown_sender.subscribe();
122
265
  let shutdown_receiver_clone = shutdown_receiver.clone();
@@ -134,8 +277,9 @@ impl SingleMode {
134
277
  let strategy = strategy_clone.clone();
135
278
  let listener_info = listener_info.clone();
136
279
  let shutdown_receiver = shutdown_receiver.clone();
280
+ let job_sender = job_sender.clone();
137
281
  acceptor_task_set.spawn(async move {
138
- strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
282
+ strategy.serve_connection(accept_result, job_sender, listener_info, shutdown_receiver).await;
139
283
  });
140
284
  },
141
285
  Err(e) => debug!("Listener.accept failed {:?}", e),
@@ -144,16 +288,18 @@ impl SingleMode {
144
288
  break;
145
289
  }
146
290
  lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
147
- Ok(lifecycle_event) => {
148
- if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
149
- match e {
150
- ItsiError::Break() => break,
151
- _ => error!("Error in handle_lifecycle_event {:?}", e)
152
- }
291
+ Ok(LifecycleEvent::Shutdown) => {
292
+ shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
293
+ // Tell any in-progress connections to stop accepting new requests
294
+ tokio::time::sleep(Duration::from_millis(25)).await;
295
+ // Tell workers to stop processing requests once they've flushed their buffers.
296
+ for _i in 0..workers_clone.len() {
297
+ job_sender.send(RequestJob::Shutdown).await.unwrap();
153
298
  }
154
-
299
+ break;
155
300
  },
156
301
  Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
302
+ _ => {}
157
303
  }
158
304
  }
159
305
  }
@@ -164,8 +310,31 @@ impl SingleMode {
164
310
 
165
311
  while let Some(_res) = listener_task_set.join_next().await {}
166
312
 
167
- });
313
+ Ok::<(), ItsiError>(())
314
+ })?;
315
+
316
+ shutdown_sender.send(RunningPhase::Shutdown).ok();
317
+ let deadline = Instant::now()
318
+ + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
319
+
168
320
  runtime.shutdown_timeout(Duration::from_millis(100));
321
+
322
+ loop {
323
+ if thread_workers
324
+ .iter()
325
+ .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
326
+ {
327
+ funcall_no_ret(thread, "join", ()).ok();
328
+ break;
329
+ }
330
+ sleep(Duration::from_millis(50));
331
+ }
332
+
333
+ if self.restart_requested.load(Ordering::SeqCst) {
334
+ self.restart_requested.store(false, Ordering::SeqCst);
335
+ info!("Worker restarting");
336
+ self.run()?;
337
+ }
169
338
  debug!("Runtime has shut down");
170
339
  Ok(())
171
340
  }
@@ -173,36 +342,32 @@ impl SingleMode {
173
342
  pub(crate) async fn serve_connection(
174
343
  &self,
175
344
  stream: IoStream,
345
+ job_sender: async_channel::Sender<RequestJob>,
176
346
  listener: Arc<ListenerInfo>,
177
347
  shutdown_channel: watch::Receiver<RunningPhase>,
178
348
  ) {
179
- let sender_clone = self.sender.clone();
180
349
  let addr = stream.addr();
181
350
  let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
182
- let server = self.server.clone();
183
351
  let executor = self.executor.clone();
184
352
  let mut shutdown_channel_clone = shutdown_channel.clone();
185
- let server = server.clone();
186
353
  let mut executor = executor.clone();
187
354
  let mut binding = executor.http1();
188
355
  let shutdown_channel = shutdown_channel_clone.clone();
356
+
357
+ let service = ItsiService {
358
+ inner: Arc::new(IstiServiceInner {
359
+ sender: job_sender.clone(),
360
+ server_params: self.server_config.server_params.read().clone(),
361
+ listener,
362
+ addr: addr.to_string(),
363
+ shutdown_channel: shutdown_channel.clone(),
364
+ }),
365
+ };
189
366
  let mut serve = Box::pin(
190
367
  binding
191
- .timer(TokioTimer::new())
368
+ .timer(TokioTimer::new()) // your existing timer
192
369
  .header_read_timeout(Duration::from_secs(1))
193
- .serve_connection_with_upgrades(
194
- io,
195
- service_fn(move |hyper_request: Request<Incoming>| {
196
- ItsiRequest::process_request(
197
- hyper_request,
198
- sender_clone.clone(),
199
- server.clone(),
200
- listener.clone(),
201
- addr.clone(),
202
- shutdown_channel.clone(),
203
- )
204
- }),
205
- ),
370
+ .serve_connection_with_upgrades(io, service),
206
371
  );
207
372
 
208
373
  tokio::select! {
@@ -213,7 +378,7 @@ impl SingleMode {
213
378
  debug!("Connection closed normally")
214
379
  },
215
380
  Err(res) => {
216
- debug!("Connection finished with error: {:?}", res)
381
+ debug!("Connection closed abruptly: {:?}", res)
217
382
  }
218
383
  }
219
384
  serve.as_mut().graceful_shutdown();
@@ -221,6 +386,7 @@ impl SingleMode {
221
386
  // A lifecycle event triggers shutdown.
222
387
  _ = shutdown_channel_clone.changed() => {
223
388
  // Initiate graceful shutdown.
389
+ info!("Starting graceful shutdown");
224
390
  serve.as_mut().graceful_shutdown();
225
391
 
226
392
  // Now await the connection to finish shutting down.
@@ -231,46 +397,25 @@ impl SingleMode {
231
397
  }
232
398
  }
233
399
 
234
- pub async fn handle_lifecycle_event(
235
- &self,
236
- lifecycle_event: LifecycleEvent,
237
- shutdown_sender: Sender<RunningPhase>,
238
- ) -> Result<()> {
239
- info!("Handling lifecycle event: {:?}", lifecycle_event);
240
- if let LifecycleEvent::Shutdown = lifecycle_event {
241
- //1. Stop accepting new connections.
242
- shutdown_sender.send(RunningPhase::ShutdownPending).ok();
243
- tokio::time::sleep(Duration::from_millis(25)).await;
244
-
245
- //2. Break out of work queues.
246
- for worker in &*self.thread_workers {
247
- worker.request_shutdown().await;
248
- }
249
-
250
- tokio::time::sleep(Duration::from_millis(25)).await;
251
-
252
- //3. Wait for all threads to finish.
253
- let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
254
- while Instant::now() < deadline {
255
- let alive_threads = self
256
- .thread_workers
257
- .iter()
258
- .filter(|worker| worker.poll_shutdown(deadline))
259
- .count();
260
- if alive_threads == 0 {
261
- break;
262
- }
263
- tokio::time::sleep(Duration::from_millis(200)).await;
264
- }
265
-
266
- //4. Force shutdown any stragglers
267
- shutdown_sender.send(RunningPhase::Shutdown).ok();
268
- self.thread_workers.iter().for_each(|worker| {
269
- worker.poll_shutdown(deadline);
270
- });
271
-
272
- return Err(ItsiError::Break());
400
+ /// Attempts to reload the config "live"
401
+ /// Not that when running in single mode this will not unload
402
+ /// old code. If you need a clean restart, use the `restart` (SIGUSR2) method instead
403
+ pub fn reload(&self) -> Result<()> {
404
+ let should_reexec = self.server_config.clone().reload(false)?;
405
+ if should_reexec {
406
+ self.server_config.dup_fds()?;
407
+ self.server_config.reload_exec()?;
273
408
  }
409
+ self.restart_requested.store(true, Ordering::SeqCst);
410
+ self.stop()?;
411
+ self.server_config.server_params.read().preload_ruby()?;
412
+ Ok(())
413
+ }
414
+
415
+ /// Restart the server while keeping connections open.
416
+ pub fn restart(&self) -> Result<()> {
417
+ self.server_config.dup_fds()?;
418
+ self.server_config.reload_exec()?;
274
419
  Ok(())
275
420
  }
276
421
  }
@@ -30,9 +30,18 @@ fn receive_signal(signum: i32, _: sighandler_t) {
30
30
  .ok();
31
31
  }
32
32
  }
33
+ libc::SIGINFO => {
34
+ SIGNAL_HANDLER_CHANNEL
35
+ .0
36
+ .send(LifecycleEvent::PrintInfo)
37
+ .ok();
38
+ }
33
39
  libc::SIGUSR1 => {
34
40
  SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
35
41
  }
42
+ libc::SIGUSR2 => {
43
+ SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Reload).ok();
44
+ }
36
45
  libc::SIGTTIN => {
37
46
  SIGNAL_HANDLER_CHANNEL
38
47
  .0
@@ -45,6 +54,12 @@ fn receive_signal(signum: i32, _: sighandler_t) {
45
54
  .send(LifecycleEvent::DecreaseWorkers)
46
55
  .ok();
47
56
  }
57
+ libc::SIGCHLD => {
58
+ SIGNAL_HANDLER_CHANNEL
59
+ .0
60
+ .send(LifecycleEvent::ChildTerminated)
61
+ .ok();
62
+ }
48
63
  _ => {}
49
64
  }
50
65
  }
@@ -54,10 +69,12 @@ pub fn reset_signal_handlers() -> bool {
54
69
  unsafe {
55
70
  libc::signal(libc::SIGTERM, receive_signal as usize);
56
71
  libc::signal(libc::SIGINT, receive_signal as usize);
72
+ libc::signal(libc::SIGINFO, receive_signal as usize);
57
73
  libc::signal(libc::SIGUSR1, receive_signal as usize);
58
74
  libc::signal(libc::SIGUSR2, receive_signal as usize);
59
75
  libc::signal(libc::SIGTTIN, receive_signal as usize);
60
76
  libc::signal(libc::SIGTTOU, receive_signal as usize);
77
+ libc::signal(libc::SIGCHLD, receive_signal as usize);
61
78
  }
62
79
  true
63
80
  }
@@ -66,9 +83,11 @@ pub fn clear_signal_handlers() {
66
83
  unsafe {
67
84
  libc::signal(libc::SIGTERM, libc::SIG_DFL);
68
85
  libc::signal(libc::SIGINT, libc::SIG_DFL);
86
+ libc::signal(libc::SIGINFO, libc::SIG_DFL);
69
87
  libc::signal(libc::SIGUSR1, libc::SIG_DFL);
70
88
  libc::signal(libc::SIGUSR2, libc::SIG_DFL);
71
89
  libc::signal(libc::SIGTTIN, libc::SIG_DFL);
72
90
  libc::signal(libc::SIGTTOU, libc::SIG_DFL);
91
+ libc::signal(libc::SIGCHLD, libc::SIG_DFL);
73
92
  }
74
93
  }