itsi 0.1.14 → 0.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +124 -109
  3. data/Cargo.toml +6 -0
  4. data/crates/itsi_error/Cargo.toml +1 -0
  5. data/crates/itsi_error/src/lib.rs +100 -10
  6. data/crates/itsi_scheduler/src/itsi_scheduler.rs +1 -1
  7. data/crates/itsi_server/Cargo.toml +8 -10
  8. data/crates/itsi_server/src/default_responses/html/401.html +68 -0
  9. data/crates/itsi_server/src/default_responses/html/403.html +68 -0
  10. data/crates/itsi_server/src/default_responses/html/404.html +68 -0
  11. data/crates/itsi_server/src/default_responses/html/413.html +71 -0
  12. data/crates/itsi_server/src/default_responses/html/429.html +68 -0
  13. data/crates/itsi_server/src/default_responses/html/500.html +71 -0
  14. data/crates/itsi_server/src/default_responses/html/502.html +71 -0
  15. data/crates/itsi_server/src/default_responses/html/503.html +68 -0
  16. data/crates/itsi_server/src/default_responses/html/504.html +69 -0
  17. data/crates/itsi_server/src/default_responses/html/index.html +238 -0
  18. data/crates/itsi_server/src/default_responses/json/401.json +6 -0
  19. data/crates/itsi_server/src/default_responses/json/403.json +6 -0
  20. data/crates/itsi_server/src/default_responses/json/404.json +6 -0
  21. data/crates/itsi_server/src/default_responses/json/413.json +6 -0
  22. data/crates/itsi_server/src/default_responses/json/429.json +6 -0
  23. data/crates/itsi_server/src/default_responses/json/500.json +6 -0
  24. data/crates/itsi_server/src/default_responses/json/502.json +6 -0
  25. data/crates/itsi_server/src/default_responses/json/503.json +6 -0
  26. data/crates/itsi_server/src/default_responses/json/504.json +6 -0
  27. data/crates/itsi_server/src/default_responses/mod.rs +11 -0
  28. data/crates/itsi_server/src/lib.rs +58 -26
  29. data/crates/itsi_server/src/prelude.rs +2 -0
  30. data/crates/itsi_server/src/ruby_types/README.md +21 -0
  31. data/crates/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +8 -6
  32. data/crates/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
  33. data/crates/itsi_server/src/ruby_types/{itsi_grpc_stream → itsi_grpc_response_stream}/mod.rs +121 -73
  34. data/crates/itsi_server/src/ruby_types/itsi_http_request.rs +103 -40
  35. data/crates/itsi_server/src/ruby_types/itsi_http_response.rs +8 -5
  36. data/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +4 -4
  37. data/crates/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +37 -17
  38. data/crates/itsi_server/src/ruby_types/itsi_server.rs +4 -3
  39. data/crates/itsi_server/src/ruby_types/mod.rs +6 -13
  40. data/crates/itsi_server/src/server/{bind.rs → binds/bind.rs} +23 -4
  41. data/crates/itsi_server/src/server/{listener.rs → binds/listener.rs} +24 -10
  42. data/crates/itsi_server/src/server/binds/mod.rs +4 -0
  43. data/crates/itsi_server/src/server/{tls.rs → binds/tls.rs} +9 -4
  44. data/crates/itsi_server/src/server/http_message_types.rs +97 -0
  45. data/crates/itsi_server/src/server/io_stream.rs +2 -1
  46. data/crates/itsi_server/src/server/middleware_stack/middleware.rs +28 -16
  47. data/crates/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +17 -8
  48. data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +47 -18
  49. data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +13 -9
  50. data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +50 -29
  51. data/crates/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +5 -2
  52. data/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs +37 -48
  53. data/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs +25 -20
  54. data/crates/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +14 -7
  55. data/crates/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
  56. data/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +125 -95
  57. data/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs +9 -5
  58. data/crates/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +1 -4
  59. data/crates/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +25 -19
  60. data/crates/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +4 -4
  61. data/crates/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
  62. data/crates/itsi_server/src/server/middleware_stack/middlewares/mod.rs +9 -4
  63. data/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +260 -62
  64. data/crates/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +29 -22
  65. data/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +6 -6
  66. data/crates/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +6 -5
  67. data/crates/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +4 -2
  68. data/crates/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +51 -18
  69. data/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +31 -13
  70. data/crates/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
  71. data/crates/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +13 -8
  72. data/crates/itsi_server/src/server/middleware_stack/mod.rs +101 -69
  73. data/crates/itsi_server/src/server/mod.rs +3 -9
  74. data/crates/itsi_server/src/server/process_worker.rs +21 -3
  75. data/crates/itsi_server/src/server/request_job.rs +2 -2
  76. data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +8 -3
  77. data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +26 -26
  78. data/crates/itsi_server/src/server/signal.rs +24 -41
  79. data/crates/itsi_server/src/server/size_limited_incoming.rs +101 -0
  80. data/crates/itsi_server/src/server/thread_worker.rs +59 -28
  81. data/crates/itsi_server/src/services/itsi_http_service.rs +239 -0
  82. data/crates/itsi_server/src/services/mime_types.rs +1416 -0
  83. data/crates/itsi_server/src/services/mod.rs +6 -0
  84. data/crates/itsi_server/src/services/password_hasher.rs +83 -0
  85. data/crates/itsi_server/src/{server → services}/rate_limiter.rs +35 -31
  86. data/crates/itsi_server/src/{server → services}/static_file_server.rs +521 -181
  87. data/crates/itsi_tracing/src/lib.rs +145 -55
  88. data/{Itsi.rb → foo/Itsi.rb} +6 -9
  89. data/gems/scheduler/Cargo.lock +7 -0
  90. data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
  91. data/gems/scheduler/test/helpers/test_helper.rb +0 -1
  92. data/gems/scheduler/test/test_address_resolve.rb +0 -1
  93. data/gems/scheduler/test/test_network_io.rb +1 -1
  94. data/gems/scheduler/test/test_process_wait.rb +0 -1
  95. data/gems/server/Cargo.lock +124 -109
  96. data/gems/server/exe/itsi +65 -19
  97. data/gems/server/itsi-server.gemspec +4 -3
  98. data/gems/server/lib/itsi/http_request/response_status_shortcodes.rb +74 -0
  99. data/gems/server/lib/itsi/http_request.rb +116 -17
  100. data/gems/server/lib/itsi/http_response.rb +2 -0
  101. data/gems/server/lib/itsi/passfile.rb +109 -0
  102. data/gems/server/lib/itsi/server/config/dsl.rb +160 -101
  103. data/gems/server/lib/itsi/server/config.rb +58 -23
  104. data/gems/server/lib/itsi/server/default_app/default_app.rb +25 -29
  105. data/gems/server/lib/itsi/server/default_app/index.html +113 -89
  106. data/gems/server/lib/itsi/server/{Itsi.rb → default_config/Itsi-rackup.rb} +1 -1
  107. data/gems/server/lib/itsi/server/default_config/Itsi.rb +107 -0
  108. data/gems/server/lib/itsi/server/grpc/grpc_call.rb +246 -0
  109. data/gems/server/lib/itsi/server/grpc/grpc_interface.rb +100 -0
  110. data/gems/server/lib/itsi/server/grpc/reflection/v1/reflection_pb.rb +26 -0
  111. data/gems/server/lib/itsi/server/grpc/reflection/v1/reflection_services_pb.rb +122 -0
  112. data/gems/server/lib/itsi/server/route_tester.rb +107 -0
  113. data/gems/server/lib/itsi/server/typed_handlers/param_parser.rb +200 -0
  114. data/gems/server/lib/itsi/server/typed_handlers/source_parser.rb +55 -0
  115. data/gems/server/lib/itsi/server/typed_handlers.rb +17 -0
  116. data/gems/server/lib/itsi/server/version.rb +1 -1
  117. data/gems/server/lib/itsi/server.rb +82 -12
  118. data/gems/server/lib/ruby_lsp/itsi/addon.rb +111 -0
  119. data/gems/server/lib/shell_completions/completions.rb +26 -0
  120. data/gems/server/test/helpers/test_helper.rb +2 -1
  121. data/lib/itsi/version.rb +1 -1
  122. data/sandbox/README.md +5 -0
  123. data/sandbox/itsi_file/Gemfile +4 -2
  124. data/sandbox/itsi_file/Gemfile.lock +48 -6
  125. data/sandbox/itsi_file/Itsi.rb +326 -129
  126. data/sandbox/itsi_file/call.json +1 -0
  127. data/sandbox/itsi_file/echo_client/Gemfile +10 -0
  128. data/sandbox/itsi_file/echo_client/Gemfile.lock +27 -0
  129. data/sandbox/itsi_file/echo_client/README.md +95 -0
  130. data/sandbox/itsi_file/echo_client/echo_client.rb +164 -0
  131. data/sandbox/itsi_file/echo_client/gen_proto.sh +17 -0
  132. data/sandbox/itsi_file/echo_client/lib/echo_pb.rb +16 -0
  133. data/sandbox/itsi_file/echo_client/lib/echo_services_pb.rb +29 -0
  134. data/sandbox/itsi_file/echo_client/run_client.rb +64 -0
  135. data/sandbox/itsi_file/echo_client/test_compressions.sh +20 -0
  136. data/sandbox/itsi_file/echo_service_nonitsi/Gemfile +10 -0
  137. data/sandbox/itsi_file/echo_service_nonitsi/Gemfile.lock +79 -0
  138. data/sandbox/itsi_file/echo_service_nonitsi/echo.proto +26 -0
  139. data/sandbox/itsi_file/echo_service_nonitsi/echo_pb.rb +16 -0
  140. data/sandbox/itsi_file/echo_service_nonitsi/echo_services_pb.rb +29 -0
  141. data/sandbox/itsi_file/echo_service_nonitsi/server.rb +52 -0
  142. data/sandbox/itsi_sandbox_async/config.ru +0 -1
  143. data/sandbox/itsi_sandbox_rack/Gemfile.lock +2 -2
  144. data/sandbox/itsi_sandbox_rails/Gemfile +2 -2
  145. data/sandbox/itsi_sandbox_rails/Gemfile.lock +76 -2
  146. data/sandbox/itsi_sandbox_rails/app/controllers/home_controller.rb +15 -0
  147. data/sandbox/itsi_sandbox_rails/config/environments/development.rb +1 -0
  148. data/sandbox/itsi_sandbox_rails/config/environments/production.rb +1 -0
  149. data/sandbox/itsi_sandbox_rails/config/routes.rb +2 -0
  150. data/sandbox/itsi_sinatra/app.rb +0 -1
  151. data/sandbox/static_files/.env +1 -0
  152. data/sandbox/static_files/404.html +25 -0
  153. data/sandbox/static_files/_DSC0102.NEF.jpg +0 -0
  154. data/sandbox/static_files/about.html +68 -0
  155. data/sandbox/static_files/tiny.html +1 -0
  156. data/sandbox/static_files/writebook.zip +0 -0
  157. data/tasks.txt +28 -33
  158. metadata +87 -26
  159. data/crates/itsi_error/src/from.rs +0 -68
  160. data/crates/itsi_server/src/ruby_types/itsi_grpc_request.rs +0 -147
  161. data/crates/itsi_server/src/ruby_types/itsi_grpc_response.rs +0 -19
  162. data/crates/itsi_server/src/server/itsi_service.rs +0 -172
  163. data/crates/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +0 -72
  164. data/crates/itsi_server/src/server/types.rs +0 -43
  165. data/gems/server/lib/itsi/server/grpc_interface.rb +0 -213
  166. data/sandbox/itsi_file/public/assets/index.html +0 -1
  167. /data/crates/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
  168. /data/crates/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +0 -0
  169. /data/crates/itsi_server/src/{server → services}/cache_store.rs +0 -0
@@ -1,4 +1,5 @@
1
1
  use super::serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode};
2
+ use core_affinity::CoreId;
2
3
  use itsi_error::{ItsiError, Result};
3
4
  use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread, fork};
4
5
  use itsi_tracing::error;
@@ -16,7 +17,7 @@ use nix::{
16
17
  use parking_lot::Mutex;
17
18
  use std::{
18
19
  process::{self, exit},
19
- sync::Arc,
20
+ sync::{Arc, LazyLock},
20
21
  time::{Duration, Instant},
21
22
  };
22
23
  use sysinfo::System;
@@ -41,6 +42,8 @@ impl Default for ProcessWorker {
41
42
  }
42
43
  }
43
44
 
45
+ static CORE_IDS: LazyLock<Vec<CoreId>> = LazyLock::new(|| core_affinity::get_core_ids().unwrap());
46
+
44
47
  impl ProcessWorker {
45
48
  #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]
46
49
  pub(crate) fn boot(&self, cluster_template: Arc<ClusterMode>) -> Result<()> {
@@ -77,6 +80,16 @@ impl ProcessWorker {
77
80
  }
78
81
  match SingleMode::new(cluster_template.server_config.clone()) {
79
82
  Ok(single_mode) => {
83
+ if cluster_template
84
+ .server_config
85
+ .server_params
86
+ .read()
87
+ .pin_worker_cores
88
+ {
89
+ core_affinity::set_for_current(
90
+ CORE_IDS[self.worker_id % CORE_IDS.len()],
91
+ );
92
+ }
80
93
  Arc::new(single_mode).run().ok();
81
94
  }
82
95
  Err(e) => {
@@ -174,8 +187,12 @@ impl ProcessWorker {
174
187
  pub(crate) fn request_shutdown(&self) {
175
188
  let child_pid = *self.child_pid.lock();
176
189
  if let Some(pid) = child_pid {
177
- if let Err(e) = kill(pid, SIGTERM) {
178
- error!("Failed to send SIGTERM to process {}: {}", pid, e);
190
+ if self.is_alive() {
191
+ if let Err(e) = kill(pid, SIGTERM) {
192
+ error!("Failed to send SIGTERM to process {}: {}", pid, e);
193
+ }
194
+ } else {
195
+ error!("Trying to shutdown a dead process");
179
196
  }
180
197
  }
181
198
  }
@@ -184,6 +201,7 @@ impl ProcessWorker {
184
201
  let child_pid = *self.child_pid.lock();
185
202
  if let Some(pid) = child_pid {
186
203
  if self.is_alive() {
204
+ info!("Worker still alive, sending SIGKILL {}", pid);
187
205
  if let Err(e) = kill(pid, SIGKILL) {
188
206
  error!("Failed to force kill process {}: {}", pid, e);
189
207
  }
@@ -1,4 +1,4 @@
1
- use crate::ruby_types::{itsi_grpc_request::ItsiGrpcRequest, itsi_http_request::ItsiHttpRequest};
1
+ use crate::ruby_types::{itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest};
2
2
  use itsi_rb_helpers::HeapValue;
3
3
  use magnus::block::Proc;
4
4
  use std::sync::Arc;
@@ -6,6 +6,6 @@ use std::sync::Arc;
6
6
  #[derive(Debug)]
7
7
  pub enum RequestJob {
8
8
  ProcessHttpRequest(ItsiHttpRequest, Arc<HeapValue<Proc>>),
9
- ProcessGrpcRequest(ItsiGrpcRequest, Arc<HeapValue<Proc>>),
9
+ ProcessGrpcRequest(ItsiGrpcCall, Arc<HeapValue<Proc>>),
10
10
  Shutdown,
11
11
  }
@@ -202,7 +202,7 @@ impl ClusterMode {
202
202
  }
203
203
  }
204
204
 
205
- Err(ItsiError::Break())
205
+ Err(ItsiError::Break)
206
206
  }
207
207
 
208
208
  pub async fn print_info(self: Arc<Self>) -> Result<()> {
@@ -320,7 +320,7 @@ impl ClusterMode {
320
320
  Ok(lifecycle_event) => {
321
321
  if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await{
322
322
  match e {
323
- ItsiError::Break() => break,
323
+ ItsiError::Break => break,
324
324
  _ => error!("Error in handle_lifecycle_event {:?}", e)
325
325
  }
326
326
  }
@@ -331,7 +331,12 @@ impl ClusterMode {
331
331
  }
332
332
  }
333
333
  });
334
-
334
+ self.server_config
335
+ .server_params
336
+ .write()
337
+ .listeners
338
+ .lock()
339
+ .drain(..);
335
340
  Ok(())
336
341
  }
337
342
  }
@@ -1,14 +1,14 @@
1
1
  use crate::{
2
2
  ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
3
3
  server::{
4
+ binds::listener::ListenerInfo,
4
5
  io_stream::IoStream,
5
- itsi_service::{IstiServiceInner, ItsiService},
6
6
  lifecycle_event::LifecycleEvent,
7
- listener::ListenerInfo,
8
7
  request_job::RequestJob,
9
- signal::SIGNAL_HANDLER_CHANNEL,
8
+ signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
10
9
  thread_worker::{build_thread_workers, ThreadWorker},
11
10
  },
11
+ services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
12
12
  };
13
13
  use hyper_util::{
14
14
  rt::{TokioExecutor, TokioIo, TokioTimer},
@@ -24,7 +24,6 @@ use nix::unistd::Pid;
24
24
  use parking_lot::RwLock;
25
25
  use std::{
26
26
  collections::HashMap,
27
- num::NonZeroU8,
28
27
  pin::Pin,
29
28
  sync::{
30
29
  atomic::{AtomicBool, Ordering},
@@ -102,10 +101,6 @@ impl SingleMode {
102
101
  self.server_config.server_params.read().binds
103
102
  );
104
103
 
105
- println!(
106
- " ─ script_name: {:?}",
107
- self.server_config.server_params.read().script_name
108
- );
109
104
  println!(
110
105
  " ─ streaming body: {:?}",
111
106
  self.server_config.server_params.read().streamable_body
@@ -222,25 +217,24 @@ impl SingleMode {
222
217
  let mut listener_task_set = JoinSet::new();
223
218
  let runtime = self.build_runtime();
224
219
 
225
- let (thread_workers, job_sender) = build_thread_workers(
226
- self.server_config.server_params.read().clone(),
227
- Pid::this(),
228
- NonZeroU8::try_from(self.server_config.server_params.read().threads).unwrap(),
229
- )
230
- .inspect_err(|e| {
231
- if let Some(err_val) = e.value() {
232
- print_rb_backtrace(err_val);
233
- }
234
- })?;
220
+ let (thread_workers, job_sender, nonblocking_sender) =
221
+ build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
222
+ .inspect_err(|e| {
223
+ if let Some(err_val) = e.value() {
224
+ print_rb_backtrace(err_val);
225
+ }
226
+ })?;
235
227
 
236
228
  info!(
237
- pid = format!("{}", Pid::this()),
238
229
  threads = thread_workers.len(),
239
230
  binds = format!("{:?}", self.server_config.server_params.read().binds)
240
231
  );
241
232
 
242
233
  let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
243
234
  let thread = self.clone().start_monitors(thread_workers.clone());
235
+ if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
236
+ return Ok(());
237
+ }
244
238
  runtime.block_on(
245
239
  async {
246
240
  let server_params = self.server_config.server_params.read().clone();
@@ -254,11 +248,13 @@ impl SingleMode {
254
248
 
255
249
  for listener in tokio_listeners.iter() {
256
250
  let mut lifecycle_rx = self.lifecycle_channel.subscribe();
251
+
257
252
  let listener_info = Arc::new(listener.listener_info());
258
253
  let self_ref = self.clone();
259
254
  let listener = listener.clone();
260
255
  let shutdown_sender = shutdown_sender.clone();
261
256
  let job_sender = job_sender.clone();
257
+ let nonblocking_sender = nonblocking_sender.clone();
262
258
  let workers_clone = thread_workers.clone();
263
259
  let listener_clone = listener.clone();
264
260
  let mut shutdown_receiver = shutdown_sender.subscribe();
@@ -278,8 +274,9 @@ impl SingleMode {
278
274
  let listener_info = listener_info.clone();
279
275
  let shutdown_receiver = shutdown_receiver.clone();
280
276
  let job_sender = job_sender.clone();
277
+ let nonblocking_sender = nonblocking_sender.clone();
281
278
  acceptor_task_set.spawn(async move {
282
- strategy.serve_connection(accept_result, job_sender, listener_info, shutdown_receiver).await;
279
+ strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
283
280
  });
284
281
  },
285
282
  Err(e) => debug!("Listener.accept failed {:?}", e),
@@ -290,11 +287,10 @@ impl SingleMode {
290
287
  lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
291
288
  Ok(LifecycleEvent::Shutdown) => {
292
289
  shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
293
- // Tell any in-progress connections to stop accepting new requests
294
290
  tokio::time::sleep(Duration::from_millis(25)).await;
295
- // Tell workers to stop processing requests once they've flushed their buffers.
296
291
  for _i in 0..workers_clone.len() {
297
292
  job_sender.send(RequestJob::Shutdown).await.unwrap();
293
+ nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
298
294
  }
299
295
  break;
300
296
  },
@@ -310,6 +306,9 @@ impl SingleMode {
310
306
 
311
307
  while let Some(_res) = listener_task_set.join_next().await {}
312
308
 
309
+ // Explicitly drop all listeners to ensure file descriptors are released
310
+ drop(tokio_listeners);
311
+
313
312
  Ok::<(), ItsiError>(())
314
313
  })?;
315
314
 
@@ -343,6 +342,7 @@ impl SingleMode {
343
342
  &self,
344
343
  stream: IoStream,
345
344
  job_sender: async_channel::Sender<RequestJob>,
345
+ nonblocking_sender: async_channel::Sender<RequestJob>,
346
346
  listener: Arc<ListenerInfo>,
347
347
  shutdown_channel: watch::Receiver<RunningPhase>,
348
348
  ) {
@@ -354,9 +354,10 @@ impl SingleMode {
354
354
  let mut binding = executor.http1();
355
355
  let shutdown_channel = shutdown_channel_clone.clone();
356
356
 
357
- let service = ItsiService {
358
- inner: Arc::new(IstiServiceInner {
357
+ let service = ItsiHttpService {
358
+ inner: Arc::new(ItsiHttpServiceInner {
359
359
  sender: job_sender.clone(),
360
+ nonblocking_sender: nonblocking_sender.clone(),
360
361
  server_params: self.server_config.server_params.read().clone(),
361
362
  listener,
362
363
  addr: addr.to_string(),
@@ -365,7 +366,7 @@ impl SingleMode {
365
366
  };
366
367
  let mut serve = Box::pin(
367
368
  binding
368
- .timer(TokioTimer::new()) // your existing timer
369
+ .timer(TokioTimer::new())
369
370
  .header_read_timeout(Duration::from_secs(1))
370
371
  .serve_connection_with_upgrades(io, service),
371
372
  );
@@ -386,7 +387,6 @@ impl SingleMode {
386
387
  // A lifecycle event triggers shutdown.
387
388
  _ = shutdown_channel_clone.changed() => {
388
389
  // Initiate graceful shutdown.
389
- info!("Starting graceful shutdown");
390
390
  serve.as_mut().graceful_shutdown();
391
391
 
392
392
  // Now await the connection to finish shutting down.
@@ -1,71 +1,54 @@
1
- use std::sync::{atomic::AtomicI8, LazyLock};
1
+ use std::sync::{
2
+ atomic::{AtomicBool, AtomicI8},
3
+ LazyLock,
4
+ };
2
5
 
3
6
  use nix::libc::{self, sighandler_t};
4
7
  use tokio::sync::{self, broadcast};
5
8
 
6
9
  use super::lifecycle_event::LifecycleEvent;
7
10
 
11
+ pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
12
+ pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
8
13
  pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
9
14
  broadcast::Sender<LifecycleEvent>,
10
15
  broadcast::Receiver<LifecycleEvent>,
11
16
  )> = LazyLock::new(|| sync::broadcast::channel(5));
12
17
 
13
- pub fn send_shutdown_event() {
14
- SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
18
+ pub fn send_lifecycle_event(event: LifecycleEvent) {
19
+ SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
15
20
  }
16
21
 
17
- pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
18
22
  fn receive_signal(signum: i32, _: sighandler_t) {
19
23
  SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
20
- match signum {
24
+ let event = match signum {
21
25
  libc::SIGTERM | libc::SIGINT => {
26
+ SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
22
27
  SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
23
28
  if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
24
- SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
29
+ Some(LifecycleEvent::Shutdown)
25
30
  } else {
26
31
  // Not messing about. Force shutdown.
27
- SIGNAL_HANDLER_CHANNEL
28
- .0
29
- .send(LifecycleEvent::ForceShutdown)
30
- .ok();
32
+ Some(LifecycleEvent::ForceShutdown)
31
33
  }
32
34
  }
33
- libc::SIGUSR2 => {
34
- SIGNAL_HANDLER_CHANNEL
35
- .0
36
- .send(LifecycleEvent::PrintInfo)
37
- .ok();
38
- }
39
- libc::SIGUSR1 => {
40
- SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
41
- }
42
- libc::SIGHUP => {
43
- SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Reload).ok();
44
- }
45
- libc::SIGTTIN => {
46
- SIGNAL_HANDLER_CHANNEL
47
- .0
48
- .send(LifecycleEvent::IncreaseWorkers)
49
- .ok();
50
- }
51
- libc::SIGTTOU => {
52
- SIGNAL_HANDLER_CHANNEL
53
- .0
54
- .send(LifecycleEvent::DecreaseWorkers)
55
- .ok();
56
- }
57
- libc::SIGCHLD => {
58
- SIGNAL_HANDLER_CHANNEL
59
- .0
60
- .send(LifecycleEvent::ChildTerminated)
61
- .ok();
62
- }
63
- _ => {}
35
+ libc::SIGUSR2 => Some(LifecycleEvent::PrintInfo),
36
+ libc::SIGUSR1 => Some(LifecycleEvent::Restart),
37
+ libc::SIGHUP => Some(LifecycleEvent::Reload),
38
+ libc::SIGTTIN => Some(LifecycleEvent::IncreaseWorkers),
39
+ libc::SIGTTOU => Some(LifecycleEvent::DecreaseWorkers),
40
+ libc::SIGCHLD => Some(LifecycleEvent::ChildTerminated),
41
+ _ => None,
42
+ };
43
+
44
+ if let Some(event) = event {
45
+ send_lifecycle_event(event);
64
46
  }
65
47
  }
66
48
 
67
49
  pub fn reset_signal_handlers() -> bool {
68
50
  SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
51
+ SHUTDOWN_REQUESTED.store(false, std::sync::atomic::Ordering::SeqCst);
69
52
  unsafe {
70
53
  libc::signal(libc::SIGTERM, receive_signal as usize);
71
54
  libc::signal(libc::SIGINT, receive_signal as usize);
@@ -0,0 +1,101 @@
1
+ use bytes::Buf;
2
+ use hyper::body::Body;
3
+ use hyper::body::Frame;
4
+ use hyper::body::SizeHint;
5
+ use std::error::Error;
6
+ use std::fmt;
7
+ use std::ops::Deref;
8
+ use std::pin::Pin;
9
+ use std::sync::atomic::AtomicUsize;
10
+ use std::sync::atomic::Ordering;
11
+ use std::task::Context;
12
+ use std::task::Poll;
13
+
14
+ /// Custom error to indicate that the maximum body size was exceeded.
15
+ #[derive(Debug)]
16
+ pub struct MaxBodySizeReached;
17
+ impl fmt::Display for MaxBodySizeReached {
18
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
19
+ write!(f, "Maximum body size reached")
20
+ }
21
+ }
22
+
23
+ impl Error for MaxBodySizeReached {}
24
+
25
+ #[derive(Debug)]
26
+ pub struct SizeLimitedIncoming<B> {
27
+ pub inner: B,
28
+ pub limit: AtomicUsize,
29
+ current: usize,
30
+ }
31
+
32
+ impl<B> Deref for SizeLimitedIncoming<B> {
33
+ type Target = B;
34
+
35
+ fn deref(&self) -> &Self::Target {
36
+ &self.inner
37
+ }
38
+ }
39
+
40
+ impl<B> SizeLimitedIncoming<B> {
41
+ pub fn new(inner: B) -> Self {
42
+ Self {
43
+ inner,
44
+ limit: AtomicUsize::new(usize::MAX),
45
+ current: 0,
46
+ }
47
+ }
48
+ }
49
+
50
+ impl<B> Body for SizeLimitedIncoming<B>
51
+ where
52
+ B: Body + Unpin,
53
+ B::Data: Buf,
54
+ // Ensure that the inner error converts into our boxed error type.
55
+ B::Error: Into<Box<dyn Error + Send + Sync>>,
56
+ {
57
+ type Data = B::Data;
58
+ type Error = Box<dyn Error + Send + Sync>;
59
+
60
+ fn poll_frame(
61
+ mut self: Pin<&mut Self>,
62
+ cx: &mut Context<'_>,
63
+ ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
64
+ // Pin the inner body.
65
+ let inner = Pin::new(&mut self.inner);
66
+ match inner.poll_frame(cx) {
67
+ Poll::Ready(Some(Ok(frame))) => {
68
+ // Use public methods since we cannot match on the private enum.
69
+ if frame.is_data() {
70
+ match frame.into_data() {
71
+ Ok(data) => {
72
+ let len = data.remaining();
73
+ self.current += len;
74
+ if self.current > self.limit.load(Ordering::Relaxed) {
75
+ Poll::Ready(Some(Err(Box::new(MaxBodySizeReached))))
76
+ } else {
77
+ Poll::Ready(Some(Ok(Frame::data(data))))
78
+ }
79
+ }
80
+ // Should not occur if is_data() was true, but pass through if it does.
81
+ Err(frame) => Poll::Ready(Some(Ok(frame))),
82
+ }
83
+ } else {
84
+ // For non-data frames (e.g. trailers), just pass them along.
85
+ Poll::Ready(Some(Ok(frame)))
86
+ }
87
+ }
88
+ Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
89
+ Poll::Ready(None) => Poll::Ready(None),
90
+ Poll::Pending => Poll::Pending,
91
+ }
92
+ }
93
+
94
+ fn is_end_stream(&self) -> bool {
95
+ self.inner.is_end_stream()
96
+ }
97
+
98
+ fn size_hint(&self) -> SizeHint {
99
+ self.inner.size_hint()
100
+ }
101
+ }
@@ -11,7 +11,6 @@ use magnus::{
11
11
  use nix::unistd::Pid;
12
12
  use parking_lot::{Mutex, RwLock};
13
13
  use std::{
14
- num::NonZeroU8,
15
14
  ops::Deref,
16
15
  sync::{
17
16
  atomic::{AtomicBool, AtomicU64, Ordering},
@@ -24,7 +23,7 @@ use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
24
23
  use tracing::instrument;
25
24
 
26
25
  use crate::ruby_types::{
27
- itsi_grpc_request::ItsiGrpcRequest, itsi_http_request::ItsiHttpRequest,
26
+ itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
28
27
  itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
29
28
  };
30
29
 
@@ -55,34 +54,66 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
55
54
  });
56
55
 
57
56
  pub struct TerminateWakerSignal(bool);
58
- type ThreadWorkerBuildResult = Result<(Arc<Vec<Arc<ThreadWorker>>>, Sender<RequestJob>)>;
57
+ type ThreadWorkerBuildResult = Result<(
58
+ Arc<Vec<Arc<ThreadWorker>>>,
59
+ Sender<RequestJob>,
60
+ Sender<RequestJob>,
61
+ )>;
59
62
 
60
- #[instrument(name = "boot", parent=None, skip(params, threads, pid))]
61
- pub fn build_thread_workers(
62
- params: Arc<ServerParams>,
63
- pid: Pid,
64
- threads: NonZeroU8,
65
- ) -> ThreadWorkerBuildResult {
66
- let (sender, receiver) = async_channel::bounded((threads.get() as u16 * 30) as usize);
67
- let receiver_ref = Arc::new(receiver);
68
- let sender_ref = sender;
63
+ #[instrument(name = "boot", parent=None, skip(params, pid))]
64
+ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
65
+ let blocking_thread_count = params.threads;
66
+ let nonblocking_thread_count = params.scheduler_threads;
67
+
68
+ let (blocking_sender, blocking_receiver) =
69
+ async_channel::bounded((blocking_thread_count as u16 * 30) as usize);
70
+ let blocking_receiver_ref = Arc::new(blocking_receiver);
71
+ let blocking_sender_ref = blocking_sender;
69
72
  let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
73
+
74
+ let mut workers = (1..=blocking_thread_count)
75
+ .map(|id| {
76
+ ThreadWorker::new(
77
+ params.clone(),
78
+ id,
79
+ format!("{:?}#{:?}", pid, id),
80
+ blocking_receiver_ref.clone(),
81
+ blocking_sender_ref.clone(),
82
+ if nonblocking_thread_count.is_some() {
83
+ None
84
+ } else {
85
+ scheduler_class
86
+ },
87
+ )
88
+ })
89
+ .collect::<Result<Vec<_>>>()?;
90
+
91
+ let nonblocking_sender_ref = if let (Some(nonblocking_thread_count), Some(scheduler_class)) =
92
+ (nonblocking_thread_count, scheduler_class)
93
+ {
94
+ let (nonblocking_sender, nonblocking_receiver) =
95
+ async_channel::bounded((nonblocking_thread_count as u16 * 30) as usize);
96
+ let nonblocking_receiver_ref = Arc::new(nonblocking_receiver);
97
+ let nonblocking_sender_ref = nonblocking_sender.clone();
98
+ for id in 0..nonblocking_thread_count {
99
+ workers.push(ThreadWorker::new(
100
+ params.clone(),
101
+ id,
102
+ format!("{:?}#{:?}", pid, id),
103
+ nonblocking_receiver_ref.clone(),
104
+ nonblocking_sender_ref.clone(),
105
+ Some(scheduler_class),
106
+ )?)
107
+ }
108
+ nonblocking_sender
109
+ } else {
110
+ blocking_sender_ref.clone()
111
+ };
112
+
70
113
  Ok((
71
- Arc::new(
72
- (1..=u8::from(threads))
73
- .map(|id| {
74
- ThreadWorker::new(
75
- params.clone(),
76
- id,
77
- format!("{:?}#{:?}", pid, id),
78
- receiver_ref.clone(),
79
- sender_ref.clone(),
80
- scheduler_class,
81
- )
82
- })
83
- .collect::<Result<Vec<_>>>()?,
84
- ),
85
- sender_ref,
114
+ Arc::new(workers),
115
+ blocking_sender_ref,
116
+ nonblocking_sender_ref,
86
117
  ))
87
118
  }
88
119
 
@@ -259,7 +290,7 @@ impl ThreadWorker {
259
290
  *ID_SCHEDULE,
260
291
  (app_proc.as_value(), request),
261
292
  ) {
262
- ItsiGrpcRequest::internal_error(ruby, response, err)
293
+ ItsiGrpcCall::internal_error(ruby, response, err)
263
294
  }
264
295
  }
265
296
  RequestJob::Shutdown => return true,