itsi-scheduler 0.1.5 → 0.2.2

This diff shows the changes between publicly available package versions as they were released to a supported public registry. It is provided for informational purposes only.
Files changed (155)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +120 -52
  3. data/README.md +57 -24
  4. data/Rakefile +0 -4
  5. data/ext/itsi_acme/Cargo.toml +86 -0
  6. data/ext/itsi_acme/examples/high_level.rs +63 -0
  7. data/ext/itsi_acme/examples/high_level_warp.rs +52 -0
  8. data/ext/itsi_acme/examples/low_level.rs +87 -0
  9. data/ext/itsi_acme/examples/low_level_axum.rs +66 -0
  10. data/ext/itsi_acme/src/acceptor.rs +81 -0
  11. data/ext/itsi_acme/src/acme.rs +354 -0
  12. data/ext/itsi_acme/src/axum.rs +86 -0
  13. data/ext/itsi_acme/src/cache.rs +39 -0
  14. data/ext/itsi_acme/src/caches/boxed.rs +80 -0
  15. data/ext/itsi_acme/src/caches/composite.rs +69 -0
  16. data/ext/itsi_acme/src/caches/dir.rs +106 -0
  17. data/ext/itsi_acme/src/caches/mod.rs +11 -0
  18. data/ext/itsi_acme/src/caches/no.rs +78 -0
  19. data/ext/itsi_acme/src/caches/test.rs +136 -0
  20. data/ext/itsi_acme/src/config.rs +172 -0
  21. data/ext/itsi_acme/src/https_helper.rs +69 -0
  22. data/ext/itsi_acme/src/incoming.rs +142 -0
  23. data/ext/itsi_acme/src/jose.rs +161 -0
  24. data/ext/itsi_acme/src/lib.rs +142 -0
  25. data/ext/itsi_acme/src/resolver.rs +59 -0
  26. data/ext/itsi_acme/src/state.rs +424 -0
  27. data/ext/itsi_error/Cargo.toml +1 -0
  28. data/ext/itsi_error/src/lib.rs +106 -7
  29. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  30. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  31. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  32. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  33. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  34. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  35. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  36. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  37. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  38. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  39. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  40. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  41. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  42. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  43. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  44. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  45. data/ext/itsi_rb_helpers/src/lib.rs +63 -12
  46. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  47. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  48. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  49. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  50. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  51. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  52. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  53. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  54. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  55. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  56. data/ext/itsi_scheduler/Cargo.toml +1 -1
  57. data/ext/itsi_scheduler/src/itsi_scheduler.rs +9 -3
  58. data/ext/itsi_scheduler/src/lib.rs +1 -0
  59. data/ext/itsi_server/Cargo.lock +2956 -0
  60. data/ext/itsi_server/Cargo.toml +73 -29
  61. data/ext/itsi_server/src/default_responses/mod.rs +11 -0
  62. data/ext/itsi_server/src/env.rs +43 -0
  63. data/ext/itsi_server/src/lib.rs +114 -75
  64. data/ext/itsi_server/src/prelude.rs +2 -0
  65. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  66. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
  67. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
  68. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
  69. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +362 -0
  70. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
  71. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +233 -0
  72. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +565 -0
  73. data/ext/itsi_server/src/ruby_types/itsi_server.rs +86 -0
  74. data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
  75. data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +59 -24
  76. data/ext/itsi_server/src/server/binds/listener.rs +444 -0
  77. data/ext/itsi_server/src/server/binds/mod.rs +4 -0
  78. data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +57 -19
  79. data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +120 -31
  80. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  81. data/ext/itsi_server/src/server/http_message_types.rs +97 -0
  82. data/ext/itsi_server/src/server/io_stream.rs +2 -1
  83. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  84. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +170 -0
  85. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +63 -0
  86. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +94 -0
  87. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +94 -0
  88. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +343 -0
  89. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +151 -0
  90. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +316 -0
  91. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +301 -0
  92. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +193 -0
  93. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +64 -0
  94. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +192 -0
  95. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +171 -0
  96. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +198 -0
  97. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
  98. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +209 -0
  99. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  100. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
  101. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +116 -0
  102. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +411 -0
  103. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +142 -0
  104. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +55 -0
  105. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +54 -0
  106. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +51 -0
  107. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
  108. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +187 -0
  109. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
  110. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +173 -0
  111. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +31 -0
  112. data/ext/itsi_server/src/server/middleware_stack/mod.rs +381 -0
  113. data/ext/itsi_server/src/server/mod.rs +7 -5
  114. data/ext/itsi_server/src/server/process_worker.rs +65 -14
  115. data/ext/itsi_server/src/server/redirect_type.rs +26 -0
  116. data/ext/itsi_server/src/server/request_job.rs +11 -0
  117. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +150 -50
  118. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  119. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +399 -165
  120. data/ext/itsi_server/src/server/signal.rs +33 -26
  121. data/ext/itsi_server/src/server/size_limited_incoming.rs +107 -0
  122. data/ext/itsi_server/src/server/thread_worker.rs +218 -107
  123. data/ext/itsi_server/src/services/cache_store.rs +74 -0
  124. data/ext/itsi_server/src/services/itsi_http_service.rs +257 -0
  125. data/ext/itsi_server/src/services/mime_types.rs +1416 -0
  126. data/ext/itsi_server/src/services/mod.rs +6 -0
  127. data/ext/itsi_server/src/services/password_hasher.rs +83 -0
  128. data/ext/itsi_server/src/services/rate_limiter.rs +580 -0
  129. data/ext/itsi_server/src/services/static_file_server.rs +1340 -0
  130. data/ext/itsi_tracing/Cargo.toml +1 -0
  131. data/ext/itsi_tracing/src/lib.rs +362 -33
  132. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  133. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  134. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  135. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  136. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  137. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  138. data/itsi-scheduler-100.png +0 -0
  139. data/lib/itsi/scheduler/version.rb +1 -1
  140. data/lib/itsi/scheduler.rb +11 -6
  141. metadata +117 -24
  142. data/CHANGELOG.md +0 -5
  143. data/CODE_OF_CONDUCT.md +0 -132
  144. data/LICENSE.txt +0 -21
  145. data/ext/itsi_error/src/from.rs +0 -71
  146. data/ext/itsi_server/extconf.rb +0 -6
  147. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  148. data/ext/itsi_server/src/request/itsi_request.rs +0 -277
  149. data/ext/itsi_server/src/request/mod.rs +0 -1
  150. data/ext/itsi_server/src/response/mod.rs +0 -1
  151. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
  152. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
  153. data/ext/itsi_server/src/server/itsi_server.rs +0 -244
  154. data/ext/itsi_server/src/server/listener.rs +0 -327
  155. data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0

data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +150 -50

@@ -1,14 +1,11 @@
-use crate::server::{
-    itsi_server::Server, lifecycle_event::LifecycleEvent, listener::Listener,
-    process_worker::ProcessWorker,
-};
+use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig;
+use crate::server::signal::SIGNAL_HANDLER_CHANNEL;
+use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker};
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{call_without_gvl, create_ruby_thread};
+use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread};
 use itsi_tracing::{error, info, warn};
-use nix::{
-    libc::{self, exit},
-    unistd::Pid,
-};
+use magnus::Value;
+use nix::{libc::exit, unistd::Pid};
 
 use std::{
     sync::{atomic::AtomicUsize, Arc},
@@ -19,10 +16,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners: Arc<Vec<Arc<Listener>>>,
-    pub server: Arc<Server>,
+    pub server_config: Arc<ItsiServerConfig>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -32,15 +28,8 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
     parking_lot::Mutex::new(None);
 
 impl ClusterMode {
-    pub fn new(
-        server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
-        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-    ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
-        let process_workers = (0..server.workers)
+    pub fn new(server_config: Arc<ItsiServerConfig>) -> Self {
+        let process_workers = (0..server_config.server_params.read().workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
                 ..Default::default()
@@ -48,10 +37,9 @@ impl ClusterMode {
             .collect();
 
         Self {
-            listeners,
-            server,
+            server_config,
             process_workers: parking_lot::Mutex::new(process_workers),
-            lifecycle_channel,
+            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
         }
     }
 
@@ -66,6 +54,12 @@ impl ClusterMode {
             .expect("Failed to build Tokio runtime")
     }
 
+    pub fn invoke_hook(&self, hook_name: &str) {
+        if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
+            call_with_gvl(|_| hook.call::<_, Value>(()).ok());
+        }
+    }
+
     #[allow(clippy::await_holding_lock)]
     pub async fn handle_lifecycle_event(
         self: Arc<Self>,
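
Note: the new invoke_hook helper looks up an optional user hook by name under the server_params read lock and re-enters the GVL (call_with_gvl) before invoking it, so an unregistered hook is simply a no-op. A minimal sketch of that lookup-and-dispatch shape, with plain Rust closures as hypothetical stand-ins for the Ruby callables the real registry stores:

use std::collections::HashMap;

// Hypothetical registry: plain closures stand in for the stored Ruby
// callables; the real invoke_hook also re-acquires the GVL before calling.
struct Hooks {
    registry: HashMap<String, Box<dyn Fn() + Send + Sync>>,
}

impl Hooks {
    fn invoke(&self, name: &str) {
        // Missing hooks are skipped, mirroring `hooks.get(hook_name)`.
        if let Some(hook) = self.registry.get(name) {
            hook();
        }
    }
}

fn main() {
    let mut registry: HashMap<String, Box<dyn Fn() + Send + Sync>> = HashMap::new();
    registry.insert("before_fork".into(), Box::new(|| println!("before_fork fired")));
    let hooks = Hooks { registry };
    hooks.invoke("before_fork"); // runs the registered callback
    hooks.invoke("after_start"); // not registered: silently does nothing
}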
@@ -73,13 +67,63 @@
     ) -> Result<()> {
         match lifecycle_event {
             LifecycleEvent::Start => Ok(()),
+            LifecycleEvent::PrintInfo => {
+                self.print_info().await?;
+                Ok(())
+            }
             LifecycleEvent::Shutdown => {
+                self.server_config.stop_watcher()?;
                 self.shutdown().await?;
+                self.invoke_hook("before_shutdown");
                 Ok(())
             }
             LifecycleEvent::Restart => {
-                for worker in self.process_workers.lock().iter() {
-                    worker.reboot(self.clone()).await?;
+                if self.server_config.check_config().await {
+                    self.invoke_hook("before_restart");
+                    self.server_config.dup_fds()?;
+                    self.shutdown().await.ok();
+                    info!("Shutdown complete. Calling reload exec");
+                    self.server_config.reload_exec()?;
+                }
+                Ok(())
+            }
+            LifecycleEvent::Reload => {
+                if !self.server_config.check_config().await {
+                    return Ok(());
+                }
+                let should_reexec = self.server_config.clone().reload(true)?;
+                if should_reexec {
+                    self.server_config.dup_fds()?;
+                    self.shutdown().await.ok();
+                    self.server_config.reload_exec()?;
+                }
+                let mut workers_to_load = self.server_config.server_params.read().workers;
+                let mut next_workers = Vec::new();
+                for worker in self.process_workers.lock().drain(..) {
+                    if workers_to_load == 0 {
+                        worker.graceful_shutdown(self.clone()).await
+                    } else {
+                        workers_to_load -= 1;
+                        worker.reboot(self.clone()).await?;
+                        next_workers.push(worker);
+                    }
+                }
+                self.process_workers.lock().extend(next_workers);
+                while workers_to_load > 0 {
+                    let mut workers = self.process_workers.lock();
+                    let worker = ProcessWorker {
+                        worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                        ..Default::default()
+                    };
+                    let worker_clone = worker.clone();
+                    let self_clone = self.clone();
+                    create_ruby_thread(move || {
+                        call_without_gvl(move || {
+                            worker_clone.boot(self_clone).ok();
+                        })
+                    });
+                    workers.push(worker);
+                    workers_to_load -= 1
                 }
                 Ok(())
             }
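
Note: the Reload arm above implements a rolling reload: existing workers are rebooted until the (possibly changed) target worker count is reached, surplus workers are gracefully shut down, and any shortfall is booted on fresh Ruby threads. A small self-contained simulation of that arithmetic (the Action and plan_reload names are illustrative, not part of the diff):

// Hypothetical simulation of the worker-count reconciliation above.
#[derive(Debug)]
enum Action {
    Reboot(u32),
    Shutdown(u32),
    BootNew,
}

fn plan_reload(current: Vec<u32>, target: usize) -> Vec<Action> {
    let mut remaining = target;
    let mut plan = Vec::new();
    for id in current {
        if remaining == 0 {
            plan.push(Action::Shutdown(id)); // surplus worker: shut down
        } else {
            remaining -= 1;
            plan.push(Action::Reboot(id)); // reuse slot: reboot in place
        }
    }
    // Shortfall: boot brand-new workers, as the `while workers_to_load > 0`
    // loop does on fresh Ruby threads.
    plan.extend((0..remaining).map(|_| Action::BootNew));
    plan
}

fn main() {
    // 3 live workers, new config asks for 5: reboot all 3, boot 2 new.
    println!("{:?}", plan_reload(vec![1, 2, 3], 5));
    // 3 live workers, new config asks for 2: reboot 2, shut down 1.
    println!("{:?}", plan_reload(vec![1, 2, 3], 2));
}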
@@ -106,8 +150,10 @@ impl ClusterMode {
         };
         if let Some(dropped_worker) = worker {
             dropped_worker.request_shutdown();
-            let force_kill_time =
-                Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            let force_kill_time = Instant::now()
+                + Duration::from_secs_f64(
+                    self.server_config.server_params.read().shutdown_timeout,
+                );
             while dropped_worker.is_alive() && force_kill_time > Instant::now() {
                 tokio::time::sleep(Duration::from_millis(100)).await;
             }
@@ -121,13 +167,20 @@
                 for worker in self.process_workers.lock().iter() {
                     worker.force_kill();
                 }
+                error!("Force shutdown!");
                 unsafe { exit(0) };
             }
+            LifecycleEvent::ChildTerminated => {
+                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
+                    i.send(()).ok();
+                });
+                Ok(())
+            }
         }
     }
 
     pub async fn shutdown(&self) -> Result<()> {
-        let shutdown_timeout = self.server.shutdown_timeout;
+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let workers = self.process_workers.lock().clone();
 
         workers.iter().for_each(|worker| worker.request_shutdown());
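
Note: SIGCHLD is no longer consumed by a raw libc handler (receive_signal is removed further down); a ChildTerminated lifecycle event now nudges the CHILD_SIGNAL_SENDER watch channel instead. A sketch of why a tokio watch channel fits here: it coalesces any burst of child-exit notifications into a single pending wake-up, after which the loop rescans every worker:

use tokio::sync::watch;

// Sketch of the coalescing wake-up: a watch channel carrying () folds any
// number of "a child exited" nudges into one pending change notification.
#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(());
    for _ in 0..3 {
        tx.send(()).ok(); // three rapid child-exit notifications
    }
    rx.changed().await.unwrap(); // observed once; loop then rescans all workers
    println!("child exit noticed; re-checking worker liveness");
}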
@@ -152,7 +205,7 @@ impl ClusterMode {
 
         tokio::select! {
             _ = monitor_handle => {
-                info!("All children exited early, exit normally")
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -160,37 +213,68 @@
             }
         }
 
-        Err(ItsiError::Break())
+        Err(ItsiError::Break)
     }
 
-    pub fn receive_signal(signal: i32) {
-        match signal {
-            libc::SIGCHLD => {
-                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
-                    i.send(()).ok();
-                });
-            }
-            _ => {
-                // Handle other signals
+    pub async fn print_info(self: Arc<Self>) -> Result<()> {
+        println!("Itsi Cluster Info:");
+        println!("Master PID: {:?}", Pid::this());
+        if let Some(memory_limit) = self.server_config.server_params.read().worker_memory_limit {
+            println!("Worker Memory Limit: {}", memory_limit);
+        }
+
+        if self.server_config.watcher_fd.is_some() {
+            println!("File Watcher Enabled: true",);
+            if let Some(watchers) = self
+                .server_config
+                .server_params
+                .read()
+                .notify_watchers
+                .as_ref()
+            {
+                for watcher in watchers {
+                    println!(
+                        "Watching path: {} => {}",
+                        watcher.0,
+                        watcher
+                            .1
+                            .iter()
+                            .map(|path| path.join(","))
+                            .collect::<Vec<String>>()
+                            .join(" ")
+                    );
+                }
             }
         }
+        println!(
+            "Silent Mode: {}",
+            self.server_config.server_params.read().silence
+        );
+        println!(
+            "Preload: {}",
+            self.server_config.server_params.read().preload
+        );
+        let workers = self.process_workers.lock().clone();
+        for worker in workers {
+            worker.print_info()?;
+            sleep(Duration::from_millis(50)).await;
+        }
+        Ok(())
     }
 
     pub fn stop(&self) -> Result<()> {
-        unsafe { libc::signal(libc::SIGCHLD, libc::SIG_DFL) };
-
         for worker in self.process_workers.lock().iter() {
             if worker.is_alive() {
                 worker.force_kill();
             }
         }
-
         Ok(())
     }
 
     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        self.invoke_hook("before_fork");
         self.process_workers
             .lock()
             .iter()
@@ -199,27 +283,35 @@ impl ClusterMode {
         let (sender, mut receiver) = watch::channel(());
         *CHILD_SIGNAL_SENDER.lock() = Some(sender);
 
-        unsafe { libc::signal(libc::SIGCHLD, Self::receive_signal as usize) };
-
         let mut lifecycle_rx = self.lifecycle_channel.subscribe();
         let self_ref = self.clone();
 
         self.build_runtime().block_on(async {
             let self_ref = self_ref.clone();
-            let mut memory_check_interval = time::interval(time::Duration::from_secs(2));
+            let memory_check_duration = if self_ref.server_config.server_params.read().worker_memory_limit.is_some(){
+                time::Duration::from_secs(15)
+            } else {
+                time::Duration::from_secs(60 * 60 * 24 * 365 * 100)
+            };
+
+            let mut memory_check_interval = time::interval(memory_check_duration);
+
+            self.invoke_hook("after_start");
+
             loop {
                 tokio::select! {
                     _ = receiver.changed() => {
                         let mut workers = self_ref.process_workers.lock();
                         workers.retain(|worker| {
-                            worker.boot_if_dead(Arc::clone(&self_ref))
+                            worker.boot_if_dead(self_ref.clone())
                         });
                         if workers.is_empty() {
                             warn!("No workers running. Send SIGTTIN to increase worker count");
                         }
                     }
                     _ = memory_check_interval.tick() => {
-                        if let Some(memory_limit) = self_ref.server.worker_memory_limit {
+                        let worker_memory_limit = self_ref.server_config.server_params.read().worker_memory_limit;
+                        if let Some(memory_limit) = worker_memory_limit {
                             let largest_worker = {
                                 let workers = self_ref.process_workers.lock();
                                 workers.iter().max_by(|wa, wb| wa.memory_usage().cmp(&wb.memory_usage())).cloned()
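
Note: tokio::select! arms are fixed at compile time, so the memory ticker cannot simply be omitted when no limit is configured. The diff instead arms it with an effectively-never period (roughly 100 years) and rereads the limit on every tick, since a reload can change server_params. A minimal sketch of the trick:

use tokio::time::{self, Duration};

// Sketch of the "effectively never" interval: an unused select! arm gets
// a ~100-year ticker instead of being wrapped in an Option.
#[tokio::main]
async fn main() {
    let memory_limit: Option<u64> = None; // assume: no worker_memory_limit set
    let period = if memory_limit.is_some() {
        Duration::from_secs(15)
    } else {
        Duration::from_secs(60 * 60 * 24 * 365 * 100)
    };
    let mut ticker = time::interval(period);
    ticker.tick().await; // tokio intervals complete their first tick immediately
    println!("ticker armed with period {:?}", period);
}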
@@ -228,6 +320,9 @@ impl ClusterMode {
                         if let Some(current_mem_usage) = largest_worker.memory_usage(){
                             if current_mem_usage > memory_limit {
                                 largest_worker.reboot(self_ref.clone()).await.ok();
+                                if let Some(hook) = self_ref.server_config.server_params.read().hooks.get("after_memory_limit_reached") {
+                                    call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok() );
+                                }
                             }
                         }
                     }
@@ -237,7 +332,7 @@
                     Ok(lifecycle_event) => {
                         if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await{
                             match e {
-                                ItsiError::Break() => break,
+                                ItsiError::Break => break,
                                 _ => error!("Error in handle_lifecycle_event {:?}", e)
                             }
                         }
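
Note: ItsiError::Break changed from a tuple variant (Break()) to a unit variant (Break; see the itsi_error changes in the file list), and it serves as control flow: it is the one "error" that cleanly exits the supervision loop, while anything else is only logged. A compact illustration of the pattern, with hypothetical error and event types:

// Sketch of Break-as-control-flow: a dedicated error variant lets a deeply
// nested handler terminate the outer loop without a separate channel.
#[derive(Debug)]
enum ItsiError {
    Break,
    Other(String),
}

fn handle_event(event: u32) -> Result<(), ItsiError> {
    match event {
        0 => Err(ItsiError::Break),                     // shutdown completed
        9 => Err(ItsiError::Other("boom".to_string())), // a real failure
        _ => Ok(()),
    }
}

fn main() {
    for event in [2, 9, 0, 3] {
        if let Err(e) = handle_event(event) {
            match e {
                ItsiError::Break => break, // clean exit from the loop
                _ => eprintln!("error in handle_lifecycle_event {:?}", e),
            }
        }
    }
    println!("supervision loop exited");
}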
@@ -248,7 +343,12 @@
                 }
             }
         });
-
+        self.server_config
+            .server_params
+            .write()
+            .listeners
+            .lock()
+            .drain(..);
         Ok(())
     }
 }

data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6

@@ -1,27 +1,30 @@
+use std::sync::Arc;
+
 use cluster_mode::ClusterMode;
 use itsi_error::Result;
 use single_mode::SingleMode;
-use std::sync::Arc;
+
 pub mod cluster_mode;
 pub mod single_mode;
 
+#[derive(Clone)]
 pub(crate) enum ServeStrategy {
     Single(Arc<SingleMode>),
     Cluster(Arc<ClusterMode>),
 }
 
 impl ServeStrategy {
-    pub fn run(&self) -> Result<()> {
+    pub fn run(self) -> Result<()> {
         match self {
-            ServeStrategy::Single(single_router) => single_router.clone().run(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().run(),
+            ServeStrategy::Single(single_router) => single_router.run(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.run(),
         }
     }
 
     pub(crate) fn stop(&self) -> Result<()> {
         match self {
-            ServeStrategy::Single(single_router) => single_router.clone().stop(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().stop(),
+            ServeStrategy::Single(single_router) => single_router.stop(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.stop(),
         }
     }
 }
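
Note: deriving Clone on ServeStrategy is cheap because both variants wrap an Arc, so a clone is just a reference-count bump; that is what lets run(self) consume the strategy while other owners keep their own handle. A minimal sketch with stand-in mode types:

use std::sync::Arc;

// Hypothetical stand-ins for SingleMode / ClusterMode: cloning the enum
// clones an Arc, i.e. bumps a refcount rather than copying server state.
#[allow(dead_code)]
struct SingleMode;
#[allow(dead_code)]
struct ClusterMode;

#[derive(Clone)]
#[allow(dead_code)]
enum ServeStrategy {
    Single(Arc<SingleMode>),
    Cluster(Arc<ClusterMode>),
}

fn main() {
    let strategy = ServeStrategy::Single(Arc::new(SingleMode));
    let for_signal_handler = strategy.clone(); // refcount bump only
    match (strategy, for_signal_handler) {
        (ServeStrategy::Single(a), ServeStrategy::Single(b)) => {
            assert!(Arc::ptr_eq(&a, &b)); // both clones share one SingleMode
        }
        _ => unreachable!(),
    }
}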