itsi-scheduler 0.1.5 → 0.1.19

This diff shows the changes between publicly released versions of the package as they appear in their respective registries. It is provided for informational purposes only.

Potentially problematic release.


This version of itsi-scheduler might be problematic.

Files changed (125)
  1. checksums.yaml +4 -4
  2. data/CODE_OF_CONDUCT.md +7 -0
  3. data/Cargo.lock +90 -22
  4. data/README.md +5 -0
  5. data/_index.md +7 -0
  6. data/ext/itsi_error/Cargo.toml +1 -0
  7. data/ext/itsi_error/src/lib.rs +106 -7
  8. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  9. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  10. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  11. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  12. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  13. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  14. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  15. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  16. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  22. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  23. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  24. data/ext/itsi_rb_helpers/src/lib.rs +59 -9
  25. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  26. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  27. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  28. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  29. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  30. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  31. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  32. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  33. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  34. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  35. data/ext/itsi_scheduler/src/itsi_scheduler.rs +1 -1
  36. data/ext/itsi_server/Cargo.lock +2956 -0
  37. data/ext/itsi_server/Cargo.toml +72 -28
  38. data/ext/itsi_server/src/default_responses/mod.rs +11 -0
  39. data/ext/itsi_server/src/env.rs +43 -0
  40. data/ext/itsi_server/src/lib.rs +113 -75
  41. data/ext/itsi_server/src/prelude.rs +2 -0
  42. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  43. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
  44. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
  45. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
  46. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
  47. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
  48. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  49. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
  50. data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
  51. data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
  52. data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +56 -24
  53. data/ext/itsi_server/src/server/{listener.rs → binds/listener.rs} +218 -113
  54. data/ext/itsi_server/src/server/binds/mod.rs +4 -0
  55. data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +55 -17
  56. data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +109 -28
  57. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  58. data/ext/itsi_server/src/server/http_message_types.rs +97 -0
  59. data/ext/itsi_server/src/server/io_stream.rs +2 -1
  60. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  61. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
  62. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
  63. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
  64. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
  65. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
  66. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
  67. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
  68. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
  69. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
  70. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
  71. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
  72. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
  73. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
  74. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
  75. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  76. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
  77. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
  78. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
  79. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
  80. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  81. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
  82. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
  83. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
  84. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
  85. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
  86. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
  87. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  88. data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
  89. data/ext/itsi_server/src/server/mod.rs +6 -5
  90. data/ext/itsi_server/src/server/process_worker.rs +65 -14
  91. data/ext/itsi_server/src/server/request_job.rs +11 -0
  92. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +137 -49
  93. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  94. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +338 -164
  95. data/ext/itsi_server/src/server/signal.rs +32 -26
  96. data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
  97. data/ext/itsi_server/src/server/thread_worker.rs +214 -107
  98. data/ext/itsi_server/src/services/cache_store.rs +74 -0
  99. data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
  100. data/ext/itsi_server/src/services/mime_types.rs +1416 -0
  101. data/ext/itsi_server/src/services/mod.rs +6 -0
  102. data/ext/itsi_server/src/services/password_hasher.rs +83 -0
  103. data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
  104. data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
  105. data/ext/itsi_tracing/Cargo.toml +1 -0
  106. data/ext/itsi_tracing/src/lib.rs +312 -34
  107. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  108. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  109. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  110. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  111. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  112. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  113. data/lib/itsi/scheduler/version.rb +1 -1
  114. data/lib/itsi/scheduler.rb +2 -2
  115. metadata +93 -21
  116. data/ext/itsi_error/src/from.rs +0 -71
  117. data/ext/itsi_server/extconf.rb +0 -6
  118. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  119. data/ext/itsi_server/src/request/itsi_request.rs +0 -277
  120. data/ext/itsi_server/src/request/mod.rs +0 -1
  121. data/ext/itsi_server/src/response/mod.rs +0 -1
  122. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
  123. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
  124. data/ext/itsi_server/src/server/itsi_server.rs +0 -244
  125. data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0

data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs

@@ -1,14 +1,13 @@
-use crate::server::{
-    itsi_server::Server, lifecycle_event::LifecycleEvent, listener::Listener,
-    process_worker::ProcessWorker,
-};
+use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig;
+use crate::server::signal::SIGNAL_HANDLER_CHANNEL;
+use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker};
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{call_without_gvl, create_ruby_thread};
-use itsi_tracing::{error, info, warn};
-use nix::{
-    libc::{self, exit},
-    unistd::Pid,
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
 };
+use itsi_tracing::{error, info, warn};
+use magnus::Value;
+use nix::{libc::exit, unistd::Pid};

 use std::{
     sync::{atomic::AtomicUsize, Arc},
@@ -19,10 +18,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners: Arc<Vec<Arc<Listener>>>,
-    pub server: Arc<Server>,
+    pub server_config: Arc<ItsiServerConfig>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -32,15 +30,8 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
     parking_lot::Mutex::new(None);

 impl ClusterMode {
-    pub fn new(
-        server: Arc<Server>,
-        listeners: Arc<Vec<Arc<Listener>>>,
-        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-    ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
-        let process_workers = (0..server.workers)
+    pub fn new(server_config: Arc<ItsiServerConfig>) -> Self {
+        let process_workers = (0..server_config.server_params.read().workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
                 ..Default::default()
@@ -48,10 +39,9 @@ impl ClusterMode {
             .collect();

         Self {
-            listeners,
-            server,
+            server_config,
             process_workers: parking_lot::Mutex::new(process_workers),
-            lifecycle_channel,
+            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
         }
     }

@@ -73,13 +63,56 @@ impl ClusterMode {
     ) -> Result<()> {
         match lifecycle_event {
             LifecycleEvent::Start => Ok(()),
+            LifecycleEvent::PrintInfo => {
+                self.print_info().await?;
+                Ok(())
+            }
             LifecycleEvent::Shutdown => {
+                self.server_config.stop_watcher()?;
                 self.shutdown().await?;
                 Ok(())
             }
             LifecycleEvent::Restart => {
-                for worker in self.process_workers.lock().iter() {
-                    worker.reboot(self.clone()).await?;
+                self.server_config.dup_fds()?;
+                self.shutdown().await.ok();
+                info!("Shutdown complete. Calling reload exec");
+                self.server_config.reload_exec()?;
+                Ok(())
+            }
+            LifecycleEvent::Reload => {
+                let should_reexec = self.server_config.clone().reload(true)?;
+                if should_reexec {
+                    self.server_config.dup_fds()?;
+                    self.shutdown().await.ok();
+                    self.server_config.reload_exec()?;
+                }
+                let mut workers_to_load = self.server_config.server_params.read().workers;
+                let mut next_workers = Vec::new();
+                for worker in self.process_workers.lock().drain(..) {
+                    if workers_to_load == 0 {
+                        worker.graceful_shutdown(self.clone()).await
+                    } else {
+                        workers_to_load -= 1;
+                        worker.reboot(self.clone()).await?;
+                        next_workers.push(worker);
+                    }
+                }
+                self.process_workers.lock().extend(next_workers);
+                while workers_to_load > 0 {
+                    let mut workers = self.process_workers.lock();
+                    let worker = ProcessWorker {
+                        worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                        ..Default::default()
+                    };
+                    let worker_clone = worker.clone();
+                    let self_clone = self.clone();
+                    create_ruby_thread(move || {
+                        call_without_gvl(move || {
+                            worker_clone.boot(self_clone).ok();
+                        })
+                    });
+                    workers.push(worker);
+                    workers_to_load -= 1
                 }
                 Ok(())
             }
@@ -106,8 +139,10 @@ impl ClusterMode {
         };
         if let Some(dropped_worker) = worker {
             dropped_worker.request_shutdown();
-            let force_kill_time =
-                Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
+            let force_kill_time = Instant::now()
+                + Duration::from_secs_f64(
+                    self.server_config.server_params.read().shutdown_timeout,
+                );
             while dropped_worker.is_alive() && force_kill_time > Instant::now() {
                 tokio::time::sleep(Duration::from_millis(100)).await;
             }
@@ -121,13 +156,20 @@ impl ClusterMode {
                 for worker in self.process_workers.lock().iter() {
                     worker.force_kill();
                 }
+                error!("Force shutdown!");
                 unsafe { exit(0) };
             }
+            LifecycleEvent::ChildTerminated => {
+                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
+                    i.send(()).ok();
+                });
+                Ok(())
+            }
         }
     }

     pub async fn shutdown(&self) -> Result<()> {
-        let shutdown_timeout = self.server.shutdown_timeout;
+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let workers = self.process_workers.lock().clone();

         workers.iter().for_each(|worker| worker.request_shutdown());
@@ -152,7 +194,7 @@ impl ClusterMode {

         tokio::select! {
             _ = monitor_handle => {
-                info!("All children exited early, exit normally")
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -160,37 +202,76 @@ impl ClusterMode {
             }
         }

-        Err(ItsiError::Break())
+        Err(ItsiError::Break)
     }

-    pub fn receive_signal(signal: i32) {
-        match signal {
-            libc::SIGCHLD => {
-                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
-                    i.send(()).ok();
-                });
-            }
-            _ => {
-                // Handle other signals
+    pub async fn print_info(self: Arc<Self>) -> Result<()> {
+        println!("Itsi Cluster Info:");
+        println!("Master PID: {:?}", Pid::this());
+        if let Some(memory_limit) = self.server_config.server_params.read().worker_memory_limit {
+            println!("Worker Memory Limit: {}", memory_limit);
+        }
+
+        if self.server_config.watcher_fd.is_some() {
+            println!("File Watcher Enabled: true",);
+            if let Some(watchers) = self
+                .server_config
+                .server_params
+                .read()
+                .notify_watchers
+                .as_ref()
+            {
+                for watcher in watchers {
+                    println!(
+                        "Watching path: {} => {}",
+                        watcher.0,
+                        watcher
+                            .1
+                            .iter()
+                            .map(|path| path.join(","))
+                            .collect::<Vec<String>>()
+                            .join(" ")
+                    );
+                }
             }
         }
+        println!(
+            "Silent Mode: {}",
+            self.server_config.server_params.read().silence
+        );
+        println!(
+            "Preload: {}",
+            self.server_config.server_params.read().preload
+        );
+        let workers = self.process_workers.lock().clone();
+        for worker in workers {
+            worker.print_info()?;
+            sleep(Duration::from_millis(50)).await;
+        }
+        Ok(())
     }

     pub fn stop(&self) -> Result<()> {
-        unsafe { libc::signal(libc::SIGCHLD, libc::SIG_DFL) };
-
         for worker in self.process_workers.lock().iter() {
             if worker.is_alive() {
                 worker.force_kill();
             }
         }
-
         Ok(())
     }

     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        if let Some(proc) = self
+            .server_config
+            .server_params
+            .read()
+            .hooks
+            .get("before_fork")
+        {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
         self.process_workers
             .lock()
             .iter()
@@ -199,8 +280,6 @@ impl ClusterMode {
         let (sender, mut receiver) = watch::channel(());
         *CHILD_SIGNAL_SENDER.lock() = Some(sender);

-        unsafe { libc::signal(libc::SIGCHLD, Self::receive_signal as usize) };
-
         let mut lifecycle_rx = self.lifecycle_channel.subscribe();
         let self_ref = self.clone();

@@ -212,14 +291,15 @@ impl ClusterMode {
                 _ = receiver.changed() => {
                     let mut workers = self_ref.process_workers.lock();
                     workers.retain(|worker| {
-                        worker.boot_if_dead(Arc::clone(&self_ref))
+                        worker.boot_if_dead(self_ref.clone())
                     });
                     if workers.is_empty() {
                         warn!("No workers running. Send SIGTTIN to increase worker count");
                     }
                 }
                 _ = memory_check_interval.tick() => {
-                    if let Some(memory_limit) = self_ref.server.worker_memory_limit {
+                    let worker_memory_limit = self_ref.server_config.server_params.read().worker_memory_limit;
+                    if let Some(memory_limit) = worker_memory_limit {
                         let largest_worker = {
                             let workers = self_ref.process_workers.lock();
                             workers.iter().max_by(|wa, wb| wa.memory_usage().cmp(&wb.memory_usage())).cloned()
@@ -228,6 +308,9 @@ impl ClusterMode {
                         if let Some(current_mem_usage) = largest_worker.memory_usage(){
                             if current_mem_usage > memory_limit {
                                 largest_worker.reboot(self_ref.clone()).await.ok();
+                                if let Some(hook) = self_ref.server_config.server_params.read().hooks.get("after_memory_threshold_reached") {
+                                    call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok() );
+                                }
                             }
                         }
                     }
@@ -237,7 +320,7 @@ impl ClusterMode {
                     Ok(lifecycle_event) => {
                         if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await{
                             match e {
-                                ItsiError::Break() => break,
+                                ItsiError::Break => break,
                                 _ => error!("Error in handle_lifecycle_event {:?}", e)
                             }
                         }
@@ -248,7 +331,12 @@ impl ClusterMode {
                 }
             }
         });
-
+        self.server_config
+            .server_params
+            .write()
+            .listeners
+            .lock()
+            .drain(..);
         Ok(())
     }
 }
data/ext/itsi_server/src/server/serve_strategy/mod.rs

@@ -1,27 +1,30 @@
+use std::sync::Arc;
+
 use cluster_mode::ClusterMode;
 use itsi_error::Result;
 use single_mode::SingleMode;
-use std::sync::Arc;
+
 pub mod cluster_mode;
 pub mod single_mode;

+#[derive(Clone)]
 pub(crate) enum ServeStrategy {
     Single(Arc<SingleMode>),
     Cluster(Arc<ClusterMode>),
 }

 impl ServeStrategy {
-    pub fn run(&self) -> Result<()> {
+    pub fn run(self) -> Result<()> {
         match self {
-            ServeStrategy::Single(single_router) => single_router.clone().run(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().run(),
+            ServeStrategy::Single(single_router) => single_router.run(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.run(),
         }
     }

     pub(crate) fn stop(&self) -> Result<()> {
         match self {
-            ServeStrategy::Single(single_router) => single_router.clone().stop(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().stop(),
+            ServeStrategy::Single(single_router) => single_router.stop(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.stop(),
         }
     }
 }