itsi 0.1.8 → 0.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +11 -2
- data/Rakefile +6 -2
- data/crates/itsi_rb_helpers/src/lib.rs +27 -4
- data/crates/itsi_server/Cargo.toml +4 -1
- data/crates/itsi_server/src/lib.rs +74 -1
- data/crates/itsi_server/src/request/itsi_request.rs +32 -11
- data/crates/itsi_server/src/response/itsi_response.rs +14 -4
- data/crates/itsi_server/src/server/bind.rs +16 -12
- data/crates/itsi_server/src/server/itsi_server.rs +146 -95
- data/crates/itsi_server/src/server/listener.rs +10 -10
- data/crates/itsi_server/src/server/process_worker.rs +10 -3
- data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
- data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
- data/crates/itsi_server/src/server/signal.rs +4 -0
- data/crates/itsi_server/src/server/thread_worker.rs +55 -24
- data/crates/itsi_server/src/server/tls.rs +11 -8
- data/crates/itsi_tracing/src/lib.rs +18 -1
- data/gems/scheduler/Cargo.lock +12 -12
- data/gems/scheduler/ext/itsi_rb_helpers/src/lib.rs +27 -4
- data/gems/scheduler/ext/itsi_server/Cargo.toml +4 -1
- data/gems/scheduler/ext/itsi_server/src/lib.rs +74 -1
- data/gems/scheduler/ext/itsi_server/src/request/itsi_request.rs +32 -11
- data/gems/scheduler/ext/itsi_server/src/response/itsi_response.rs +14 -4
- data/gems/scheduler/ext/itsi_server/src/server/bind.rs +16 -12
- data/gems/scheduler/ext/itsi_server/src/server/itsi_server.rs +146 -95
- data/gems/scheduler/ext/itsi_server/src/server/listener.rs +10 -10
- data/gems/scheduler/ext/itsi_server/src/server/process_worker.rs +10 -3
- data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
- data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
- data/gems/scheduler/ext/itsi_server/src/server/signal.rs +4 -0
- data/gems/scheduler/ext/itsi_server/src/server/thread_worker.rs +55 -24
- data/gems/scheduler/ext/itsi_server/src/server/tls.rs +11 -8
- data/gems/scheduler/ext/itsi_tracing/src/lib.rs +18 -1
- data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
- data/gems/scheduler/test/test_address_resolve.rb +0 -1
- data/gems/scheduler/test/test_file_io.rb +0 -1
- data/gems/scheduler/test/test_kernel_sleep.rb +3 -4
- data/gems/server/Cargo.lock +11 -2
- data/gems/server/Rakefile +8 -1
- data/gems/server/exe/itsi +53 -23
- data/gems/server/ext/itsi_rb_helpers/src/lib.rs +27 -4
- data/gems/server/ext/itsi_server/Cargo.toml +4 -1
- data/gems/server/ext/itsi_server/src/lib.rs +74 -1
- data/gems/server/ext/itsi_server/src/request/itsi_request.rs +32 -11
- data/gems/server/ext/itsi_server/src/response/itsi_response.rs +14 -4
- data/gems/server/ext/itsi_server/src/server/bind.rs +16 -12
- data/gems/server/ext/itsi_server/src/server/itsi_server.rs +146 -95
- data/gems/server/ext/itsi_server/src/server/listener.rs +10 -10
- data/gems/server/ext/itsi_server/src/server/process_worker.rs +10 -3
- data/gems/server/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
- data/gems/server/ext/itsi_server/src/server/serve_strategy/single_mode.rs +134 -115
- data/gems/server/ext/itsi_server/src/server/signal.rs +4 -0
- data/gems/server/ext/itsi_server/src/server/thread_worker.rs +55 -24
- data/gems/server/ext/itsi_server/src/server/tls.rs +11 -8
- data/gems/server/ext/itsi_tracing/src/lib.rs +18 -1
- data/gems/server/lib/itsi/request.rb +29 -21
- data/gems/server/lib/itsi/server/Itsi.rb +127 -0
- data/gems/server/lib/itsi/server/config.rb +36 -0
- data/gems/server/lib/itsi/server/options_dsl.rb +401 -0
- data/gems/server/lib/itsi/server/rack/handler/itsi.rb +18 -7
- data/gems/server/lib/itsi/server/rack_interface.rb +75 -0
- data/gems/server/lib/itsi/server/scheduler_interface.rb +21 -0
- data/gems/server/lib/itsi/server/signal_trap.rb +23 -0
- data/gems/server/lib/itsi/server/version.rb +1 -1
- data/gems/server/lib/itsi/server.rb +71 -101
- data/gems/server/test/helpers/test_helper.rb +30 -0
- data/gems/server/test/test_itsi_server.rb +294 -3
- data/lib/itsi/version.rb +1 -1
- data/location_dsl.rb +381 -0
- data/sandbox/deploy/main.tf +1 -0
- data/sandbox/itsi_itsi_file/Itsi.rb +119 -0
- data/sandbox/itsi_sandbox_async/Gemfile +1 -1
- data/sandbox/itsi_sandbox_rack/Gemfile.lock +2 -2
- data/sandbox/itsi_sandbox_rails/Gemfile.lock +2 -2
- data/tasks.txt +25 -8
- metadata +21 -14
- data/gems/server/lib/itsi/signals.rb +0 -23
- data/gems/server/test/test_helper.rb +0 -7
- /data/gems/server/lib/itsi/{index.html.erb → index.html} +0 -0
The hunks below come from the server crate sources (src/server/itsi_server.rs, src/server/listener.rs, src/server/process_worker.rs, and src/server/serve_strategy/cluster_mode.rs), which are duplicated verbatim across the data/crates and gem ext trees listed above.

@@ -2,21 +2,23 @@ use super::{
     bind::Bind,
     listener::Listener,
     serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode},
-    signal::{
+    signal::{
+        clear_signal_handlers, reset_signal_handlers, send_shutdown_event, SIGNAL_HANDLER_CHANNEL,
+    },
 };
 use crate::{request::itsi_request::ItsiRequest, server::serve_strategy::ServeStrategy};
 use derive_more::Debug;
-use itsi_rb_helpers::call_without_gvl;
-use itsi_tracing::error;
+use itsi_rb_helpers::{call_without_gvl, HeapVal, HeapValue};
+use itsi_tracing::{error, run_silently};
 use magnus::{
     block::Proc,
     error::Result,
-    scan_args::{get_kwargs, scan_args, Args, KwArgs},
-    value::
-    RHash, Ruby, Symbol, Value,
+    scan_args::{get_kwargs, scan_args, Args, KwArgs, ScanArgsKw, ScanArgsOpt, ScanArgsRequired},
+    value::ReprValue,
+    ArgList, RArray, RHash, Ruby, Symbol, Value,
 };
-use parking_lot::Mutex;
-use std::{cmp::max, ops::Deref, sync::Arc};
+use parking_lot::{Mutex, RwLock};
+use std::{cmp::max, collections::HashMap, ops::Deref, sync::Arc};
 use tracing::{info, instrument};
 
 static DEFAULT_BIND: &str = "http://localhost:3000";
@@ -34,12 +36,11 @@ impl Deref for Server {
         &self.config
     }
 }
-type AfterFork = Mutex<Arc<Option<Box<dyn Fn() + Send + Sync>>>>;
 
 #[derive(Debug)]
 pub struct ServerConfig {
     #[debug(skip)]
-    pub app:
+    pub app: HeapVal,
     #[allow(unused)]
     pub workers: u8,
     #[allow(unused)]
@@ -49,12 +50,14 @@ pub struct ServerConfig {
     pub script_name: String,
     pub(crate) binds: Mutex<Vec<Bind>>,
     #[debug(skip)]
-    pub
-    #[debug(skip)]
-    pub after_fork: AfterFork,
+    pub hooks: HashMap<String, HeapValue<Proc>>,
     pub scheduler_class: Option<String>,
     pub stream_body: Option<bool>,
     pub worker_memory_limit: Option<u64>,
+    #[debug(skip)]
+    pub(crate) strategy: RwLock<Option<ServeStrategy>>,
+    pub silence: bool,
+    pub oob_gc_responses_threshold: Option<u64>,
 }
 
 #[derive(Debug)]
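
The hunk above collapses the dedicated before_fork/after_fork fields into one string-keyed hooks map. A minimal sketch of that shape in plain Rust (std closures standing in for magnus Procs; all names here are illustrative, not the crate's API):

use std::collections::HashMap;

// Stand-in for a stored Ruby proc; the real field holds HeapValue<Proc>.
type Hook = Box<dyn Fn() + Send + Sync>;

struct Config {
    hooks: HashMap<String, Hook>,
}

impl Config {
    // Absent hooks are skipped, so a new lifecycle event needs only a
    // new key, not a new struct field.
    fn run_hook(&self, name: &str) {
        if let Some(hook) = self.hooks.get(name) {
            hook();
        }
    }
}

fn main() {
    let mut hooks: HashMap<String, Hook> = HashMap::new();
    hooks.insert("before_fork".into(), Box::new(|| println!("before fork")));
    let config = Config { hooks };
    config.run_hook("before_fork"); // prints "before fork"
    config.run_hook("after_fork"); // not registered: a quiet no-op
}
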
@@ -63,6 +66,30 @@ pub enum RequestJob {
     Shutdown,
 }
 
+fn extract_args<Req, Opt, Splat>(
+    scan_args: &Args<(), (), (), (), RHash, ()>,
+    primaries: &[&str],
+    rest: &[&str],
+) -> Result<KwArgs<Req, Opt, Splat>>
+where
+    Req: ScanArgsRequired,
+    Opt: ScanArgsOpt,
+    Splat: ScanArgsKw,
+{
+    let symbols: Vec<Symbol> = primaries
+        .iter()
+        .chain(rest.iter())
+        .map(|&name| Symbol::new(name))
+        .collect();
+
+    let hash = scan_args
+        .keywords
+        .funcall::<_, _, RHash>("slice", symbols.into_arg_list_with(&Ruby::get().unwrap()))
+        .unwrap();
+
+    get_kwargs(hash, primaries, rest)
+}
+
 impl Server {
     #[instrument(
         name = "Itsi",
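
The new extract_args helper slices the single keyword-arguments hash into named subsets, so one Ruby call can be scanned as several independently typed groups (the args1/args2 split in the following hunks). A rough, self-contained analogue using plain HashMaps (illustrative only; the real helper goes through magnus's get_kwargs):

use std::collections::HashMap;

// Pull a named subset out of one kwargs bag; each subset can then be
// validated on its own, mirroring the args1/args2 split used below.
fn slice_kwargs<'a>(
    kwargs: &HashMap<&'a str, &'a str>,
    wanted: &[&str],
) -> HashMap<&'a str, &'a str> {
    wanted
        .iter()
        .filter_map(|&k| kwargs.get_key_value(k).map(|(k, v)| (*k, *v)))
        .collect()
}

fn main() {
    let kwargs = HashMap::from([("app", "<proc>"), ("workers", "4"), ("silence", "true")]);

    let core = slice_kwargs(&kwargs, &["app", "workers", "threads"]);
    let tuning = slice_kwargs(&kwargs, &["silence", "worker_memory_limit"]);
    println!("{core:?} {tuning:?}"); // keys that were never passed are simply absent
}
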
@@ -73,39 +100,44 @@ impl Server {
     pub fn new(args: &[Value]) -> Result<Self> {
         let scan_args: Args<(), (), (), (), RHash, ()> = scan_args(args)?;
 
-        type
+        type Args1 = KwArgs<
+            (Value,),
+            (
+                // Workers
+                Option<u8>,
+                // Threads
+                Option<u8>,
+                // Shutdown Timeout
+                Option<f64>,
+                // Script Name
+                Option<String>,
+                // Binds
+                Option<Vec<String>>,
+                // Stream Body
+                Option<bool>,
+            ),
+            (),
+        >;
+
+        type Args2 = KwArgs<
+            (),
+            (
+                // Hooks
+                Option<RHash>,
+                // Scheduler Class
+                Option<String>,
+                // Worker Memory Limit
+                Option<u64>,
+                // Out-of-band GC Responses Threshold
+                Option<u64>,
+                // Silence
+                Option<bool>,
+            ),
+            (),
+        >;
+
+        let args1: Args1 = extract_args(
+            &scan_args,
             &["app"],
             &[
                 "workers",
@@ -113,24 +145,43 @@ impl Server {
                 "shutdown_timeout",
                 "script_name",
                 "binds",
-                "before_fork",
-                "after_fork",
-                "scheduler_class",
                 "stream_body",
             ],
         )?;
 
-        let args2:
-        scan_args
-            .keywords
-            .funcall::<_, _, RHash>("slice", (Symbol::new("worker_memory_limit"),))
-            .unwrap(),
+        let args2: Args2 = extract_args(
+            &scan_args,
             &[],
-            &[
+            &[
+                "hooks",
+                "scheduler_class",
+                "worker_memory_limit",
+                "oob_gc_responses_threshold",
+                "silence",
+            ],
         )?;
 
+        let hooks = args2
+            .optional
+            .0
+            .map(|rhash| -> Result<HashMap<String, HeapValue<Proc>>> {
+                let mut hook_map: HashMap<String, HeapValue<Proc>> = HashMap::new();
+                for pair in rhash.enumeratorize::<_, ()>("each", ()) {
+                    if let Some(pair_value) = RArray::from_value(pair?) {
+                        if let (Ok(key), Ok(value)) =
+                            (pair_value.entry::<Value>(0), pair_value.entry::<Proc>(1))
+                        {
+                            hook_map.insert(key.to_string(), HeapValue::from(value));
+                        }
+                    }
+                }
+                Ok(hook_map)
+            })
+            .transpose()?
+            .unwrap_or_default();
+
         let config = ServerConfig {
-            app:
+            app: HeapVal::from(args1.required.0),
             workers: max(args1.optional.0.unwrap_or(1), 1),
             threads: max(args1.optional.1.unwrap_or(1), 1),
             shutdown_timeout: args1.optional.2.unwrap_or(5.0),
@@ -144,33 +195,21 @@ impl Server {
                     .map(|s| s.parse())
                     .collect::<itsi_error::Result<Vec<Bind>>>()?,
             ),
-                }) as Box<dyn FnOnce() + Send + Sync>
-            })),
-            after_fork: Mutex::new(Arc::new(args1.optional.6.map(|p| {
-                let opaque_proc = Opaque::from(p);
-                Box::new(move || {
-                    opaque_proc
-                        .get_inner_with(&Ruby::get().unwrap())
-                        .call::<_, Value>(())
-                        .unwrap();
-                }) as Box<dyn Fn() + Send + Sync>
-            }))),
-            scheduler_class: args1.optional.7.clone(),
-            stream_body: args1.optional.8,
-            worker_memory_limit: args2.optional.0,
+            stream_body: args1.optional.5,
+            hooks,
+            scheduler_class: args2.optional.1.clone(),
+            worker_memory_limit: args2.optional.2,
+            strategy: RwLock::new(None),
+            oob_gc_responses_threshold: args2.optional.3,
+            silence: args2.optional.4.is_some_and(|s| s),
         };
 
-        if
+        if !config.silence {
+            if let Some(scheduler_class) = args2.optional.1 {
+                info!(scheduler_class, fiber_scheduler = true);
+            } else {
+                info!(fiber_scheduler = false);
+            }
         }
 
         Ok(Server {
@@ -179,7 +218,7 @@ impl Server {
     }
 
     #[instrument(name = "Bind", skip_all, fields(binds=format!("{:?}", self.config.binds.lock())))]
-    pub(crate) fn
+    pub(crate) fn build_listeners(&self) -> Result<Vec<Listener>> {
         let listeners = self
             .config
             .binds
@@ -189,17 +228,15 @@ impl Server {
             .map(Listener::try_from)
            .collect::<std::result::Result<Vec<Listener>, _>>()?
             .into_iter()
-            .map(Arc::new)
             .collect::<Vec<_>>();
         info!("Bound {:?} listeners", listeners.len());
-        Ok(
+        Ok(listeners)
     }
 
-    pub(crate) fn build_strategy(
-        self
-        listeners: Arc<Vec<Arc<Listener>>>,
-    ) -> Result<ServeStrategy> {
+    pub(crate) fn build_strategy(self) -> Result<()> {
+        let listeners = self.build_listeners()?;
         let server = Arc::new(self);
+        let server_clone = server.clone();
 
         let strategy = if server.config.workers == 1 {
             ServeStrategy::Single(Arc::new(SingleMode::new(
@@ -214,24 +251,38 @@ impl Server {
                 SIGNAL_HANDLER_CHANNEL.0.clone(),
             )))
         };
 
+        *server_clone.strategy.write() = Some(strategy);
+        Ok(())
+    }
+
+    pub fn stop(&self) -> Result<()> {
+        send_shutdown_event();
+        Ok(())
     }
 
     pub fn start(&self) -> Result<()> {
+        if self.silence {
+            run_silently(|| self.build_and_run_strategy())
+        } else {
+            self.build_and_run_strategy()
+        }
+    }
+
+    fn build_and_run_strategy(&self) -> Result<()> {
         reset_signal_handlers();
         let rself = self.clone();
-        let listeners = self.listeners()?;
-        let listeners_clone = listeners.clone();
         call_without_gvl(move || -> Result<()> {
-            if let Err(e) = strategy.run() {
+            rself.clone().build_strategy()?;
+            if let Err(e) = rself.strategy.read().as_ref().unwrap().run() {
                 error!("Error running server: {}", e);
-                strategy.stop()?;
+                rself.strategy.read().as_ref().unwrap().stop()?;
             }
-            drop(strategy);
             Ok(())
         })?;
         clear_signal_handlers();
+        self.strategy.write().take();
+        info!("Server stopped");
         Ok(())
     }
 }
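
The refactor above parks the built ServeStrategy in an RwLock<Option<_>> on the server, so stop() and the error path can reach the running strategy from outside build_and_run_strategy, and shutdown clears the slot. A minimal sketch of the pattern with std types only (names illustrative):

use std::sync::{Arc, RwLock};

// Stand-in for ServeStrategy; just enough surface for run/stop.
struct Strategy;

impl Strategy {
    fn run(&self) { println!("serving..."); }
    fn stop(&self) { println!("stopping"); }
}

struct Server {
    strategy: RwLock<Option<Strategy>>,
}

fn main() {
    let server = Arc::new(Server { strategy: RwLock::new(None) });

    // build_strategy: publish the strategy into the shared slot.
    *server.strategy.write().unwrap() = Some(Strategy);

    // The run loop reads it through the lock...
    server.strategy.read().unwrap().as_ref().unwrap().run();
    // ...and a stop() arriving from another thread can do the same.
    server.strategy.read().unwrap().as_ref().unwrap().stop();

    // On shutdown the slot is cleared, dropping the strategy.
    server.strategy.write().unwrap().take();
}
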
@@ -117,7 +117,7 @@ impl TokioListener {
             tokio::select! {
                 stream_event = StreamExt::next(&mut *state) => {
                     match stream_event {
-                        Some(event) => info!("
+                        Some(event) => info!("ACME Event: {:?}", event),
                         None => error!("Received no acme event"),
                     }
                 },
@@ -243,20 +243,20 @@ impl std::fmt::Display for SockAddr {
 }
 
 impl Listener {
-    pub fn
+    pub fn into_tokio_listener(self) -> TokioListener {
         match self {
-            Listener::Tcp(listener) =>
-                TokioTcpListener::from_std(
+            Listener::Tcp(listener) => {
+                TokioListener::Tcp(TokioTcpListener::from_std(listener).unwrap())
+            }
             Listener::TcpTls((listener, acceptor)) => TokioListener::TcpTls(
-                TokioTcpListener::from_std(
+                TokioTcpListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
-            Listener::Unix(listener) =>
-                TokioUnixListener::from_std(
+            Listener::Unix(listener) => {
+                TokioListener::Unix(TokioUnixListener::from_std(listener).unwrap())
+            }
             Listener::UnixTls((listener, acceptor)) => TokioListener::UnixTls(
-                TokioUnixListener::from_std(
+                TokioUnixListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
         }
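
into_tokio_listener converts already-bound std listeners into tokio ones via from_std. A standalone sketch of that conversion (assumes tokio 1.x with the full feature set; note that tokio's from_std requires the socket to be non-blocking and must run inside a runtime context, which the crate presumably arranges where these listeners are consumed):

use tokio::net::TcpListener as TokioTcpListener;

fn main() -> std::io::Result<()> {
    // Bind with std, as the server does before handing sockets to tokio.
    let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?;
    // from_std requires the socket to already be non-blocking.
    std_listener.set_nonblocking(true)?;

    let rt = tokio::runtime::Runtime::new()?;
    // from_std also needs an active runtime context to register I/O.
    let _guard = rt.enter();
    let tokio_listener = TokioTcpListener::from_std(std_listener)?;
    println!("listening on {}", tokio_listener.local_addr()?);
    Ok(())
}
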
@@ -53,8 +53,8 @@ impl ProcessWorker {
             }
             *self.child_pid.lock() = None;
         }
-
+        match call_with_gvl(|_ruby| fork(cluster_template.server.hooks.get("after_fork").cloned()))
+        {
             Some(pid) => {
                 *self.child_pid.lock() = Some(Pid::from_raw(pid));
             }
@@ -67,7 +67,7 @@ impl ProcessWorker {
         }
         match SingleMode::new(
             cluster_template.server.clone(),
-            cluster_template.listeners.
+            cluster_template.listeners.lock().drain(..).collect(),
             cluster_template.lifecycle_channel.clone(),
         ) {
             Ok(single_mode) => {
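
The worker now drains the listeners out of the shared cluster template instead of cloning Arcs, leaving the parent's vector empty so each bound socket has a single owner after the fork. The idea in miniature (illustrative types):

use std::sync::Mutex;

// Stand-in for a bound socket, tagged by port.
struct Listener(u16);

fn main() {
    let listeners = Mutex::new(vec![Listener(3000), Listener(3001)]);

    // In the child after fork(): take ownership of every listener.
    let taken: Vec<Listener> = listeners.lock().unwrap().drain(..).collect();
    let ports: Vec<u16> = taken.iter().map(|l| l.0).collect();
    println!("child took listeners on ports {ports:?}");

    // The template's vec is now empty; a second drain yields nothing.
    assert!(listeners.lock().unwrap().is_empty());
}
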
@@ -83,6 +83,13 @@ impl ProcessWorker {
         Ok(())
     }
 
+    pub fn pid(&self) -> i32 {
+        if let Some(pid) = *self.child_pid.lock() {
+            return pid.as_raw();
+        }
+        0
+    }
+
     pub(crate) fn memory_usage(&self) -> Option<u64> {
         if let Some(pid) = *self.child_pid.lock() {
             let s = System::new_all();
@@ -3,8 +3,11 @@ use crate::server::{
     process_worker::ProcessWorker,
 };
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
+};
 use itsi_tracing::{error, info, warn};
+use magnus::Value;
 use nix::{
     libc::{self, exit},
     unistd::Pid,
@@ -19,9 +22,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners:
+    pub listeners: parking_lot::Mutex<Vec<Listener>>,
     pub server: Arc<Server>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
@@ -34,12 +37,9 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
 impl ClusterMode {
     pub fn new(
         server: Arc<Server>,
-        listeners:
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
         let process_workers = (0..server.workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
@@ -48,7 +48,7 @@ impl ClusterMode {
             .collect();
 
         Self {
-            listeners,
+            listeners: parking_lot::Mutex::new(listeners),
             server,
             process_workers: parking_lot::Mutex::new(process_workers),
             lifecycle_channel,
@@ -152,7 +152,7 @@ impl ClusterMode {
 
         tokio::select! {
             _ = monitor_handle => {
-
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -191,6 +191,9 @@ impl ClusterMode {
     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        if let Some(proc) = self.server.hooks.get("before_fork") {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
         self.process_workers
             .lock()
             .iter()
@@ -228,6 +231,9 @@ impl ClusterMode {
                 if let Some(current_mem_usage) = largest_worker.memory_usage(){
                     if current_mem_usage > memory_limit {
                         largest_worker.reboot(self_ref.clone()).await.ok();
+                        if let Some(hook) = self_ref.server.hooks.get("after_memory_threshold_reached") {
+                            call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok() );
+                        }
                     }
                 }
             }
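
Taken together with the pid() accessor added earlier, this lets the new after_memory_threshold_reached hook receive the rebooted worker's pid. A plain-Rust sketch of that monitoring step (closures standing in for Ruby procs; names illustrative):

use std::collections::HashMap;

// A hook that receives the offending worker's pid.
type PidHook = Box<dyn Fn(i32)>;

fn check_worker(pid: i32, mem_usage: u64, memory_limit: u64, hooks: &HashMap<String, PidHook>) {
    if mem_usage > memory_limit {
        println!("rebooting worker {pid}");
        // Notify the optional hook after triggering the reboot.
        if let Some(hook) = hooks.get("after_memory_threshold_reached") {
            hook(pid);
        }
    }
}

fn main() {
    let mut hooks: HashMap<String, PidHook> = HashMap::new();
    hooks.insert(
        "after_memory_threshold_reached".into(),
        Box::new(|pid| println!("hook notified for {pid}")),
    );
    check_worker(4242, 2_048, 1_024, &hooks);
}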