itsi-scheduler 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +219 -23
- data/Rakefile +7 -1
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +70 -0
- data/ext/itsi_error/src/lib.rs +10 -37
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +2 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
- data/ext/itsi_rb_helpers/src/lib.rs +90 -10
- data/ext/itsi_scheduler/Cargo.toml +9 -1
- data/ext/itsi_scheduler/extconf.rb +1 -1
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +31 -10
- data/ext/itsi_server/Cargo.toml +41 -0
- data/ext/itsi_server/extconf.rb +6 -0
- data/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
- data/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
- data/ext/itsi_server/src/body_proxy/mod.rs +2 -0
- data/ext/itsi_server/src/lib.rs +103 -0
- data/ext/itsi_server/src/request/itsi_request.rs +277 -0
- data/ext/itsi_server/src/request/mod.rs +1 -0
- data/ext/itsi_server/src/response/itsi_response.rs +347 -0
- data/ext/itsi_server/src/response/mod.rs +1 -0
- data/ext/itsi_server/src/server/bind.rs +168 -0
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +13 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +5 -0
- data/ext/itsi_server/src/server/itsi_server.rs +230 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +8 -0
- data/ext/itsi_server/src/server/listener.rs +259 -0
- data/ext/itsi_server/src/server/mod.rs +11 -0
- data/ext/itsi_server/src/server/process_worker.rs +196 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +253 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +238 -0
- data/ext/itsi_server/src/server/signal.rs +57 -0
- data/ext/itsi_server/src/server/thread_worker.rs +368 -0
- data/ext/itsi_server/src/server/tls.rs +152 -0
- data/ext/itsi_tracing/Cargo.toml +4 -0
- data/ext/itsi_tracing/src/lib.rs +36 -6
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +137 -1
- metadata +38 -4
@@ -0,0 +1,230 @@
|
|
1
|
+
use super::{
|
2
|
+
bind::Bind,
|
3
|
+
listener::Listener,
|
4
|
+
serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode},
|
5
|
+
signal::{reset_signal_handlers, SIGNAL_HANDLER_CHANNEL},
|
6
|
+
};
|
7
|
+
use crate::{request::itsi_request::ItsiRequest, server::serve_strategy::ServeStrategy};
|
8
|
+
use derive_more::Debug;
|
9
|
+
use itsi_rb_helpers::call_without_gvl;
|
10
|
+
use itsi_tracing::error;
|
11
|
+
use magnus::{
|
12
|
+
block::Proc,
|
13
|
+
error::Result,
|
14
|
+
scan_args::{get_kwargs, scan_args, Args, KwArgs},
|
15
|
+
value::{InnerValue, Opaque, ReprValue},
|
16
|
+
RHash, Ruby, Symbol, Value,
|
17
|
+
};
|
18
|
+
use parking_lot::Mutex;
|
19
|
+
use std::{cmp::max, ops::Deref, sync::Arc};
|
20
|
+
use tracing::{info, instrument};
|
21
|
+
|
22
|
+
static DEFAULT_BIND: &str = "localhost:3000";
|
23
|
+
|
24
|
+
/// Ruby-visible server handle, exposed to Ruby as `Itsi::Server`.
///
/// A thin, cheaply clonable wrapper around a shared [`ServerConfig`];
/// cloning only bumps the `Arc` refcount, so clones can be handed to
/// worker threads/processes freely.
#[magnus::wrap(class = "Itsi::Server", free_immediately, size)]
#[derive(Clone)]
pub struct Server {
    // Shared so every clone (and every serve strategy) sees the same config.
    pub config: Arc<ServerConfig>,
}
|
29
|
+
|
30
|
+
impl Deref for Server {
|
31
|
+
type Target = ServerConfig;
|
32
|
+
|
33
|
+
fn deref(&self) -> &Self::Target {
|
34
|
+
&self.config
|
35
|
+
}
|
36
|
+
}
|
37
|
+
/// Shared, optional callback run in child processes after `fork`.
/// Wrapped in `Mutex<Arc<..>>` so the cluster parent can hand the same
/// callback to each worker it spawns.
type AfterFork = Mutex<Arc<Option<Box<dyn Fn() + Send + Sync>>>>;
|
38
|
+
|
39
|
+
/// Configuration captured from the Ruby `Itsi::Server.new` keyword arguments.
#[derive(Debug)]
pub struct ServerConfig {
    // The Ruby app object (opaque Ruby value; excluded from Debug output).
    #[debug(skip)]
    pub app: Opaque<Value>,
    // Number of worker processes; clamped to >= 1 in `Server::new`.
    #[allow(unused)]
    pub workers: u8,
    // Threads per worker; clamped to >= 1 in `Server::new`.
    #[allow(unused)]
    pub threads: u8,
    // Seconds to wait for graceful shutdown before force-killing (default 5.0).
    #[allow(unused)]
    pub shutdown_timeout: f64,
    pub script_name: String,
    // Parsed bind specifications; mutex-guarded because `listeners()` reads
    // them after construction.
    pub(crate) binds: Mutex<Vec<Bind>>,
    // One-shot Ruby callback invoked before forking workers (FnOnce).
    #[debug(skip)]
    pub before_fork: Mutex<Option<Box<dyn FnOnce() + Send + Sync>>>,
    // Reusable Ruby callback invoked in each forked child (Fn).
    #[debug(skip)]
    pub after_fork: AfterFork,
    // Fully-qualified Ruby class name of a fiber scheduler, if any.
    pub scheduler_class: Option<String>,
    pub stream_body: Option<bool>,
    // Per-worker RSS limit in bytes — presumably enforced by cluster mode;
    // TODO confirm against serve_strategy.
    pub worker_memory_limit: Option<u64>,
}
|
59
|
+
|
60
|
+
/// Unit of work handed to worker threads: either a request to process,
/// or a sentinel telling the thread to exit its loop.
#[derive(Debug)]
pub enum RequestJob {
    ProcessRequest(ItsiRequest),
    Shutdown,
}
|
65
|
+
|
66
|
+
impl Server {
    /// Builds a `Server` from Ruby keyword arguments.
    ///
    /// Expects a single trailing kwargs hash. The hash is `slice`d and fed
    /// through two `get_kwargs` passes — presumably because of a limit on
    /// the optional-tuple arity magnus supports; TODO confirm.
    ///
    /// Defaults: 1 worker, 1 thread, 5.0s shutdown timeout, empty script
    /// name, and a single bind of `DEFAULT_BIND` when `binds:` is absent.
    ///
    /// # Errors
    /// Returns an error if kwargs extraction fails or if any bind string
    /// fails to parse into a [`Bind`].
    #[instrument(
        name = "Itsi",
        parent=None,
        skip(args),
        fields(workers = 1, threads = 1, shutdown_timeout = 5)
    )]
    pub fn new(args: &[Value]) -> Result<Self> {
        let scan_args: Args<(), (), (), (), RHash, ()> = scan_args(args)?;

        // Optional kwargs extracted in the first pass, in declaration order:
        // workers, threads, shutdown_timeout, script_name, binds,
        // before_fork, after_fork, scheduler_class, stream_body.
        type ArgSet1 = (
            Option<u8>,
            Option<u8>,
            Option<f64>,
            Option<String>,
            Option<Vec<String>>,
            Option<Proc>,
            Option<Proc>,
            Option<String>,
            Option<bool>,
        );

        // Second pass: worker_memory_limit only.
        type ArgSet2 = (Option<u64>,);

        let args1: KwArgs<(Value,), ArgSet1, ()> = get_kwargs(
            // `slice` the kwargs hash down to just the keys this pass knows,
            // so unrelated keys (handled by the second pass) don't error.
            scan_args
                .keywords
                .funcall::<_, _, RHash>(
                    "slice",
                    (
                        Symbol::new("app"),
                        Symbol::new("workers"),
                        Symbol::new("threads"),
                        Symbol::new("shutdown_timeout"),
                        Symbol::new("script_name"),
                        Symbol::new("binds"),
                        Symbol::new("before_fork"),
                        Symbol::new("after_fork"),
                        Symbol::new("scheduler_class"),
                        Symbol::new("stream_body"),
                    ),
                )
                .unwrap(),
            &["app"],
            &[
                "workers",
                "threads",
                "shutdown_timeout",
                "script_name",
                "binds",
                "before_fork",
                "after_fork",
                "scheduler_class",
                "stream_body",
            ],
        )?;

        let args2: KwArgs<(), ArgSet2, ()> = get_kwargs(
            scan_args
                .keywords
                .funcall::<_, _, RHash>("slice", (Symbol::new("worker_memory_limit"),))
                .unwrap(),
            &[],
            &["worker_memory_limit"],
        )?;

        let config = ServerConfig {
            app: Opaque::from(args1.required.0),
            // Clamp to at least one worker/thread.
            workers: max(args1.optional.0.unwrap_or(1), 1),
            threads: max(args1.optional.1.unwrap_or(1), 1),
            shutdown_timeout: args1.optional.2.unwrap_or(5.0),
            script_name: args1.optional.3.unwrap_or("".to_string()),
            binds: Mutex::new(
                args1
                    .optional
                    .4
                    .unwrap_or_else(|| vec![DEFAULT_BIND.to_string()])
                    .into_iter()
                    .map(|s| s.parse())
                    // Short-circuits on the first unparseable bind string.
                    .collect::<itsi_error::Result<Vec<Bind>>>()?,
            ),
            // Wrap the Ruby proc in an Opaque so the closure is Send; the
            // proc is re-resolved against the live Ruby VM when invoked.
            before_fork: Mutex::new(args1.optional.5.map(|p| {
                let opaque_proc = Opaque::from(p);
                Box::new(move || {
                    opaque_proc
                        .get_inner_with(&Ruby::get().unwrap())
                        .call::<_, Value>(())
                        .unwrap();
                }) as Box<dyn FnOnce() + Send + Sync>
            })),
            after_fork: Mutex::new(Arc::new(args1.optional.6.map(|p| {
                let opaque_proc = Opaque::from(p);
                Box::new(move || {
                    opaque_proc
                        .get_inner_with(&Ruby::get().unwrap())
                        .call::<_, Value>(())
                        .unwrap();
                }) as Box<dyn Fn() + Send + Sync>
            }))),
            scheduler_class: args1.optional.7.clone(),
            stream_body: args1.optional.8,
            worker_memory_limit: args2.optional.0,
        };

        // Log whether a fiber scheduler will be used.
        if let Some(scheduler_class) = args1.optional.7 {
            info!(scheduler_class, fiber_scheduler = true);
        } else {
            info!(fiber_scheduler = false);
        }

        Ok(Server {
            config: Arc::new(config),
        })
    }

    /// Binds every configured [`Bind`] and returns the resulting listeners.
    ///
    /// # Errors
    /// Fails if any single bind cannot be converted into a [`Listener`]
    /// (e.g. the address is already in use).
    #[instrument(name = "Bind", skip_all, fields(binds=format!("{:?}", self.config.binds.lock())))]
    pub(crate) fn listeners(&self) -> Result<Arc<Vec<Arc<Listener>>>> {
        let listeners = self
            .config
            .binds
            .lock()
            .iter()
            .cloned()
            .map(Listener::try_from)
            // Abort on the first bind failure.
            .collect::<std::result::Result<Vec<Listener>, _>>()?
            .into_iter()
            .map(Arc::new)
            .collect::<Vec<_>>();
        info!("Bound {:?} listeners", listeners.len());
        Ok(Arc::new(listeners))
    }

    /// Consumes the server and picks a serve strategy:
    /// single-process mode when `workers == 1`, cluster mode otherwise.
    pub(crate) fn build_strategy(self) -> Result<ServeStrategy> {
        let server = Arc::new(self);
        let listeners = server.listeners()?;

        let strategy = if server.config.workers == 1 {
            ServeStrategy::Single(Arc::new(SingleMode::new(
                server,
                listeners,
                SIGNAL_HANDLER_CHANNEL.0.clone(),
            )?))
        } else {
            ServeStrategy::Cluster(Arc::new(ClusterMode::new(
                server,
                listeners,
                SIGNAL_HANDLER_CHANNEL.0.clone(),
            )))
        };
        Ok(strategy)
    }

    /// Entry point called from Ruby: installs signal handlers, then runs
    /// the chosen strategy with the GVL released so Ruby threads keep
    /// running. If the strategy errors, it is logged and stopped.
    pub fn start(&self) -> Result<()> {
        reset_signal_handlers();
        let rself = self.clone();
        call_without_gvl(move || {
            let strategy = rself.build_strategy()?;
            if let Err(e) = strategy.run() {
                error!("Error running server: {}", e);
                strategy.stop()?;
            }
            Ok(())
        })
    }
}
|
@@ -0,0 +1,259 @@
|
|
1
|
+
use super::bind::{Bind, BindAddress};
|
2
|
+
use super::bind_protocol::BindProtocol;
|
3
|
+
use super::io_stream::IoStream;
|
4
|
+
use itsi_error::Result;
|
5
|
+
use itsi_tracing::info;
|
6
|
+
use socket2::{Domain, Protocol, Socket, Type};
|
7
|
+
use std::net::{IpAddr, SocketAddr, TcpListener};
|
8
|
+
use std::sync::Arc;
|
9
|
+
use std::{os::unix::net::UnixListener, path::PathBuf};
|
10
|
+
use tokio::net::TcpListener as TokioTcpListener;
|
11
|
+
use tokio::net::UnixListener as TokioUnixListener;
|
12
|
+
use tokio::net::{unix, TcpStream, UnixStream};
|
13
|
+
use tokio_rustls::TlsAcceptor;
|
14
|
+
|
15
|
+
/// A bound, blocking (std) listener socket, optionally paired with a TLS
/// acceptor. Converted per-worker into a [`TokioListener`] via
/// [`Listener::to_tokio_listener`].
pub(crate) enum Listener {
    Tcp(TcpListener),
    TcpTls((TcpListener, TlsAcceptor)),
    Unix(UnixListener),
    UnixTls((UnixListener, TlsAcceptor)),
}
|
21
|
+
|
22
|
+
/// The async (tokio) counterpart of [`Listener`]. TCP variants also carry
/// the canonical host string and port captured at bind time, used for
/// request metadata via [`TokioListener::host`] / [`TokioListener::port`].
pub(crate) enum TokioListener {
    Tcp {
        listener: TokioTcpListener,
        host: String,
        port: u16,
    },
    TcpTls {
        listener: TokioTcpListener,
        acceptor: TlsAcceptor,
        host: String,
        port: u16,
    },
    Unix {
        listener: TokioUnixListener,
    },
    UnixTls {
        listener: TokioUnixListener,
        acceptor: TlsAcceptor,
    },
}
|
42
|
+
|
43
|
+
impl TokioListener {
    /// Accepts one connection, dispatching to the transport-specific
    /// helper for this variant. TLS variants perform the handshake as
    /// part of accepting.
    pub(crate) async fn accept(&self) -> Result<IoStream> {
        match self {
            TokioListener::Tcp { listener, .. } => TokioListener::accept_tcp(listener).await,
            TokioListener::TcpTls {
                listener, acceptor, ..
            } => TokioListener::accept_tls(listener, acceptor).await,
            TokioListener::Unix { listener, .. } => TokioListener::accept_unix(listener).await,
            TokioListener::UnixTls {
                listener, acceptor, ..
            } => TokioListener::accept_unix_tls(listener, acceptor).await,
        }
    }

    // Plain-TCP accept: no TLS acceptor passed through.
    async fn accept_tcp(listener: &TokioTcpListener) -> Result<IoStream> {
        let tcp_stream = listener.accept().await?;
        Self::to_tokio_io(Stream::TcpStream(tcp_stream), None).await
    }

    // TCP accept followed by a TLS handshake in `to_tokio_io`.
    async fn accept_tls(listener: &TokioTcpListener, acceptor: &TlsAcceptor) -> Result<IoStream> {
        let tcp_stream = listener.accept().await?;
        Self::to_tokio_io(Stream::TcpStream(tcp_stream), Some(acceptor)).await
    }

    // Plain Unix-domain accept.
    async fn accept_unix(listener: &TokioUnixListener) -> Result<IoStream> {
        let unix_stream = listener.accept().await?;
        Self::to_tokio_io(Stream::UnixStream(unix_stream), None).await
    }

    // Unix-domain accept followed by a TLS handshake.
    async fn accept_unix_tls(
        listener: &TokioUnixListener,
        acceptor: &TlsAcceptor,
    ) -> Result<IoStream> {
        let unix_stream = listener.accept().await?;
        Self::to_tokio_io(Stream::UnixStream(unix_stream), Some(acceptor)).await
    }

    /// Wraps an accepted `(stream, peer_addr)` pair in the matching
    /// [`IoStream`] variant. When `tls_acceptor` is `Some`, the TLS
    /// handshake is driven to completion here; a handshake failure is
    /// surfaced as an error rather than a plaintext fallback.
    async fn to_tokio_io(
        input_stream: Stream,
        tls_acceptor: Option<&TlsAcceptor>,
    ) -> Result<IoStream> {
        match tls_acceptor {
            Some(acceptor) => match input_stream {
                Stream::TcpStream((tcp_stream, socket_address)) => {
                    match acceptor.accept(tcp_stream).await {
                        Ok(tls_stream) => Ok(IoStream::TcpTls {
                            stream: tls_stream,
                            addr: SockAddr::Tcp(Arc::new(socket_address)),
                        }),
                        Err(err) => Err(err.into()),
                    }
                }
                Stream::UnixStream((unix_stream, socket_address)) => {
                    match acceptor.accept(unix_stream).await {
                        Ok(tls_stream) => Ok(IoStream::UnixTls {
                            stream: tls_stream,
                            addr: SockAddr::Unix(Arc::new(socket_address)),
                        }),
                        Err(err) => Err(err.into()),
                    }
                }
            },
            None => match input_stream {
                Stream::TcpStream((tcp_stream, socket_address)) => Ok(IoStream::Tcp {
                    stream: tcp_stream,
                    addr: SockAddr::Tcp(Arc::new(socket_address)),
                }),
                Stream::UnixStream((unix_stream, socket_address)) => Ok(IoStream::Unix {
                    stream: unix_stream,
                    addr: SockAddr::Unix(Arc::new(socket_address)),
                }),
            },
        }
    }

    /// URL scheme implied by this listener: "https" for TLS variants,
    /// "http" otherwise (including plain Unix sockets).
    pub(crate) fn scheme(&self) -> String {
        match self {
            TokioListener::Tcp { .. } => "http".to_string(),
            TokioListener::TcpTls { .. } => "https".to_string(),
            TokioListener::Unix { .. } => "http".to_string(),
            TokioListener::UnixTls { .. } => "https".to_string(),
        }
    }

    /// Bound port for TCP listeners; 0 for Unix sockets (no port concept).
    pub(crate) fn port(&self) -> u16 {
        match self {
            TokioListener::Tcp { port, .. } => *port,
            TokioListener::TcpTls { port, .. } => *port,
            TokioListener::Unix { .. } => 0,
            TokioListener::UnixTls { .. } => 0,
        }
    }

    /// Bound host for TCP listeners; the literal "unix" for Unix sockets.
    pub(crate) fn host(&self) -> String {
        match self {
            TokioListener::Tcp { host, .. } => host.to_string(),
            TokioListener::TcpTls { host, .. } => host.to_string(),
            TokioListener::Unix { .. } => "unix".to_string(),
            TokioListener::UnixTls { .. } => "unix".to_string(),
        }
    }
}
|
145
|
+
|
146
|
+
/// Internal carrier for a freshly accepted connection plus its peer
/// address, before it is wrapped into an `IoStream` (and optionally TLS).
enum Stream {
    TcpStream((TcpStream, SocketAddr)),
    UnixStream((UnixStream, unix::SocketAddr)),
}
|
150
|
+
|
151
|
+
/// Peer address of an accepted connection, shareable across tasks via
/// `Arc` so clones are cheap.
#[derive(Clone, Debug)]
pub enum SockAddr {
    Tcp(Arc<SocketAddr>),
    Unix(Arc<unix::SocketAddr>),
}
|
156
|
+
impl std::fmt::Display for SockAddr {
|
157
|
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
158
|
+
match self {
|
159
|
+
SockAddr::Tcp(socket_addr) => write!(f, "{}", socket_addr.ip().to_canonical()),
|
160
|
+
SockAddr::Unix(socket_addr) => match socket_addr.as_pathname() {
|
161
|
+
Some(path) => write!(f, "{:?}", path),
|
162
|
+
None => write!(f, ""),
|
163
|
+
},
|
164
|
+
}
|
165
|
+
}
|
166
|
+
}
|
167
|
+
|
168
|
+
impl Listener {
|
169
|
+
pub fn to_tokio_listener(&self) -> TokioListener {
|
170
|
+
match self {
|
171
|
+
Listener::Tcp(listener) => TokioListener::Tcp {
|
172
|
+
listener: TokioTcpListener::from_std(TcpListener::try_clone(listener).unwrap())
|
173
|
+
.unwrap(),
|
174
|
+
host: listener
|
175
|
+
.local_addr()
|
176
|
+
.unwrap()
|
177
|
+
.ip()
|
178
|
+
.to_canonical()
|
179
|
+
.to_string(),
|
180
|
+
port: listener.local_addr().unwrap().port(),
|
181
|
+
},
|
182
|
+
Listener::TcpTls((listener, acceptor)) => TokioListener::TcpTls {
|
183
|
+
listener: TokioTcpListener::from_std(TcpListener::try_clone(listener).unwrap())
|
184
|
+
.unwrap(),
|
185
|
+
acceptor: acceptor.clone(),
|
186
|
+
host: listener
|
187
|
+
.local_addr()
|
188
|
+
.unwrap()
|
189
|
+
.ip()
|
190
|
+
.to_canonical()
|
191
|
+
.to_string(),
|
192
|
+
port: listener.local_addr().unwrap().port(),
|
193
|
+
},
|
194
|
+
Listener::Unix(listener) => TokioListener::Unix {
|
195
|
+
listener: TokioUnixListener::from_std(UnixListener::try_clone(listener).unwrap())
|
196
|
+
.unwrap(),
|
197
|
+
},
|
198
|
+
Listener::UnixTls((listener, acceptor)) => TokioListener::UnixTls {
|
199
|
+
listener: TokioUnixListener::from_std(UnixListener::try_clone(listener).unwrap())
|
200
|
+
.unwrap(),
|
201
|
+
acceptor: acceptor.clone(),
|
202
|
+
},
|
203
|
+
}
|
204
|
+
}
|
205
|
+
}
|
206
|
+
|
207
|
+
impl TryFrom<Bind> for Listener {
    type Error = itsi_error::ItsiError;

    /// Binds the socket described by `bind` and wraps it in the matching
    /// [`Listener`] variant, attaching a TLS acceptor when TLS is
    /// configured.
    ///
    /// The `unwrap()`s on `port` / `tls_config` and the `unreachable!()`
    /// arm assume invariants established when the `Bind` was parsed
    /// (IP binds always carry a port; HTTPS binds always carry a TLS
    /// config; non-HTTP(S) protocols never reach an IP bind) — TODO
    /// confirm against `Bind`'s parser.
    fn try_from(bind: Bind) -> std::result::Result<Self, Self::Error> {
        let bound = match bind.address {
            BindAddress::Ip(addr) => match bind.protocol {
                BindProtocol::Http => Listener::Tcp(connect_tcp_socket(addr, bind.port.unwrap())?),
                BindProtocol::Https => {
                    let tcp_listener = connect_tcp_socket(addr, bind.port.unwrap())?;
                    let tls_acceptor = TlsAcceptor::from(Arc::new(bind.tls_config.unwrap()));
                    Listener::TcpTls((tcp_listener, tls_acceptor))
                }
                _ => unreachable!(),
            },
            // For Unix sockets, TLS is selected by the presence of a TLS
            // config rather than by protocol.
            BindAddress::UnixSocket(path) => match bind.tls_config {
                Some(tls_config) => {
                    let tls_acceptor = TlsAcceptor::from(Arc::new(tls_config));
                    Listener::UnixTls((connect_unix_socket(&path)?, tls_acceptor))
                }
                None => Listener::Unix(connect_unix_socket(&path)?),
            },
        };
        Ok(bound)
    }
}
|
232
|
+
|
233
|
+
fn connect_tcp_socket(addr: IpAddr, port: u16) -> Result<TcpListener> {
|
234
|
+
let domain = match addr {
|
235
|
+
IpAddr::V4(_) => Domain::IPV4,
|
236
|
+
IpAddr::V6(_) => Domain::IPV6,
|
237
|
+
};
|
238
|
+
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
|
239
|
+
let socket_address: SocketAddr = SocketAddr::new(addr, port);
|
240
|
+
socket.set_nonblocking(true).ok();
|
241
|
+
socket.set_nodelay(true).ok();
|
242
|
+
socket.set_recv_buffer_size(1_048_576).ok();
|
243
|
+
socket.bind(&socket_address.into())?;
|
244
|
+
socket.listen(1024)?;
|
245
|
+
Ok(socket.into())
|
246
|
+
}
|
247
|
+
|
248
|
+
fn connect_unix_socket(path: &PathBuf) -> Result<UnixListener> {
|
249
|
+
let _ = std::fs::remove_file(path);
|
250
|
+
let socket = Socket::new(Domain::UNIX, Type::STREAM, None)?;
|
251
|
+
socket.set_nonblocking(true).ok();
|
252
|
+
let socket_address = socket2::SockAddr::unix(path)?;
|
253
|
+
|
254
|
+
info!("Binding to {:?}", path);
|
255
|
+
socket.bind(&socket_address)?;
|
256
|
+
socket.listen(1024)?;
|
257
|
+
|
258
|
+
Ok(socket.into())
|
259
|
+
}
|
@@ -0,0 +1,196 @@
|
|
1
|
+
use super::serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode};
|
2
|
+
use itsi_error::{ItsiError, Result};
|
3
|
+
use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread, fork};
|
4
|
+
use itsi_tracing::error;
|
5
|
+
use nix::{
|
6
|
+
errno::Errno,
|
7
|
+
sys::{
|
8
|
+
signal::{
|
9
|
+
kill,
|
10
|
+
Signal::{SIGKILL, SIGTERM},
|
11
|
+
},
|
12
|
+
wait::{waitpid, WaitPidFlag, WaitStatus},
|
13
|
+
},
|
14
|
+
unistd::{setpgid, Pid},
|
15
|
+
};
|
16
|
+
use parking_lot::Mutex;
|
17
|
+
use std::{
|
18
|
+
process::{self, exit},
|
19
|
+
sync::Arc,
|
20
|
+
time::{Duration, Instant},
|
21
|
+
};
|
22
|
+
use sysinfo::System;
|
23
|
+
|
24
|
+
use tokio::{sync::watch, time::sleep};
|
25
|
+
use tracing::{info, instrument, warn};
|
26
|
+
|
27
|
+
/// A single worker-process slot managed by cluster mode.
///
/// `child_pid` is `None` until (and between) boots; `started_at` marks
/// the most recent boot and drives crash-loop detection in
/// `just_started` / `boot_if_dead`. Clones share the same pid cell.
#[derive(Clone, Debug)]
pub struct ProcessWorker {
    pub worker_id: usize,
    pub child_pid: Arc<Mutex<Option<Pid>>>,
    pub started_at: Instant,
}
|
33
|
+
|
34
|
+
impl Default for ProcessWorker {
|
35
|
+
fn default() -> Self {
|
36
|
+
Self {
|
37
|
+
worker_id: 0,
|
38
|
+
child_pid: Arc::new(Mutex::new(None)),
|
39
|
+
started_at: Instant::now(),
|
40
|
+
}
|
41
|
+
}
|
42
|
+
}
|
43
|
+
|
44
|
+
impl ProcessWorker {
    /// (Re)boots this worker slot.
    ///
    /// If a previous child is still alive it is sent SIGTERM and the pid
    /// cell is cleared. Then the process forks: the parent records the
    /// new child pid; the child puts itself in its own process group,
    /// runs single-process serve mode, and exits — `boot` never returns
    /// normally in the child.
    #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]
    pub(crate) fn boot(&self, cluster_template: Arc<ClusterMode>) -> Result<()> {
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            if self.is_alive() {
                if let Err(e) = kill(pid, SIGTERM) {
                    info!("Failed to send SIGTERM to process {}: {}", pid, e);
                }
            }
            *self.child_pid.lock() = None;
        }

        // `fork` runs the configured after_fork callback in the child;
        // it returns Some(child_pid) in the parent, None in the child.
        match call_with_gvl(|_ruby| fork(cluster_template.server.after_fork.lock().clone())) {
            Some(pid) => {
                *self.child_pid.lock() = Some(Pid::from_raw(pid));
            }
            None => {
                // Child: own process group so group-wide signals from the
                // parent don't hit the cluster supervisor itself.
                if let Err(e) = setpgid(
                    Pid::from_raw(process::id() as i32),
                    Pid::from_raw(process::id() as i32),
                ) {
                    error!("Failed to set process group ID: {}", e);
                }
                match SingleMode::new(
                    cluster_template.server.clone(),
                    cluster_template.listeners.clone(),
                    cluster_template.lifecycle_channel.clone(),
                ) {
                    Ok(single_mode) => {
                        Arc::new(single_mode).run().ok();
                    }
                    Err(e) => {
                        error!("Failed to boot into worker mode: {}", e);
                    }
                }
                // The child never returns into cluster mode.
                exit(0)
            }
        }
        Ok(())
    }

    /// Resident memory of the child process in bytes (via sysinfo), or
    /// `None` when there is no child or it cannot be inspected.
    pub(crate) fn memory_usage(&self) -> Option<u64> {
        if let Some(pid) = *self.child_pid.lock() {
            let s = System::new_all();
            if let Some(process) = s.process(sysinfo::Pid::from(pid.as_raw() as usize)) {
                return Some(process.memory());
            }
        }
        None
    }

    /// Gracefully shuts the current child down, then boots a replacement
    /// on a fresh Ruby thread. Returns whether the boot call reported
    /// success.
    pub(crate) async fn reboot(&self, cluster_template: Arc<ClusterMode>) -> Result<bool> {
        self.graceful_shutdown(cluster_template.clone()).await;
        let self_clone = self.clone();
        let (booted_sender, mut booted_receiver) = watch::channel(false);
        // Boot must happen on a Ruby thread (it calls back into the VM),
        // with the GVL released around the blocking work.
        create_ruby_thread(move || {
            call_without_gvl(move || {
                if self_clone.boot(cluster_template).is_ok() {
                    booted_sender.send(true).ok()
                } else {
                    booted_sender.send(false).ok()
                };
            })
        });

        booted_receiver
            .changed()
            .await
            .map_err(|_| ItsiError::InternalServerError("Failed to boot worker".to_owned()))?;

        let guard = booted_receiver.borrow();
        let result = guard.to_owned();
        // Not very robust, we should check to see if the worker is actually listening before considering this successful.
        sleep(Duration::from_secs(1)).await;
        Ok(result)
    }

    /// Requests shutdown via SIGTERM, polls liveness every 100ms until
    /// the configured `shutdown_timeout` elapses, then SIGKILLs the child
    /// if it is still alive.
    pub(crate) async fn graceful_shutdown(&self, cluster_template: Arc<ClusterMode>) {
        let self_clone = self.clone();
        self_clone.request_shutdown();
        let force_kill_time =
            Instant::now() + Duration::from_secs_f64(cluster_template.server.shutdown_timeout);
        while self_clone.is_alive() && force_kill_time > Instant::now() {
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
        if self_clone.is_alive() {
            self_clone.force_kill();
        }
    }

    /// Restarts the worker if its child has died. Returns `false` only
    /// when the child died within ~2s of booting (crash loop), in which
    /// case no restart is attempted.
    pub(crate) fn boot_if_dead(&self, cluster_template: Arc<ClusterMode>) -> bool {
        if !self.is_alive() {
            if self.just_started() {
                error!(
                    "Worker in crash loop {:?}. Refusing to restart",
                    self.child_pid.lock()
                );
                return false;
            } else {
                let self_clone = self.clone();
                create_ruby_thread(move || {
                    call_without_gvl(move || {
                        self_clone.boot(cluster_template).ok();
                    })
                });
            }
        }
        true
    }

    /// Asks the child to shut down by sending SIGTERM (best-effort).
    pub(crate) fn request_shutdown(&self) {
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            if let Err(e) = kill(pid, SIGTERM) {
                error!("Failed to send SIGTERM to process {}: {}", pid, e);
            }
        }
    }

    /// Forcibly kills the child with SIGKILL (best-effort).
    pub(crate) fn force_kill(&self) {
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            if let Err(e) = kill(pid, SIGKILL) {
                error!("Failed to force kill process {}: {}", pid, e);
            }
        }
    }

    /// True when the last boot happened under 2 seconds ago — used as the
    /// crash-loop heuristic in `boot_if_dead`.
    pub(crate) fn just_started(&self) -> bool {
        let now = Instant::now();
        now.duration_since(self.started_at).as_millis() < 2000
    }

    /// Liveness check for the child process.
    ///
    /// First reaps/inspects it with non-blocking `waitpid` (an exited or
    /// signaled status means dead), then double-checks existence with
    /// `kill(pid, 0)`; ESRCH means the process is gone.
    pub(crate) fn is_alive(&self) -> bool {
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            match waitpid(pid, Some(WaitPidFlag::WNOHANG)) {
                Ok(WaitStatus::Exited(_, _)) | Ok(WaitStatus::Signaled(_, _, _)) => {
                    return false;
                }
                Ok(WaitStatus::StillAlive) | Ok(_) => {}
                Err(_) => return false,
            }
            match kill(pid, None) {
                Ok(_) => true,
                Err(errno) => !matches!(errno, Errno::ESRCH),
            }
        } else {
            false
        }
    }
}
|