itsi-scheduler 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +219 -23
- data/Rakefile +7 -1
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +70 -0
- data/ext/itsi_error/src/lib.rs +10 -37
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +2 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
- data/ext/itsi_rb_helpers/src/lib.rs +90 -10
- data/ext/itsi_scheduler/Cargo.toml +9 -1
- data/ext/itsi_scheduler/extconf.rb +1 -1
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +31 -10
- data/ext/itsi_server/Cargo.toml +41 -0
- data/ext/itsi_server/extconf.rb +6 -0
- data/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
- data/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
- data/ext/itsi_server/src/body_proxy/mod.rs +2 -0
- data/ext/itsi_server/src/lib.rs +103 -0
- data/ext/itsi_server/src/request/itsi_request.rs +277 -0
- data/ext/itsi_server/src/request/mod.rs +1 -0
- data/ext/itsi_server/src/response/itsi_response.rs +347 -0
- data/ext/itsi_server/src/response/mod.rs +1 -0
- data/ext/itsi_server/src/server/bind.rs +168 -0
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +13 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +5 -0
- data/ext/itsi_server/src/server/itsi_server.rs +230 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +8 -0
- data/ext/itsi_server/src/server/listener.rs +259 -0
- data/ext/itsi_server/src/server/mod.rs +11 -0
- data/ext/itsi_server/src/server/process_worker.rs +196 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +253 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +238 -0
- data/ext/itsi_server/src/server/signal.rs +57 -0
- data/ext/itsi_server/src/server/thread_worker.rs +368 -0
- data/ext/itsi_server/src/server/tls.rs +152 -0
- data/ext/itsi_tracing/Cargo.toml +4 -0
- data/ext/itsi_tracing/src/lib.rs +36 -6
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +137 -1
- metadata +38 -4
data/ext/itsi_rb_helpers/src/heap_value.rs:

@@ -0,0 +1,121 @@
+use magnus::IntoValue;
+use magnus::rb_sys::AsRawValue;
+use magnus::value::BoxValue;
+use magnus::{Ruby, Value, value::ReprValue};
+use std::fmt::{self, Debug, Formatter};
+use std::ops::Deref;
+
+/// HeapVal is a wrapper for heap-allocated magnus ReprValues
+/// that is marked as thread-safe(Send and Sync)
+/// It's up to the user to actually ensure this though,
+/// typically by only interacting with the value from a thread which
+/// holds the GVL.
+pub struct HeapValue<T>(pub BoxValue<T>)
+where
+    T: ReprValue;
+
+impl<T> PartialEq for HeapValue<T>
+where
+    T: ReprValue,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.0.as_raw() == other.0.as_raw()
+    }
+}
+
+impl<T> Deref for HeapValue<T>
+where
+    T: ReprValue,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T> HeapValue<T>
+where
+    T: ReprValue,
+{
+    pub fn inner(self) -> T {
+        *self.0
+    }
+}
+
+impl<T> IntoValue for HeapValue<T>
+where
+    T: ReprValue,
+{
+    fn into_value_with(self, _: &Ruby) -> Value {
+        self.0.into_value()
+    }
+}
+
+impl<T> From<T> for HeapValue<T>
+where
+    T: ReprValue,
+{
+    fn from(value: T) -> Self {
+        HeapValue(BoxValue::new(value))
+    }
+}
+
+impl<T> Clone for HeapValue<T>
+where
+    T: ReprValue + Clone,
+{
+    fn clone(&self) -> Self {
+        HeapValue(BoxValue::new(*self.0.deref()))
+    }
+}
+
+impl<T> Debug for HeapValue<T>
+where
+    T: ReprValue + Debug,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.0)
+    }
+}
+
+unsafe impl<T> Send for HeapValue<T> where T: ReprValue {}
+unsafe impl<T> Sync for HeapValue<T> where T: ReprValue {}
+
+/// HeapVal is a wrapper for heap-allocated magnus Values
+/// that is marked as thread-safe(Send and Sync)
+/// It's up to the user to actually ensure this though,
+/// typically by only interacting with the value from a thread which
+/// holds the GVL.
+pub struct HeapVal(HeapValue<Value>);
+impl Deref for HeapVal {
+    type Target = Value;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl IntoValue for HeapVal {
+    fn into_value_with(self, _: &Ruby) -> Value {
+        self.0.into_value()
+    }
+}
+
+impl From<Value> for HeapVal {
+    fn from(value: Value) -> Self {
+        HeapVal(HeapValue(BoxValue::new(value)))
+    }
+}
+
+impl Clone for HeapVal {
+    fn clone(&self) -> Self {
+        HeapVal(HeapValue(BoxValue::new(*self.0.deref())))
+    }
+}
+
+impl Debug for HeapVal {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.0)
+    }
+}
data/ext/itsi_rb_helpers/src/lib.rs:

@@ -1,23 +1,43 @@
-use std::{os::raw::c_void, ptr::null_mut};
+use std::{os::raw::c_void, ptr::null_mut, sync::Arc};
 
+use magnus::{
+    RArray, Ruby, Thread, Value,
+    rb_sys::FromRawValue,
+    value::{LazyId, ReprValue},
+};
 use rb_sys::{
-    rb_thread_call_with_gvl, rb_thread_call_without_gvl, rb_thread_create,
+    rb_thread_call_with_gvl, rb_thread_call_without_gvl, rb_thread_create, rb_thread_schedule,
+    rb_thread_wakeup,
 };
 
-pub fn create_ruby_thread<F>(f: F)
+mod heap_value;
+pub use heap_value::{HeapVal, HeapValue};
+static ID_FORK: LazyId = LazyId::new("fork");
+static ID_LIST: LazyId = LazyId::new("list");
+static ID_EQ: LazyId = LazyId::new("==");
+static ID_ALIVE: LazyId = LazyId::new("alive?");
+static ID_THREAD_VARIABLE_GET: LazyId = LazyId::new("thread_variable_get");
+
+pub fn schedule_thread() {
+    unsafe {
+        rb_thread_schedule();
+    };
+}
+pub fn create_ruby_thread<F>(f: F) -> Thread
 where
-    F: FnOnce()
+    F: FnOnce() + Send + 'static,
 {
     extern "C" fn trampoline<F>(ptr: *mut c_void) -> u64
     where
-        F: FnOnce()
+        F: FnOnce(),
     {
         // Reconstruct the boxed Option<F> that holds our closure.
         let boxed_closure: Box<Option<F>> = unsafe { Box::from_raw(ptr as *mut Option<F>) };
         // Extract the closure. (The Option should be Some; panic otherwise.)
         let closure = (*boxed_closure).expect("Closure already taken");
         // Call the closure and return its result.
-        closure()
+        closure();
+        0
     }
 
     // Box the closure (wrapped in an Option) to create a stable pointer.

@@ -26,7 +46,10 @@ where
 
     // Call rb_thread_create with our trampoline and boxed closure.
     unsafe {
-        rb_thread_create(Some(trampoline::<F>), ptr);
+        let thread = rb_thread_create(Some(trampoline::<F>), ptr);
+        rb_thread_wakeup(thread);
+        rb_thread_schedule();
+        Thread::from_value(Value::from_raw(thread)).unwrap()
     }
 }
 

@@ -67,18 +90,18 @@ where
 
 pub fn call_with_gvl<F, R>(f: F) -> R
 where
-    F: FnOnce() -> R,
+    F: FnOnce(Ruby) -> R,
 {
     extern "C" fn trampoline<F, R>(arg: *mut c_void) -> *mut c_void
     where
-        F: FnOnce() -> R,
+        F: FnOnce(Ruby) -> R,
     {
         // 1) Reconstruct the Box that holds our closure
         let closure_ptr = arg as *mut Option<F>;
         let closure = unsafe { (*closure_ptr).take().expect("Closure already taken") };
 
         // 2) Call the user’s closure
-        let result = closure();
+        let result = closure(Ruby::get().unwrap());
 
         // 3) Box up the result so we can return a pointer to it
         let boxed_result = Box::new(result);

@@ -96,3 +119,60 @@ where
     let result_box = unsafe { Box::from_raw(raw_result_ptr as *mut R) };
     *result_box
 }
+
+pub fn fork(after_fork: Arc<Option<impl Fn()>>) -> Option<i32> {
+    let ruby = Ruby::get().unwrap();
+    let fork_result = ruby
+        .module_kernel()
+        .funcall::<_, _, Option<i32>>(*ID_FORK, ())
+        .unwrap();
+    if fork_result.is_none() {
+        if let Some(f) = &*after_fork {
+            f()
+        }
+    }
+    fork_result
+}
+
+pub fn kill_threads<T>(threads: Vec<T>)
+where
+    T: ReprValue,
+{
+    for thr in &threads {
+        let alive: bool = thr
+            .funcall(*ID_ALIVE, ())
+            .expect("Failed to check if thread is alive");
+        if !alive {
+            eprintln!("Thread killed");
+            break;
+        }
+        eprintln!("Killing thread {:?}", thr.as_value());
+        thr.funcall::<_, _, Value>("terminate", ())
+            .expect("Failed to kill thread");
+    }
+}
+
+pub fn terminate_non_fork_safe_threads() {
+    let ruby = Ruby::get().unwrap();
+    let thread_class = ruby.class_thread();
+    let current: Thread = ruby.thread_current();
+    let threads: RArray = thread_class
+        .funcall(*ID_LIST, ())
+        .expect("Failed to list Ruby threads");
+
+    let non_fork_safe_threads = threads
+        .into_iter()
+        .filter_map(|v| {
+            let v_thread = Thread::from_value(v).unwrap();
+            let non_fork_safe = !v_thread
+                .funcall::<_, _, bool>(*ID_EQ, (current,))
+                .unwrap_or(false)
+                && !v_thread
+                    .funcall::<_, _, bool>(*ID_THREAD_VARIABLE_GET, (ruby.sym_new("fork_safe"),))
+                    .unwrap_or(false);
+            if non_fork_safe { Some(v_thread) } else { None }
+        })
+        .collect::<Vec<_>>();
+
+    kill_threads(non_fork_safe_threads);
+}
data/ext/itsi_scheduler/Cargo.toml:

@@ -10,7 +10,15 @@ publish = false
 crate-type = ["cdylib"]
 
 [dependencies]
-magnus = { version = "0.
+magnus = { version = "0.7.1", features = ["rb-sys", "bytes"] }
+derive_more = { version = "2.0.1", features = ["debug"] }
 itsi_tracing = { path = "../itsi_tracing" }
 itsi_rb_helpers = { path = "../itsi_rb_helpers" }
 itsi_error = { path = "../itsi_error" }
+itsi_instrument_entry = { path = "../itsi_instrument_entry" }
+parking_lot = "0.12.3"
+mio = { version = "1.0.3", features = ["os-poll", "os-ext"] }
+rb-sys = "0.9.105"
+bytes = "1.10.1"
+nix = "0.29.0"
+tracing = "0.1.41"
data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs:

@@ -0,0 +1,56 @@
+use std::os::fd::RawFd;
+
+use itsi_error::{ItsiError, Result};
+use mio::Interest;
+use nix::libc::{fcntl, poll, pollfd, F_GETFL, F_SETFL, O_NONBLOCK};
+
+use super::Readiness;
+
+pub fn set_nonblocking(fd: RawFd) -> itsi_error::Result<()> {
+    unsafe {
+        let flags = fcntl(fd, F_GETFL);
+        if flags < 0 {
+            return Err(ItsiError::ArgumentError(format!(
+                "fcntl(F_GETFL) error for fd {}: {}",
+                fd,
+                std::io::Error::last_os_error()
+            )));
+        }
+        let new_flags = flags | O_NONBLOCK;
+        if fcntl(fd, F_SETFL, new_flags) < 0 {
+            return Err(ItsiError::ArgumentError(format!(
+                "fcntl(F_SETFL) error for fd {}: {}",
+                fd,
+                std::io::Error::last_os_error()
+            )));
+        }
+    }
+    Ok(())
+}
+
+pub fn poll_readiness(fd: RawFd, events: i16) -> Option<Readiness> {
+    let mut pfd = pollfd {
+        fd,
+        events,
+        revents: 0,
+    };
+    let ret = unsafe { poll(&mut pfd as *mut pollfd, 1, 0) };
+    if ret > 0 {
+        return Some(Readiness(pfd.revents));
+    }
+    None
+}
+
+pub fn build_interest(events: i16) -> Result<Interest> {
+    let mut interest_opt = None;
+    if events & 1 != 0 {
+        interest_opt = Some(Interest::READABLE);
+    }
+    if events & 4 != 0 {
+        interest_opt = Some(match interest_opt {
+            Some(i) => i | Interest::WRITABLE,
+            None => Interest::WRITABLE,
+        });
+    }
+    interest_opt.ok_or_else(|| ItsiError::ArgumentError("No valid event specified".to_owned()))
+}
data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs:

@@ -0,0 +1,44 @@
+use derive_more::Debug;
+use mio::{event::Source, unix::SourceFd, Interest, Token};
+use std::os::fd::RawFd;
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct IoWaiter {
+    pub fd: RawFd,
+    pub readiness: i16,
+    pub token: Token,
+}
+
+impl IoWaiter {
+    pub fn new(fd: RawFd, readiness: i16, token: Token) -> Self {
+        Self {
+            fd,
+            readiness,
+            token,
+        }
+    }
+}
+
+impl Source for IoWaiter {
+    fn register(
+        &mut self,
+        registry: &mio::Registry,
+        token: Token,
+        interests: Interest,
+    ) -> std::io::Result<()> {
+        SourceFd(&self.fd).register(registry, token, interests)
+    }
+
+    fn reregister(
+        &mut self,
+        registry: &mio::Registry,
+        token: Token,
+        interests: Interest,
+    ) -> std::io::Result<()> {
+        SourceFd(&self.fd).reregister(registry, token, interests)
+    }
+
+    fn deregister(&mut self, registry: &mio::Registry) -> std::io::Result<()> {
+        SourceFd(&self.fd).deregister(registry)
+    }
+}
data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs:

@@ -0,0 +1,44 @@
+use std::{
+    cmp::Ordering,
+    time::{Duration, Instant},
+};
+
+use mio::Token;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Timer {
+    pub wake_time: Instant,
+    pub token: Token,
+}
+impl PartialOrd for Timer {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for Timer {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // Reverse the order: a timer with an earlier wake_time should be considered greater.
+        other
+            .wake_time
+            .cmp(&self.wake_time)
+            .then_with(|| other.token.cmp(&self.token))
+    }
+}
+
+impl Timer {
+    pub fn new(wake_in: Duration, token: Token) -> Self {
+        Self {
+            wake_time: Instant::now() + wake_in,
+            token,
+        }
+    }
+
+    pub fn is_due(&self) -> bool {
+        self.wake_time <= Instant::now()
+    }
+
+    pub(crate) fn duration(&self) -> Option<Duration> {
+        self.wake_time.checked_duration_since(Instant::now())
+    }
+}