itsi-scheduler 0.1.9 → 0.1.11
This diff shows the changes between publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/ext/itsi_rb_helpers/src/lib.rs +27 -4
- data/ext/itsi_server/Cargo.toml +4 -1
- data/ext/itsi_server/src/lib.rs +69 -1
- data/ext/itsi_server/src/request/itsi_request.rs +2 -9
- data/ext/itsi_server/src/response/itsi_response.rs +2 -2
- data/ext/itsi_server/src/server/bind.rs +16 -12
- data/ext/itsi_server/src/server/itsi_server.rs +43 -49
- data/ext/itsi_server/src/server/listener.rs +9 -9
- data/ext/itsi_server/src/server/process_worker.rs +10 -3
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +15 -9
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +124 -111
- data/ext/itsi_server/src/server/signal.rs +1 -4
- data/ext/itsi_server/src/server/thread_worker.rs +52 -20
- data/ext/itsi_server/src/server/tls.rs +1 -1
- data/lib/itsi/scheduler/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '06169b51c9344ad3892f08cf099c459f9bea7377d7e7b3342a54b9314e7f0c8f'
+  data.tar.gz: d18702aac863d7bf49c100cbcd0be525031d8ed7f334368d507c9ffee53abcbd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 54a585ef25065bcf8e038a87d80a214a1a17aac9f5510a083d6598928d48c879866d89723c399473faf8ef96f8464a5c2b0b3f3d0d24cdc551e11aae9574b9fe
+  data.tar.gz: 3c102f1994d0c6beee8473261fe49a206addcd04f80afdc09a0eaf8ebf6d31f738cbfbc4afa00dff0d9790a65fa426e0bd7e34c4e7f341231a9b4c036bc0583d
data/ext/itsi_rb_helpers/src/lib.rs
CHANGED
@@ -1,7 +1,8 @@
-use std::{os::raw::c_void, ptr::null_mut
+use std::{os::raw::c_void, ptr::null_mut};
 
 use magnus::{
     RArray, Ruby, Thread, Value,
+    block::Proc,
     rb_sys::FromRawValue,
     value::{LazyId, ReprValue},
 };
@@ -17,6 +18,7 @@ static ID_LIST: LazyId = LazyId::new("list");
 static ID_EQ: LazyId = LazyId::new("==");
 static ID_ALIVE: LazyId = LazyId::new("alive?");
 static ID_THREAD_VARIABLE_GET: LazyId = LazyId::new("thread_variable_get");
+static ID_BACKTRACE: LazyId = LazyId::new("backtrace");
 
 pub fn schedule_thread() {
     unsafe {
@@ -120,20 +122,30 @@ where
     *result_box
 }
 
-pub fn fork(after_fork:
+pub fn fork(after_fork: Option<HeapValue<Proc>>) -> Option<i32> {
     let ruby = Ruby::get().unwrap();
     let fork_result = ruby
         .module_kernel()
         .funcall::<_, _, Option<i32>>(*ID_FORK, ())
         .unwrap();
     if fork_result.is_none() {
-        if let Some(
-
+        if let Some(proc) = after_fork {
+            call_proc_and_log_errors(proc)
         }
     }
     fork_result
 }
 
+pub fn call_proc_and_log_errors(proc: HeapValue<Proc>) {
+    if let Err(e) = proc.call::<_, Value>(()) {
+        if let Some(value) = e.value() {
+            print_rb_backtrace(value);
+        } else {
+            eprintln!("Error occurred {:?}", e);
+        }
+    }
+}
+
 pub fn kill_threads<T>(threads: Vec<T>)
 where
     T: ReprValue,
@@ -176,3 +188,14 @@ pub fn terminate_non_fork_safe_threads() {
 
     kill_threads(non_fork_safe_threads);
 }
+
+pub fn print_rb_backtrace(rb_err: Value) {
+    let backtrace = rb_err
+        .funcall::<_, _, Vec<String>>(*ID_BACKTRACE, ())
+        .unwrap_or_default();
+
+    eprintln!("Ruby exception {:?}", rb_err);
+    for line in backtrace {
+        eprintln!("{}", line);
+    }
+}
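The new `call_proc_and_log_errors` and `print_rb_backtrace` helpers centralise a pattern the `fork` path and the cluster hooks now share: invoke a Ruby proc, and on failure log the exception plus its backtrace rather than unwinding. A minimal pure-Rust sketch of the same shape, with a plain closure and a stand-in error type in place of magnus procs and `magnus::Error` (all names here are illustrative, not the gem's API):

// Stand-in error type: mirrors how a magnus::Error may or may not carry
// an underlying Ruby exception (and therefore a backtrace).
struct Error {
    message: String,
    backtrace: Option<Vec<String>>,
}

// Invoke the callback; on failure, log instead of propagating, matching
// the behaviour of call_proc_and_log_errors in the diff above.
fn call_and_log_errors(f: impl Fn() -> Result<(), Error>) {
    if let Err(e) = f() {
        match e.backtrace {
            Some(lines) => {
                eprintln!("Ruby exception {:?}", e.message);
                for line in lines {
                    eprintln!("{}", line);
                }
            }
            None => eprintln!("Error occurred {:?}", e.message),
        }
    }
}

fn main() {
    call_and_log_errors(|| Ok(())); // success: prints nothing
    call_and_log_errors(|| {
        Err(Error {
            message: "boom".into(),
            backtrace: Some(vec!["app.rb:3:in `hook'".into()]),
        })
    });
}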
data/ext/itsi_server/Cargo.toml
CHANGED
@@ -24,7 +24,7 @@ rcgen = { version = "0.13.2", features = ["x509-parser", "pem"] }
 base64 = "0.22.1"
 http-body-util = "0.1.2"
 hyper = { version = "1.5.0", features = ["full", "server", "http1", "http2"] }
-tokio = { version = "1", features = ["full"] }
+tokio = { version = "1.44.1", features = ["full"] }
 hyper-util = { version = "0.1.10", features = ["full"] }
 derive_more = { version = "2.0.1", features = ["debug"] }
 http = "1.3.1"
@@ -45,3 +45,6 @@ fs2 = "0.4.3"
 ring = "0.17.14"
 async-trait = "0.1.87"
 dirs = "6.0.0"
+regex = "1.11.1"
+route-recognizer = "0.3.1"
+fnv = "1.0.7"
data/ext/itsi_server/src/lib.rs
CHANGED
@@ -1,5 +1,8 @@
 use body_proxy::itsi_body_proxy::ItsiBodyProxy;
-use magnus::{
+use magnus::{
+    error::Result, function, method, value::Lazy, Module, Object, RClass, RHash, RModule, Ruby,
+};
+use regex::{Regex, RegexSet};
 use request::itsi_request::ItsiRequest;
 use response::itsi_response::ItsiResponse;
 use server::{itsi_server::Server, signal::reset_signal_handlers};
@@ -54,6 +57,70 @@ pub fn log_error(msg: String) {
     error!(msg);
 }
 
+const ROUTES: [&str; 39] = [
+    r"(?-u)^/organisations/(?<organisation_id>\d+)/users/(?<user_id>\d+)$",
+    r"(?-u)^/projects/(?<project_id>\d+)/tasks/(?<task_id>\d+)$",
+    r"(?-u)^/products/(?<product_id>\d+)(?:/reviews/(?<review_id>\d+))?$",
+    r"(?-u)^/orders/(?<order_id>\d+)/items(?:/(?<item_id>\d+))?$",
+    r"(?-u)^/posts/(?<post_id>\d+)/comments(?:/(?<comment_id>\d+))?$",
+    r"(?-u)^/teams/(?<team_id>\d+)(?:/members/(?<member_id>\d+))?$",
+    r"(?-u)^/categories/(?<category_id>\d+)/subcategories(?:/(?<subcategory_id>\d+))?$",
+    r"(?-u)^/departments/(?<department_id>\d+)/employees/(?<employee_id>\d+)$",
+    r"(?-u)^/events/(?<event_id>\d+)(?:/sessions/(?<session_id>\d+))?$",
+    r"(?-u)^/invoices/(?<invoice_id>\d+)/payments(?:/(?<payment_id>\d+))?$",
+    r"(?-u)^/tickets/(?<ticket_id>\d+)(?:/responses/(?<response_id>\d+))?$",
+    r"(?-u)^/forums/(?<forum_id>\d+)(?:/threads/(?<thread_id>\d+))?$",
+    r"(?-u)^/subscriptions/(?<subscription_id>\d+)/plans(?:/(?<plan_id>\d+))?$",
+    r"(?-u)^/profiles/(?<profile_id>\d+)/settings$",
+    r"(?-u)^/organizations/(?<organization_id>\d+)/billing(?:/(?<billing_id>\d+))?$",
+    r"(?-u)^/vendors/(?<vendor_id>\d+)/products(?:/(?<product_id>\d+))?$",
+    r"(?-u)^/courses/(?<course_id>\d+)/modules(?:/(?<module_id>\d+))?$",
+    r"(?-u)^/accounts/(?<account_id>\d+)(?:/transactions/(?<transaction_id>\d+))?$",
+    r"(?-u)^/warehouses/(?<warehouse_id>\d+)/inventory(?:/(?<inventory_id>\d+))?$",
+    r"(?-u)^/campaigns/(?<campaign_id>\d+)/ads(?:/(?<ad_id>\d+))?$",
+    r"(?-u)^/applications/(?<application_id>\d+)/stages(?:/(?<stage_id>\d+))?$",
+    r"(?-u)^/notifications/(?<notification_id>\d+)$",
+    r"(?-u)^/albums/(?<album_id>\d+)/photos(?:/(?<photo_id>\d+))?$",
+    r"(?-u)^/news/(?<news_id>\d+)/articles(?:/(?<article_id>\d+))?$",
+    r"(?-u)^/libraries/(?<library_id>\d+)/books(?:/(?<book_id>\d+))?$",
+    r"(?-u)^/universities/(?<university_id>\d+)/students(?:/(?<student_id>\d+))?$",
+    r"(?-u)^/banks/(?<bank_id>\d+)/branches(?:/(?<branch_id>\d+))?$",
+    r"(?-u)^/vehicles/(?<vehicle_id>\d+)/services(?:/(?<service_id>\d+))?$",
+    r"(?-u)^/hotels/(?<hotel_id>\d+)/rooms(?:/(?<room_id>\d+))?$",
+    r"(?-u)^/doctors/(?<doctor_id>\d+)/appointments(?:/(?<appointment_id>\d+))?$",
+    r"(?-u)^/gyms/(?<gym_id>\d+)/memberships(?:/(?<membership_id>\d+))?$",
+    r"(?-u)^/restaurants/(?<restaurant_id>\d+)/menus(?:/(?<menu_id>\d+))?$",
+    r"(?-u)^/parks/(?<park_id>\d+)/events(?:/(?<event_id>\d+))?$",
+    r"(?-u)^/theaters/(?<theater_id>\d+)/shows(?:/(?<show_id>\d+))?$",
+    r"(?-u)^/museums/(?<museum_id>\d+)/exhibits(?:/(?<exhibit_id>\d+))?$",
+    r"(?-u)^/stadiums/(?<stadium_id>\d+)/games(?:/(?<game_id>\d+))?$",
+    r"(?-u)^/schools/(?<school_id>\d+)/classes(?:/(?<class_id>\d+))?$",
+    r"(?-u)^/clubs/(?<club_id>\d+)/events(?:/(?<event_id>\d+))?$",
+    r"(?-u)^/festivals/(?<festival_id>\d+)/tickets(?:/(?<ticket_id>\d+))?$",
+];
+use std::sync::LazyLock;
+
+static REGEX_SET: LazyLock<RegexSet> = LazyLock::new(|| RegexSet::new(ROUTES).unwrap());
+static REGEXES: LazyLock<Vec<Regex>> =
+    LazyLock::new(|| ROUTES.iter().map(|&r| Regex::new(r).unwrap()).collect());
+
+fn match_route(input: String) -> Result<Option<(usize, Option<RHash>)>> {
+    if let Some(index) = REGEX_SET.matches(&input).iter().next() {
+        let regex = &REGEXES[index];
+        if let Some(captures) = regex.captures(&input) {
+            let params = RHash::with_capacity(captures.len());
+            for name in regex.capture_names().flatten() {
+                if let Some(value) = captures.name(name) {
+                    params.aset(name, value.as_str()).ok();
+                }
+            }
+            return Ok(Some((index, Some(params))));
+        }
+        return Ok(Some((index, None)));
+    }
+    Ok(None)
+}
+
 #[magnus::init]
 fn init(ruby: &Ruby) -> Result<()> {
     itsi_tracing::init();
@@ -62,6 +129,7 @@ fn init(ruby: &Ruby) -> Result<()> {
         .ok();
 
     let itsi = ruby.get_inner(&ITSI_MODULE);
+    itsi.define_singleton_method("match_route", function!(match_route, 1))?;
     itsi.define_singleton_method("log_debug", function!(log_debug, 1))?;
     itsi.define_singleton_method("log_info", function!(log_info, 1))?;
     itsi.define_singleton_method("log_warn", function!(log_warn, 1))?;
data/ext/itsi_server/src/request/itsi_request.rs
CHANGED
@@ -17,6 +17,7 @@ use http::{request::Parts, HeaderValue, Response, StatusCode};
 use http_body_util::{combinators::BoxBody, BodyExt, Empty};
 use hyper::{body::Incoming, Request};
 use itsi_error::from::CLIENT_CONNECTION_CLOSED;
+use itsi_rb_helpers::print_rb_backtrace;
 use itsi_tracing::{debug, error};
 use magnus::{
     error::{ErrorType, Result as MagnusResult},
@@ -33,7 +34,6 @@ use tokio::sync::{
 };
 static ID_CALL: LazyId = LazyId::new("call");
 static ID_MESSAGE: LazyId = LazyId::new("message");
-static ID_BACKTRACE: LazyId = LazyId::new("backtrace");
 
 #[derive(Debug)]
 #[magnus::wrap(class = "Itsi::Request", free_immediately, size)]
@@ -115,14 +115,7 @@ impl ItsiRequest {
             debug!("Connection closed by client");
             response.close();
         } else if let Some(rb_err) = err.value() {
-            let backtrace = rb_err
-                .funcall::<_, _, Vec<String>>(*ID_BACKTRACE, ())
-                .unwrap_or_default();
-
-            error!("Error occurred in Handler: {:?}", rb_err);
-            for line in backtrace {
-                error!("{}", line);
-            }
+            print_rb_backtrace(rb_err);
             response.internal_server_error(err.to_string());
         } else {
             response.internal_server_error(err.to_string());
data/ext/itsi_server/src/response/itsi_response.rs
CHANGED
@@ -77,7 +77,6 @@ impl ItsiResponse {
             (ReceiverStream::new(receiver), shutdown_rx),
             |(mut receiver, mut shutdown_rx)| async move {
                 if let RunningPhase::ShutdownPending = *shutdown_rx.borrow() {
-                    warn!("Disconnecting streaming client.");
                     return None;
                 }
                 loop {
@@ -280,7 +279,8 @@ impl ItsiResponse {
         if let Some(writer) = writer.write().as_ref() {
             writer
                 .blocking_send(Some(frame))
-                .map_err(|_| itsi_error::ItsiError::ClientConnectionClosed)
+                .map_err(|_| itsi_error::ItsiError::ClientConnectionClosed)
+                .ok();
         }
         Ok(0)
     }
data/ext/itsi_server/src/server/bind.rs
CHANGED
@@ -101,7 +101,7 @@ impl FromStr for Bind {
                     "IPv6 addresses must use [ ] when specifying a port".to_owned(),
                 ));
             } else {
-                (h,
+                (h, p.parse::<u16>().ok()) // Treat as a hostname
             }
         } else {
             (url, None)
@@ -110,18 +110,22 @@ impl FromStr for Bind {
         let address = if let Ok(ip) = host.parse::<IpAddr>() {
             BindAddress::Ip(ip)
         } else {
-
+            match protocol {
+                BindProtocol::Https | BindProtocol::Http => resolve_hostname(host)
+                    .map(BindAddress::Ip)
+                    .ok_or(ItsiError::ArgumentError(format!(
+                        "Failed to resolve hostname {}",
+                        host
+                    )))?,
+                BindProtocol::Unix | BindProtocol::Unixs => BindAddress::UnixSocket(host.into()),
+            }
         };
-
-            BindProtocol::
-            BindProtocol::
-            BindProtocol::
+
+        let port = match protocol {
+            BindProtocol::Http => port.or(Some(80)),
+            BindProtocol::Https => port.or(Some(443)),
+            BindProtocol::Unix => None,
+            BindProtocol::Unixs => None,
         };
 
         let tls_config = match protocol {
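The rewritten parser makes the scheme-to-port defaulting explicit: an explicit port always wins, `http`/`https` fall back to 80/443, and unix-socket binds carry no port. A small self-contained sketch of that rule (the enum mirrors the `BindProtocol` variants visible in the diff; `default_port` is a hypothetical helper, not the gem's API):

enum BindProtocol {
    Http,
    Https,
    Unix,
    Unixs,
}

// An explicit port always wins; http/https fall back to 80/443;
// unix sockets never carry a port.
fn default_port(protocol: BindProtocol, explicit: Option<u16>) -> Option<u16> {
    match protocol {
        BindProtocol::Http => explicit.or(Some(80)),
        BindProtocol::Https => explicit.or(Some(443)),
        BindProtocol::Unix | BindProtocol::Unixs => None,
    }
}

fn main() {
    assert_eq!(default_port(BindProtocol::Http, None), Some(80));
    assert_eq!(default_port(BindProtocol::Https, None), Some(443));
    assert_eq!(default_port(BindProtocol::Https, Some(8443)), Some(8443));
    assert_eq!(default_port(BindProtocol::Unix, None), None);
    assert_eq!(default_port(BindProtocol::Unixs, Some(80)), None);
    println!("ok");
}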
data/ext/itsi_server/src/server/itsi_server.rs
CHANGED
@@ -8,17 +8,17 @@ use super::{
 };
 use crate::{request::itsi_request::ItsiRequest, server::serve_strategy::ServeStrategy};
 use derive_more::Debug;
-use itsi_rb_helpers::{call_without_gvl, HeapVal};
+use itsi_rb_helpers::{call_without_gvl, HeapVal, HeapValue};
 use itsi_tracing::{error, run_silently};
 use magnus::{
     block::Proc,
     error::Result,
     scan_args::{get_kwargs, scan_args, Args, KwArgs, ScanArgsKw, ScanArgsOpt, ScanArgsRequired},
-    value::
-    ArgList, RHash, Ruby, Symbol, Value,
+    value::ReprValue,
+    ArgList, RArray, RHash, Ruby, Symbol, Value,
 };
 use parking_lot::{Mutex, RwLock};
-use std::{cmp::max, ops::Deref, sync::Arc};
+use std::{cmp::max, collections::HashMap, ops::Deref, sync::Arc};
 use tracing::{info, instrument};
 
 static DEFAULT_BIND: &str = "http://localhost:3000";
@@ -36,7 +36,6 @@ impl Deref for Server {
         &self.config
     }
 }
-type AfterFork = Mutex<Arc<Option<Box<dyn Fn() + Send + Sync>>>>;
 
 #[derive(Debug)]
 pub struct ServerConfig {
@@ -51,15 +50,14 @@ pub struct ServerConfig {
     pub script_name: String,
     pub(crate) binds: Mutex<Vec<Bind>>,
     #[debug(skip)]
-    pub
-    #[debug(skip)]
-    pub after_fork: AfterFork,
+    pub hooks: HashMap<String, HeapValue<Proc>>,
     pub scheduler_class: Option<String>,
     pub stream_body: Option<bool>,
     pub worker_memory_limit: Option<u64>,
     #[debug(skip)]
     pub(crate) strategy: RwLock<Option<ServeStrategy>>,
     pub silence: bool,
+    pub oob_gc_responses_threshold: Option<u64>,
 }
 
 #[derive(Debug)]
@@ -68,8 +66,6 @@ pub enum RequestJob {
     Shutdown,
 }
 
-// Define your helper function.
-// Here P, A, C correspond to the types for the first tuple, second tuple, and extra parameters respectively.
 fn extract_args<Req, Opt, Splat>(
     scan_args: &Args<(), (), (), (), RHash, ()>,
     primaries: &[&str],
@@ -80,20 +76,17 @@ where
     Opt: ScanArgsOpt,
     Splat: ScanArgsKw,
 {
-    // Combine the primary and rest names into one Vec of Symbols.
     let symbols: Vec<Symbol> = primaries
         .iter()
         .chain(rest.iter())
         .map(|&name| Symbol::new(name))
         .collect();
 
-    // Call the "slice" function with the combined symbols.
     let hash = scan_args
         .keywords
         .funcall::<_, _, RHash>("slice", symbols.into_arg_list_with(&Ruby::get().unwrap()))
         .unwrap();
 
-    // Finally, call get_kwargs with the original name slices.
     get_kwargs(hash, primaries, rest)
 }
 
@@ -129,14 +122,14 @@ impl Server {
         type Args2 = KwArgs<
             (),
             (
-                //
-                Option<
-                // After Fork
-                Option<Proc>,
+                // Hooks
+                Option<RHash>,
                 // Scheduler Class
                 Option<String>,
                 // Worker Memory Limit
                 Option<u64>,
+                // Out-of-band GC Responses Threshold
+                Option<u64>,
                 // Silence
                 Option<bool>,
             ),
@@ -160,14 +153,33 @@ impl Server {
             &scan_args,
             &[],
             &[
-                "
-                "after_fork",
+                "hooks",
                 "scheduler_class",
                 "worker_memory_limit",
+                "oob_gc_responses_threshold",
                 "silence",
             ],
         )?;
 
+        let hooks = args2
+            .optional
+            .0
+            .map(|rhash| -> Result<HashMap<String, HeapValue<Proc>>> {
+                let mut hook_map: HashMap<String, HeapValue<Proc>> = HashMap::new();
+                for pair in rhash.enumeratorize::<_, ()>("each", ()) {
+                    if let Some(pair_value) = RArray::from_value(pair?) {
+                        if let (Ok(key), Ok(value)) =
+                            (pair_value.entry::<Value>(0), pair_value.entry::<Proc>(1))
+                        {
+                            hook_map.insert(key.to_string(), HeapValue::from(value));
+                        }
+                    }
+                }
+                Ok(hook_map)
+            })
+            .transpose()?
+            .unwrap_or_default();
+
         let config = ServerConfig {
             app: HeapVal::from(args1.required.0),
             workers: max(args1.optional.0.unwrap_or(1), 1),
@@ -184,32 +196,16 @@ impl Server {
                 .collect::<itsi_error::Result<Vec<Bind>>>()?,
             ),
             stream_body: args1.optional.5,
-
-                opaque_proc
-                    .get_inner_with(&Ruby::get().unwrap())
-                    .call::<_, Value>(())
-                    .unwrap();
-                }) as Box<dyn FnOnce() + Send + Sync>
-            })),
-            after_fork: Mutex::new(Arc::new(args2.optional.1.map(|p| {
-                let opaque_proc = Opaque::from(p);
-                Box::new(move || {
-                    opaque_proc
-                        .get_inner_with(&Ruby::get().unwrap())
-                        .call::<_, Value>(())
-                        .unwrap();
-                }) as Box<dyn Fn() + Send + Sync>
-            }))),
-            scheduler_class: args2.optional.2.clone(),
-            worker_memory_limit: args2.optional.3,
-            silence: args2.optional.4.is_some_and(|s| s),
+            hooks,
+            scheduler_class: args2.optional.1.clone(),
+            worker_memory_limit: args2.optional.2,
             strategy: RwLock::new(None),
+            oob_gc_responses_threshold: args2.optional.3,
+            silence: args2.optional.4.is_some_and(|s| s),
         };
 
         if !config.silence {
-            if let Some(scheduler_class) = args2.optional.
+            if let Some(scheduler_class) = args2.optional.1 {
                 info!(scheduler_class, fiber_scheduler = true);
             } else {
                 info!(fiber_scheduler = false);
@@ -222,7 +218,7 @@ impl Server {
     }
 
     #[instrument(name = "Bind", skip_all, fields(binds=format!("{:?}", self.config.binds.lock())))]
-    pub(crate) fn build_listeners(&self) -> Result<
+    pub(crate) fn build_listeners(&self) -> Result<Vec<Listener>> {
         let listeners = self
             .config
             .binds
@@ -232,13 +228,13 @@ impl Server {
             .map(Listener::try_from)
            .collect::<std::result::Result<Vec<Listener>, _>>()?
            .into_iter()
-            .map(Arc::new)
            .collect::<Vec<_>>();
        info!("Bound {:?} listeners", listeners.len());
-        Ok(
+        Ok(listeners)
    }
 
-    pub(crate) fn build_strategy(self
+    pub(crate) fn build_strategy(self) -> Result<()> {
+        let listeners = self.build_listeners()?;
        let server = Arc::new(self);
        let server_clone = server.clone();
 
@@ -276,11 +272,9 @@ impl Server {
     fn build_and_run_strategy(&self) -> Result<()> {
         reset_signal_handlers();
         let rself = self.clone();
-        let listeners = self.build_listeners()?;
-        let listeners_clone = listeners.clone();
         call_without_gvl(move || -> Result<()> {
-            rself.clone().build_strategy(
-            if let Err(e) = rself.
+            rself.clone().build_strategy()?;
+            if let Err(e) = rself.strategy.read().as_ref().unwrap().run() {
                 error!("Error running server: {}", e);
                 rself.strategy.read().as_ref().unwrap().stop()?;
             }
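Replacing the dedicated `before_fork`/`after_fork` fields with a single `hooks: HashMap<String, HeapValue<Proc>>` means lifecycle callbacks are now looked up by name and silently skipped when absent. A rough pure-Rust analogue of that design, with boxed closures standing in for heap-pinned Ruby procs (`ServerConfig` here is a toy, not the gem's struct):

use std::collections::HashMap;

struct ServerConfig {
    // Named lifecycle hooks; in the gem these are Ruby procs kept on the heap.
    hooks: HashMap<String, Box<dyn Fn() + Send + Sync>>,
}

impl ServerConfig {
    // Look the hook up by name; a missing hook is simply a no-op.
    fn run_hook(&self, name: &str) {
        if let Some(hook) = self.hooks.get(name) {
            hook();
        }
    }
}

fn main() {
    let mut hooks: HashMap<String, Box<dyn Fn() + Send + Sync>> = HashMap::new();
    hooks.insert("before_fork".to_string(), Box::new(|| println!("before_fork fired")));
    hooks.insert("after_fork".to_string(), Box::new(|| println!("after_fork fired")));

    let config = ServerConfig { hooks };
    config.run_hook("before_fork");
    config.run_hook("after_fork");
    config.run_hook("nonexistent"); // silently skipped
}

One table keyed by name lets new hooks (like the `after_memory_threshold_reached` one added in this release) be introduced without changing the config struct again.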
data/ext/itsi_server/src/server/listener.rs
CHANGED
@@ -243,20 +243,20 @@ impl std::fmt::Display for SockAddr {
 }
 
 impl Listener {
-    pub fn
+    pub fn into_tokio_listener(self) -> TokioListener {
         match self {
-            Listener::Tcp(listener) =>
-                TokioTcpListener::from_std(
-
+            Listener::Tcp(listener) => {
+                TokioListener::Tcp(TokioTcpListener::from_std(listener).unwrap())
+            }
             Listener::TcpTls((listener, acceptor)) => TokioListener::TcpTls(
-                TokioTcpListener::from_std(
+                TokioTcpListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
-            Listener::Unix(listener) =>
-                TokioUnixListener::from_std(
-
+            Listener::Unix(listener) => {
+                TokioListener::Unix(TokioUnixListener::from_std(listener).unwrap())
+            }
             Listener::UnixTls((listener, acceptor)) => TokioListener::UnixTls(
-                TokioUnixListener::from_std(
+                TokioUnixListener::from_std(listener).unwrap(),
                 acceptor.clone(),
             ),
         }
data/ext/itsi_server/src/server/process_worker.rs
CHANGED
@@ -53,8 +53,8 @@ impl ProcessWorker {
             }
             *self.child_pid.lock() = None;
         }
-
-
+        match call_with_gvl(|_ruby| fork(cluster_template.server.hooks.get("after_fork").cloned()))
+        {
             Some(pid) => {
                 *self.child_pid.lock() = Some(Pid::from_raw(pid));
             }
@@ -67,7 +67,7 @@ impl ProcessWorker {
                 }
                 match SingleMode::new(
                     cluster_template.server.clone(),
-                    cluster_template.listeners.
+                    cluster_template.listeners.lock().drain(..).collect(),
                     cluster_template.lifecycle_channel.clone(),
                 ) {
                     Ok(single_mode) => {
@@ -83,6 +83,13 @@ impl ProcessWorker {
         Ok(())
     }
 
+    pub fn pid(&self) -> i32 {
+        if let Some(pid) = *self.child_pid.lock() {
+            return pid.as_raw();
+        }
+        0
+    }
+
     pub(crate) fn memory_usage(&self) -> Option<u64> {
         if let Some(pid) = *self.child_pid.lock() {
             let s = System::new_all();
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs
CHANGED
@@ -3,8 +3,11 @@ use crate::server::{
     process_worker::ProcessWorker,
 };
 use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
+};
 use itsi_tracing::{error, info, warn};
+use magnus::Value;
 use nix::{
     libc::{self, exit},
     unistd::Pid,
@@ -19,9 +22,9 @@ use tokio::{
     sync::{broadcast, watch, Mutex},
     time::{self, sleep},
 };
-use tracing::instrument;
+use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
-    pub listeners:
+    pub listeners: parking_lot::Mutex<Vec<Listener>>,
     pub server: Arc<Server>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
     pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
@@ -34,12 +37,9 @@ static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
 impl ClusterMode {
     pub fn new(
         server: Arc<Server>,
-        listeners:
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Self {
-        if let Some(f) = server.before_fork.lock().take() {
-            f();
-        }
         let process_workers = (0..server.workers)
             .map(|_| ProcessWorker {
                 worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
@@ -48,7 +48,7 @@ impl ClusterMode {
             .collect();
 
         Self {
-            listeners,
+            listeners: parking_lot::Mutex::new(listeners),
             server,
             process_workers: parking_lot::Mutex::new(process_workers),
             lifecycle_channel,
@@ -152,7 +152,7 @@ impl ClusterMode {
 
         tokio::select! {
             _ = monitor_handle => {
-
+                debug!("All children exited early, exit normally")
             }
             _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                 warn!("Graceful shutdown timeout reached, force killing remaining children");
@@ -191,6 +191,9 @@ impl ClusterMode {
     #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
+        if let Some(proc) = self.server.hooks.get("before_fork") {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
         self.process_workers
             .lock()
             .iter()
@@ -228,6 +231,9 @@ impl ClusterMode {
                 if let Some(current_mem_usage) = largest_worker.memory_usage() {
                     if current_mem_usage > memory_limit {
                         largest_worker.reboot(self_ref.clone()).await.ok();
+                        if let Some(hook) = self_ref.server.hooks.get("after_memory_threshold_reached") {
+                            call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok());
+                        }
                     }
                 }
             }
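The new `after_memory_threshold_reached` hook fires with the rebooted worker's PID whenever the monitor loop reboots the largest worker over its memory limit, which is why `ProcessWorker::pid()` was added alongside it. A compact sketch of that check-and-notify flow (`Worker` and the callback are stand-ins for `ProcessWorker` and the Ruby proc):

struct Worker {
    pid: i32,
    memory_usage: u64,
}

// Find the worker using the most memory; if it exceeds the limit, "reboot"
// it and fire the callback with its PID, as the cluster monitor loop does.
fn enforce_memory_limit(workers: &[Worker], limit: u64, on_threshold: impl Fn(i32)) {
    if let Some(largest) = workers.iter().max_by_key(|w| w.memory_usage) {
        if largest.memory_usage > limit {
            println!("rebooting worker {}", largest.pid);
            on_threshold(largest.pid);
        }
    }
}

fn main() {
    let workers = vec![
        Worker { pid: 100, memory_usage: 200 },
        Worker { pid: 101, memory_usage: 900 },
    ];
    enforce_memory_limit(&workers, 512, |pid| {
        println!("after_memory_threshold_reached({})", pid);
    });
}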
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
CHANGED
@@ -15,10 +15,13 @@ use hyper_util::{
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::print_rb_backtrace;
 use itsi_tracing::{debug, error, info};
 use nix::unistd::Pid;
+use parking_lot::Mutex;
 use std::{
     num::NonZeroU8,
+    panic,
     pin::Pin,
     sync::Arc,
     time::{Duration, Instant},
@@ -37,7 +40,7 @@ pub struct SingleMode {
     pub executor: Builder<TokioExecutor>,
     pub server: Arc<Server>,
     pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners:
+    pub(crate) listeners: Mutex<Vec<Listener>>,
     pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
     pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
@@ -52,18 +55,24 @@ impl SingleMode {
     #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
     pub(crate) fn new(
         server: Arc<Server>,
-        listeners:
+        listeners: Vec<Listener>,
         lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     ) -> Result<Self> {
         let (thread_workers, sender) = build_thread_workers(
+            server.clone(),
             Pid::this(),
             NonZeroU8::try_from(server.threads).unwrap(),
             server.app.clone(),
             server.scheduler_class.clone(),
-        )
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
         Ok(Self {
             executor: Builder::new(TokioExecutor::new()),
-            listeners,
+            listeners: Mutex::new(listeners),
             server,
             sender,
             thread_workers,
@@ -83,82 +92,81 @@ impl SingleMode {
     }
 
     pub fn stop(&self) -> Result<()> {
-        self.lifecycle_channel
-            .send(LifecycleEvent::Shutdown)
-            .expect("Failed to send shutdown event");
+        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
         Ok(())
     }
 
     #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         let mut listener_task_set = JoinSet::new();
-        let
-        let runtime = self_ref.build_runtime();
+        let runtime = self.build_runtime();
 
         runtime.block_on(async {
-            let
+            let tokio_listeners = self
+                .listeners.lock()
+                .drain(..)
+                .map(|list| {
+                    Arc::new(list.into_tokio_listener())
+                })
+                .collect::<Vec<_>>();
+            let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+            for listener in tokio_listeners.iter() {
+                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+                let listener_info = Arc::new(listener.listener_info());
+                let self_ref = self.clone();
+                let listener = listener.clone();
+                let shutdown_sender = shutdown_sender.clone();
 
-
+                let listener_clone = listener.clone();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let shutdown_receiver_clone = shutdown_receiver.clone();
+                listener_task_set.spawn(async move {
+                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                });
 
-
+                listener_task_set.spawn(async move {
+                    let strategy_clone = self_ref.clone();
+                    let mut acceptor_task_set = JoinSet::new();
+                    loop {
+                        tokio::select! {
+                            accept_result = listener.accept() => match accept_result {
+                                Ok(accept_result) => {
+                                    let strategy = strategy_clone.clone();
+                                    let listener_info = listener_info.clone();
+                                    let shutdown_receiver = shutdown_receiver.clone();
+                                    acceptor_task_set.spawn(async move {
+                                        strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
+                                    });
                                 },
-                                Err(e) =>
-                            }
-
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                Ok(lifecycle_event) => {
-                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
-                                        match e {
-                                            ItsiError::Break() => break,
-                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                        }
+                                Err(e) => debug!("Listener.accept failed {:?}", e),
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                break;
+                            }
+                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+                                Ok(lifecycle_event) => {
+                                    if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
+                                        match e {
+                                            ItsiError::Break() => break,
+                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
+                                        }
+                                    }
 
-
+                                },
+                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                            }
+                        }
                     }
-
+                    while let Some(_res) = acceptor_task_set.join_next().await {}
+                });
+            }
 
-
+            while let Some(_res) = listener_task_set.join_next().await {}
+        });
         runtime.shutdown_timeout(Duration::from_millis(100));
-
-        info!("Runtime has shut down");
+        debug!("Runtime has shut down");
         Ok(())
     }
 
@@ -167,62 +175,60 @@ impl SingleMode {
         stream: IoStream,
         listener: Arc<ListenerInfo>,
         shutdown_channel: watch::Receiver<RunningPhase>,
-    )
+    ) {
         let sender_clone = self.sender.clone();
         let addr = stream.addr();
         let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
         let server = self.server.clone();
         let executor = self.executor.clone();
         let mut shutdown_channel_clone = shutdown_channel.clone();
-
-            );
+        let server = server.clone();
+        let mut executor = executor.clone();
+        let mut binding = executor.http1();
+        let shutdown_channel = shutdown_channel_clone.clone();
+        let mut serve = Box::pin(
+            binding
+                .timer(TokioTimer::new())
+                .header_read_timeout(Duration::from_secs(1))
+                .serve_connection_with_upgrades(
+                    io,
+                    service_fn(move |hyper_request: Request<Incoming>| {
+                        ItsiRequest::process_request(
+                            hyper_request,
+                            sender_clone.clone(),
+                            server.clone(),
+                            listener.clone(),
+                            addr.clone(),
+                            shutdown_channel.clone(),
+                        )
+                    }),
+                ),
+        );
 
-                }
-            }
-            serve.as_mut().graceful_shutdown();
-        },
-        // A lifecycle event triggers shutdown.
-        _ = shutdown_channel_clone.changed() => {
-            // Initiate graceful shutdown.
-            serve.as_mut().graceful_shutdown();
-            // Now await the connection to finish shutting down.
-            if let Err(e) = serve.await {
-                debug!("Connection shutdown error: {:?}", e);
-            }
-        }
-        Ok(())
+        tokio::select! {
+            // Await the connection finishing naturally.
+            res = &mut serve => {
+                match res {
+                    Ok(()) => {
+                        debug!("Connection closed normally")
+                    },
+                    Err(res) => {
+                        debug!("Connection finished with error: {:?}", res)
+                    }
+                }
+                serve.as_mut().graceful_shutdown();
+            },
+            // A lifecycle event triggers shutdown.
+            _ = shutdown_channel_clone.changed() => {
+                // Initiate graceful shutdown.
+                serve.as_mut().graceful_shutdown();
+
+                // Now await the connection to finish shutting down.
+                if let Err(e) = serve.await {
+                    debug!("Connection shutdown error: {:?}", e);
+                }
+            }
+        }
     }
 
     pub async fn handle_lifecycle_event(
@@ -232,13 +238,20 @@ impl SingleMode {
     ) -> Result<()> {
         info!("Handling lifecycle event: {:?}", lifecycle_event);
         if let LifecycleEvent::Shutdown = lifecycle_event {
+            //1. Stop accepting new connections.
             shutdown_sender.send(RunningPhase::ShutdownPending).ok();
-
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //2. Break out of work queues.
             for worker in &*self.thread_workers {
                 worker.request_shutdown().await;
             }
+
+            tokio::time::sleep(Duration::from_millis(25)).await;
+
+            //3. Wait for all threads to finish.
+            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
             while Instant::now() < deadline {
-                tokio::time::sleep(Duration::from_millis(50)).await;
                 let alive_threads = self
                     .thread_workers
                     .iter()
@@ -250,7 +263,7 @@ impl SingleMode {
             tokio::time::sleep(Duration::from_millis(200)).await;
         }
 
-
+        //4. Force shutdown any stragglers
        shutdown_sender.send(RunningPhase::Shutdown).ok();
        self.thread_workers.iter().for_each(|worker| {
            worker.poll_shutdown(deadline);
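The reworked `serve_connection` pins the hyper connection future and races it against a `watch` channel, calling `graceful_shutdown` from whichever `select!` arm fires first. The same shape reduced to a runnable toy, with a `sleep` standing in for the connection future (tokio only; the timings are arbitrary):

use std::time::Duration;
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = watch::channel(false);

    // Stand-in for the pinned hyper connection future ("serve" in the diff).
    let mut conn = Box::pin(tokio::time::sleep(Duration::from_secs(5)));

    // Simulate a lifecycle event arriving while the connection is in flight.
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(100)).await;
        shutdown_tx.send(true).ok();
    });

    tokio::select! {
        // The connection finished on its own.
        _ = &mut conn => println!("connection closed normally"),
        // A lifecycle event triggered shutdown: the real code calls
        // serve.as_mut().graceful_shutdown() here, then awaits the drain.
        _ = shutdown_rx.changed() => {
            println!("shutdown requested, draining connection");
            conn.await;
        }
    }
}

Pinning the future with `Box::pin` is what allows it to be polled via `&mut conn` in one arm and still be awaited (or have `as_mut()` called on it) in the other arm's handler.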
data/ext/itsi_server/src/server/signal.rs
CHANGED
@@ -11,10 +11,7 @@ pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
 )> = LazyLock::new(|| sync::broadcast::channel(5));
 
 pub fn send_shutdown_event() {
-    SIGNAL_HANDLER_CHANNEL
-        .0
-        .send(LifecycleEvent::Shutdown)
-        .expect("Failed to send shutdown event");
+    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
 }
 
 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
data/ext/itsi_server/src/server/thread_worker.rs
CHANGED
@@ -1,4 +1,4 @@
-use super::itsi_server::RequestJob;
+use super::itsi_server::{RequestJob, Server};
 use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapVal, HeapValue,
@@ -24,6 +24,7 @@ use std::{
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
 pub struct ThreadWorker {
+    pub server: Arc<Server>,
     pub id: String,
     pub app: Opaque<Value>,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -48,8 +49,9 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 
 pub struct TerminateWakerSignal(bool);
 
-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+#[instrument(name = "Boot", parent=None, skip(server, threads, app, pid, scheduler_class))]
 pub fn build_thread_workers(
+    server: Arc<Server>,
     pid: Pid,
     threads: NonZeroU8,
     app: HeapVal,
@@ -65,6 +67,7 @@ pub fn build_thread_workers(
         .map(|id| {
             info!(pid = pid.as_raw(), id, "Thread");
             ThreadWorker::new(
+                server.clone(),
                 format!("{:?}#{:?}", pid, id),
                 app,
                 receiver_ref.clone(),
@@ -83,10 +86,7 @@ pub fn load_app(
     scheduler_class: Option<String>,
 ) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
     call_with_gvl(|ruby| {
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
+        let app = Opaque::from(app.funcall::<_, _, Value>(*ID_CALL, ())?);
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -100,6 +100,7 @@ pub fn load_app(
 }
 impl ThreadWorker {
     pub fn new(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -107,6 +108,7 @@ impl ThreadWorker {
         scheduler_class: Option<Opaque<Value>>,
     ) -> Result<Self> {
         let mut worker = Self {
+            server,
             id,
             app,
             receiver,
@@ -125,7 +127,7 @@ impl ThreadWorker {
             Ok(_) => {}
             Err(err) => error!("Failed to send shutdown request: {}", err),
         };
-
+        debug!("Requesting shutdown");
     }
 
     #[instrument(skip(self, deadline), fields(id = self.id))]
@@ -140,7 +142,7 @@ impl ThreadWorker {
         if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
             return true;
         }
-
+        debug!("Thread has shut down");
     }
     self.thread.write().take();
 
@@ -154,17 +156,23 @@ impl ThreadWorker {
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let server = self.server.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-
-
+                        if let Err(err) = Self::fiber_accept_loop(
+                            server,
+                            id,
+                            app,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                            error!("Error in fiber_accept_loop: {:?}", err);
                        }
                    } else {
-                        Self::accept_loop(id, app, receiver, terminated);
+                        Self::accept_loop(server, id, app, receiver, terminated);
                    }
                })
                .into(),
@@ -180,6 +188,7 @@ impl ThreadWorker {
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -243,10 +252,15 @@ impl ThreadWorker {
                 }
 
                 let yield_result = if receiver.is_empty() {
+                    let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                        idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                        idle_counter == 0
+                    } else {
+                        false
+                    };
                     waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                    idle_counter = (idle_counter + 1) % 100;
                     call_with_gvl(|ruby| {
-                        if
+                        if should_gc {
                             ruby.gc_start();
                         }
                         scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
@@ -264,6 +278,8 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn fiber_accept_loop(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
@@ -273,10 +289,16 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let
-        let scheduler_proc =
-
-
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = Self::build_scheduler_proc(
+            app,
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            server.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -337,21 +359,31 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        server: Arc<Server>,
         id: String,
         app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let mut idle_counter = 0;
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = server.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        ruby.gc_start();
+                    }
+                };
+            }
             match receiver.recv_blocking() {
                 Ok(RequestJob::ProcessRequest(request)) => {
                     if terminated.load(Ordering::Relaxed) {
                         break;
                    }
                    call_with_gvl(|_ruby| {
-                        request.process(&ruby,
+                        request.process(&ruby, server_class, app).ok();
                    })
                }
                Ok(RequestJob::Shutdown) => {
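Both accept loops now gate out-of-band GC behind the configurable `oob_gc_responses_threshold`, replacing the previous hard-coded trigger on every 100th idle iteration; with no threshold configured, idle GC never fires. The counter arithmetic in isolation, as a runnable sketch:

// Modular idle counter: fires once every `threshold` idle passes, and
// never fires when no threshold is configured (the new default).
fn should_gc(idle_counter: &mut u64, threshold: Option<u64>) -> bool {
    match threshold {
        Some(t) => {
            *idle_counter = (*idle_counter + 1) % t;
            *idle_counter == 0
        }
        None => false,
    }
}

fn main() {
    let mut counter = 0;
    let fires: Vec<bool> = (0..6).map(|_| should_gc(&mut counter, Some(3))).collect();
    assert_eq!(fires, vec![false, false, true, false, false, true]);

    let mut counter = 0;
    assert!((0..10).all(|_| !should_gc(&mut counter, None)));
    println!("ok");
}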
data/ext/itsi_server/src/server/tls.rs
CHANGED
@@ -63,7 +63,7 @@ pub fn configure_tls(
         .map(|s| s.to_string())
         .or_else(|| (*ITSI_ACME_CONTACT_EMAIL).as_ref().ok().map(|s| s.to_string()))
         .ok_or_else(|| itsi_error::ItsiError::ArgumentError(
-            "
+            "acme_email query param or ITSI_ACME_CONTACT_EMAIL must be set before you can auto-generate let's encrypt certificates".to_string(),
         ))?;
 
     let acme_config = AcmeConfig::new(domains)
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: itsi-scheduler
 version: !ruby/object:Gem::Version
-  version: 0.1.9
+  version: 0.1.11
 platform: ruby
 authors:
 - Wouter Coppieters
 bindir: exe
 cert_chain: []
-date: 2025-03-
+date: 2025-03-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rb_sys