itsi-scheduler 0.1.5 → 0.1.19
This diff compares the publicly released contents of the two package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of itsi-scheduler has been flagged as possibly problematic.
- checksums.yaml +4 -4
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +90 -22
- data/README.md +5 -0
- data/_index.md +7 -0
- data/ext/itsi_error/Cargo.toml +1 -0
- data/ext/itsi_error/src/lib.rs +106 -7
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_rb_helpers/Cargo.toml +1 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
- data/ext/itsi_rb_helpers/src/lib.rs +59 -9
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +1 -1
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +72 -28
- data/ext/itsi_server/src/default_responses/mod.rs +11 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +113 -75
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
- data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
- data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +56 -24
- data/ext/itsi_server/src/server/{listener.rs → binds/listener.rs} +218 -113
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +55 -17
- data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +109 -28
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/http_message_types.rs +97 -0
- data/ext/itsi_server/src/server/io_stream.rs +2 -1
- data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
- data/ext/itsi_server/src/server/mod.rs +6 -5
- data/ext/itsi_server/src/server/process_worker.rs +65 -14
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +137 -49
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +338 -164
- data/ext/itsi_server/src/server/signal.rs +32 -26
- data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
- data/ext/itsi_server/src/server/thread_worker.rs +214 -107
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
- data/ext/itsi_server/src/services/mime_types.rs +1416 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +83 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
- data/ext/itsi_tracing/Cargo.toml +1 -0
- data/ext/itsi_tracing/src/lib.rs +312 -34
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +2 -2
- metadata +93 -21
- data/ext/itsi_error/src/from.rs +0 -71
- data/ext/itsi_server/extconf.rb +0 -6
- data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/ext/itsi_server/src/request/itsi_request.rs +0 -277
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/response/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
- data/ext/itsi_server/src/server/itsi_server.rs +0 -244
- /data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
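The largest single change in this release is the rewrite of `data/ext/itsi_server/src/server/serve_strategy/single_mode.rs` (+338 −164), shown below. `SingleMode` now holds a single `Arc<ItsiServerConfig>` in place of the old server, listener, and thread-worker fields; gains `reload` and `restart` support plus a status-monitoring thread; and hands accepted connections to the new `ItsiHttpService`.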
```diff
--- a/data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
+++ b/data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
@@ -1,42 +1,53 @@
 use crate::{
-
+    ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
     server::{
+        binds::listener::ListenerInfo,
         io_stream::IoStream,
-        itsi_server::{RequestJob, Server},
         lifecycle_event::LifecycleEvent,
-
+        request_job::RequestJob,
+        signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
         thread_worker::{build_thread_workers, ThreadWorker},
     },
+    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
 };
-use http::Request;
-use hyper::{body::Incoming, service::service_fn};
 use hyper_util::{
     rt::{TokioExecutor, TokioIo, TokioTimer},
     server::conn::auto::Builder,
 };
 use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::{
+    call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
+};
 use itsi_tracing::{debug, error, info};
+use magnus::value::ReprValue;
 use nix::unistd::Pid;
+use parking_lot::RwLock;
 use std::{
-
+    collections::HashMap,
     pin::Pin,
-    sync::
-
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc,
+    },
+    thread::sleep,
+    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::
+    sync::{
+        broadcast,
+        watch::{self},
+    },
     task::JoinSet,
 };
 use tracing::instrument;
 
 pub struct SingleMode {
     pub executor: Builder<TokioExecutor>,
-    pub
-    pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
-    pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
+    pub server_config: Arc<ItsiServerConfig>,
     pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+    pub restart_requested: AtomicBool,
+    pub status: RwLock<HashMap<u8, (u64, u64)>>,
 }
 
 pub enum RunningPhase {
@@ -46,30 +57,29 @@ pub enum RunningPhase {
 }
 
 impl SingleMode {
-    #[instrument(parent=None, skip_all
-    pub
-
-        listeners: Arc<Vec<Arc<Listener>>>,
-        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-    ) -> Result<Self> {
-        let (thread_workers, sender) = build_thread_workers(
-            Pid::this(),
-            NonZeroU8::try_from(server.threads).unwrap(),
-            server.app,
-            server.scheduler_class.clone(),
-        )?;
+    #[instrument(parent=None, skip_all)]
+    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+        server_config.server_params.read().preload_ruby()?;
         Ok(Self {
             executor: Builder::new(TokioExecutor::new()),
-
-
-
-
-            lifecycle_channel,
+            server_config,
+            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
+            restart_requested: AtomicBool::new(false),
+            status: RwLock::new(HashMap::new()),
         })
     }
 
     pub fn build_runtime(&self) -> Runtime {
-        let mut builder: RuntimeBuilder =
+        let mut builder: RuntimeBuilder = if self
+            .server_config
+            .server_params
+            .read()
+            .multithreaded_reactor
+        {
+            RuntimeBuilder::new_multi_thread()
+        } else {
+            RuntimeBuilder::new_current_thread()
+        };
         builder
             .thread_name("itsi-server-accept-loop")
             .thread_stack_size(3 * 1024 * 1024)
@@ -80,168 +90,332 @@ impl SingleMode {
     }
 
     pub fn stop(&self) -> Result<()> {
+        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+        Ok(())
+    }
+
+    pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
+        println!(" └─ Worker");
+        println!(
+            " - binds: {:?}",
+            self.server_config.server_params.read().binds
+        );
+
+        println!(
+            " ─ streaming body: {:?}",
+            self.server_config.server_params.read().streamable_body
+        );
+        println!(
+            " ─ multithreaded runtime: {:?}",
+            self.server_config
+                .server_params
+                .read()
+                .multithreaded_reactor
+        );
+        println!(
+            " ─ scheduler: {:?}",
+            self.server_config.server_params.read().scheduler_class
+        );
+        println!(
+            " ─ OOB GC Response threadhold: {:?}",
+            self.server_config
+                .server_params
+                .read()
+                .oob_gc_responses_threshold
+        );
+        for worker in thread_workers.iter() {
+            println!(" └─ - Thread : {:?}", worker.id);
+            println!(" - # Requests Processed: {:?}", worker.request_id);
+            println!(
+                " - Last Request Started: {:?} ago",
+                if worker.current_request_start.load(Ordering::Relaxed) == 0 {
+                    Duration::from_secs(0)
+                } else {
+                    SystemTime::now()
+                        .duration_since(
+                            UNIX_EPOCH
+                                + Duration::from_secs(
+                                    worker.current_request_start.load(Ordering::Relaxed),
+                                ),
+                        )
+                        .unwrap_or(Duration::from_secs(0))
+                }
+            );
+            call_with_gvl(|_| {
+                if let Some(thread) = worker.thread.read().as_ref() {
+                    if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
+                        println!(" - Backtrace:");
+                        for line in backtrace {
+                            println!(" - {}", line);
+                        }
+                    }
+                }
+            })
+        }
+
         Ok(())
     }
 
-
+    pub fn start_monitors(
+        self: Arc<Self>,
+        thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
+    ) -> magnus::Thread {
+        call_with_gvl(move |_| {
+            create_ruby_thread(move || {
+                call_without_gvl(move || {
+                    let monitor_runtime = RuntimeBuilder::new_current_thread()
+                        .enable_time()
+                        .build()
+                        .unwrap();
+                    let receiver = self.clone();
+                    monitor_runtime.block_on({
+                        let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+                        let receiver = receiver.clone();
+                        let thread_workers = thread_workers.clone();
+                        async move {
+                            loop {
+                                tokio::select! {
+                                    _ = tokio::time::sleep(Duration::from_secs(1)) => {
+                                        let mut status_lock = receiver.status.write();
+                                        thread_workers.iter().for_each(|worker| {
+                                            let worker_entry = status_lock.entry(worker.id);
+                                            let data = (
+                                                worker.request_id.load(Ordering::Relaxed),
+                                                worker.current_request_start.load(Ordering::Relaxed),
+                                            );
+                                            worker_entry.or_insert(data);
+                                        });
+                                    }
+                                    lifecycle_event = lifecycle_rx.recv() => {
+                                        match lifecycle_event {
+                                            Ok(LifecycleEvent::Restart) => {
+                                                receiver.restart().ok();
+                                            }
+                                            Ok(LifecycleEvent::Reload) => {
+                                                receiver.reload().ok();
+                                            }
+                                            Ok(LifecycleEvent::Shutdown) => {
+                                                break;
+                                            }
+                                            Ok(LifecycleEvent::PrintInfo) => {
+                                                receiver.print_info(thread_workers.clone()).await.ok();
+                                            }
+                                            _ => {}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    })
+                })
+            })
+        })
+    }
+
+    #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
         let mut listener_task_set = JoinSet::new();
-        let
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        let runtime = self.build_runtime();
+
+        let (thread_workers, job_sender, nonblocking_sender) =
+            build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
+                .inspect_err(|e| {
+                    if let Some(err_val) = e.value() {
+                        print_rb_backtrace(err_val);
+                    }
+                })?;
+
+        info!(
+            threads = thread_workers.len(),
+            binds = format!("{:?}", self.server_config.server_params.read().binds)
+        );
+
+        let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+        let thread = self.clone().start_monitors(thread_workers.clone());
+        if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
+            return Ok(());
+        }
+        runtime.block_on(
+            async {
+                let server_params = self.server_config.server_params.read().clone();
+                server_params.middleware.get().unwrap().initialize_layers().await?;
+                let tokio_listeners = server_params.listeners.lock()
+                    .drain(..)
+                    .map(|list| {
+                        Arc::new(list.into_tokio_listener())
+                    })
+                    .collect::<Vec<_>>();
+
+                for listener in tokio_listeners.iter() {
+                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+
+                    let listener_info = Arc::new(listener.listener_info());
+                    let self_ref = self.clone();
+                    let listener = listener.clone();
+                    let shutdown_sender = shutdown_sender.clone();
+                    let job_sender = job_sender.clone();
+                    let nonblocking_sender = nonblocking_sender.clone();
+                    let workers_clone = thread_workers.clone();
+                    let listener_clone = listener.clone();
+                    let mut shutdown_receiver = shutdown_sender.subscribe();
+                    let shutdown_receiver_clone = shutdown_receiver.clone();
+                    listener_task_set.spawn(async move {
+                        listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+                    });
+
+                    listener_task_set.spawn(async move {
+                        let strategy_clone = self_ref.clone();
+                        let mut acceptor_task_set = JoinSet::new();
+                        loop {
+                            tokio::select! {
+                                accept_result = listener.accept() => match accept_result {
+                                    Ok(accept_result) => {
+                                        let strategy = strategy_clone.clone();
+                                        let listener_info = listener_info.clone();
+                                        let shutdown_receiver = shutdown_receiver.clone();
+                                        let job_sender = job_sender.clone();
+                                        let nonblocking_sender = nonblocking_sender.clone();
+                                        acceptor_task_set.spawn(async move {
+                                            strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
+                                        });
+                                    },
+                                    Err(e) => debug!("Listener.accept failed {:?}", e),
+                                },
+                                _ = shutdown_receiver.changed() => {
+                                    break;
                                 }
-
-
-
-
-
+                                lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+                                    Ok(LifecycleEvent::Shutdown) => {
+                                        shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
+                                        tokio::time::sleep(Duration::from_millis(25)).await;
+                                        for _i in 0..workers_clone.len() {
+                                            job_sender.send(RequestJob::Shutdown).await.unwrap();
+                                            nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
+                                        }
+                                        break;
+                                    },
+                                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                                    _ => {}
+                                }
                             }
-                                lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
-                                    Ok(lifecycle_event) => {
-                                        if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
-                                            match e {
-                                                ItsiError::Break() => break,
-                                                _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                            }
-                                        }
-
-                                    },
-                                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                                }
                         }
-
-
-
-
-
+                        while let Some(_res) = acceptor_task_set.join_next().await {}
+                    });
+
+                }
+
+                while let Some(_res) = listener_task_set.join_next().await {}
+
+                // Explicitly drop all listeners to ensure file descriptors are released
+                drop(tokio_listeners);
 
-
+                Ok::<(), ItsiError>(())
+            })?;
 
-
-
+        shutdown_sender.send(RunningPhase::Shutdown).ok();
+        let deadline = Instant::now()
+            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
 
+        runtime.shutdown_timeout(Duration::from_millis(100));
+
+        loop {
+            if thread_workers
+                .iter()
+                .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
+            {
+                funcall_no_ret(thread, "join", ()).ok();
+                break;
+            }
+            sleep(Duration::from_millis(50));
+        }
+
+        if self.restart_requested.load(Ordering::SeqCst) {
+            self.restart_requested.store(false, Ordering::SeqCst);
+            info!("Worker restarting");
+            self.run()?;
+        }
+        debug!("Runtime has shut down");
         Ok(())
     }
 
     pub(crate) async fn serve_connection(
         &self,
         stream: IoStream,
-
-
-
-
+        job_sender: async_channel::Sender<RequestJob>,
+        nonblocking_sender: async_channel::Sender<RequestJob>,
+        listener: Arc<ListenerInfo>,
+        shutdown_channel: watch::Receiver<RunningPhase>,
+    ) {
         let addr = stream.addr();
         let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
-        let server = self.server.clone();
         let executor = self.executor.clone();
         let mut shutdown_channel_clone = shutdown_channel.clone();
-
-
-
-        let mut binding = executor.http1();
-        let shutdown_channel = shutdown_channel_clone.clone();
-        let mut serve = Box::pin(
-            binding
-                .timer(TokioTimer::new())
-                .header_read_timeout(Duration::from_secs(1))
-                .serve_connection_with_upgrades(
-                    io,
-                    service_fn(move |hyper_request: Request<Incoming>| {
-                        ItsiRequest::process_request(
-                            hyper_request,
-                            sender_clone.clone(),
-                            server.clone(),
-                            listener.clone(),
-                            addr.clone(),
-                            shutdown_channel.clone(),
-                        )
-                    }),
-                ),
-        );
+        let mut executor = executor.clone();
+        let mut binding = executor.http1();
+        let shutdown_channel = shutdown_channel_clone.clone();
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        let service = ItsiHttpService {
+            inner: Arc::new(ItsiHttpServiceInner {
+                sender: job_sender.clone(),
+                nonblocking_sender: nonblocking_sender.clone(),
+                server_params: self.server_config.server_params.read().clone(),
+                listener,
+                addr: addr.to_string(),
+                shutdown_channel: shutdown_channel.clone(),
+            }),
+        };
+        let mut serve = Box::pin(
+            binding
+                .timer(TokioTimer::new())
+                .header_read_timeout(Duration::from_secs(1))
+                .serve_connection_with_upgrades(io, service),
+        );
+
+        tokio::select! {
+            // Await the connection finishing naturally.
+            res = &mut serve => {
+                match res{
+                    Ok(()) => {
+                        debug!("Connection closed normally")
+                    },
+                    Err(res) => {
+                        debug!("Connection closed abruptly: {:?}", res)
                    }
                }
-
-
-
-
+                serve.as_mut().graceful_shutdown();
+            },
+            // A lifecycle event triggers shutdown.
+            _ = shutdown_channel_clone.changed() => {
+                // Initiate graceful shutdown.
+                serve.as_mut().graceful_shutdown();
 
-
-
-
-        shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
-    ) -> Result<()> {
-        if let LifecycleEvent::Shutdown = lifecycle_event {
-            shutdown_sender
-                .send(RunningPhase::ShutdownPending)
-                .expect("Failed to send shutdown pending signal");
-            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
-            for worker in &*self.thread_workers {
-                worker.request_shutdown().await;
-            }
-            while Instant::now() < deadline {
-                tokio::time::sleep(Duration::from_millis(50)).await;
-                let alive_threads = self
-                    .thread_workers
-                    .iter()
-                    .filter(|worker| worker.poll_shutdown(deadline))
-                    .count();
-                if alive_threads == 0 {
-                    break;
+                // Now await the connection to finish shutting down.
+                if let Err(e) = serve.await {
+                    debug!("Connection shutdown error: {:?}", e);
                }
-                tokio::time::sleep(Duration::from_millis(200)).await;
            }
+        }
+    }
 
-
-
-
-
-
-
-
-
-            return Err(ItsiError::Break());
+    /// Attempts to reload the config "live"
+    /// Not that when running in single mode this will not unload
+    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
+    pub fn reload(&self) -> Result<()> {
+        let should_reexec = self.server_config.clone().reload(false)?;
+        if should_reexec {
+            self.server_config.dup_fds()?;
+            self.server_config.reload_exec()?;
        }
+        self.restart_requested.store(true, Ordering::SeqCst);
+        self.stop()?;
+        self.server_config.server_params.read().preload_ruby()?;
+        Ok(())
+    }
+
+    /// Restart the server while keeping connections open.
+    pub fn restart(&self) -> Result<()> {
+        self.server_config.dup_fds()?;
+        self.server_config.reload_exec()?;
        Ok(())
    }
 }
```