itsi-scheduler 0.2.22-aarch64-linux
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rubocop.yml +8 -0
- data/Cargo.lock +997 -0
- data/Cargo.toml +7 -0
- data/Rakefile +39 -0
- data/ext/itsi_acme/Cargo.toml +86 -0
- data/ext/itsi_acme/examples/high_level.rs +63 -0
- data/ext/itsi_acme/examples/high_level_warp.rs +52 -0
- data/ext/itsi_acme/examples/low_level.rs +87 -0
- data/ext/itsi_acme/examples/low_level_axum.rs +66 -0
- data/ext/itsi_acme/src/acceptor.rs +81 -0
- data/ext/itsi_acme/src/acme.rs +354 -0
- data/ext/itsi_acme/src/axum.rs +86 -0
- data/ext/itsi_acme/src/cache.rs +39 -0
- data/ext/itsi_acme/src/caches/boxed.rs +80 -0
- data/ext/itsi_acme/src/caches/composite.rs +69 -0
- data/ext/itsi_acme/src/caches/dir.rs +106 -0
- data/ext/itsi_acme/src/caches/mod.rs +11 -0
- data/ext/itsi_acme/src/caches/no.rs +78 -0
- data/ext/itsi_acme/src/caches/test.rs +136 -0
- data/ext/itsi_acme/src/config.rs +172 -0
- data/ext/itsi_acme/src/https_helper.rs +69 -0
- data/ext/itsi_acme/src/incoming.rs +142 -0
- data/ext/itsi_acme/src/jose.rs +161 -0
- data/ext/itsi_acme/src/lib.rs +142 -0
- data/ext/itsi_acme/src/resolver.rs +59 -0
- data/ext/itsi_acme/src/state.rs +424 -0
- data/ext/itsi_error/Cargo.lock +368 -0
- data/ext/itsi_error/Cargo.toml +12 -0
- data/ext/itsi_error/src/lib.rs +140 -0
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.lock +355 -0
- data/ext/itsi_rb_helpers/Cargo.toml +11 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
- data/ext/itsi_rb_helpers/src/lib.rs +232 -0
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/extconf.rb +11 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +320 -0
- data/ext/itsi_scheduler/src/lib.rs +39 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +94 -0
- data/ext/itsi_server/src/default_responses/mod.rs +14 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +154 -0
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +116 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +149 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +346 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +265 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +399 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +447 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +545 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +650 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +102 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/binds/bind.rs +204 -0
- data/ext/itsi_server/src/server/binds/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/binds/listener.rs +485 -0
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/binds/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/binds/tls.rs +278 -0
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/frame_stream.rs +143 -0
- data/ext/itsi_server/src/server/http_message_types.rs +230 -0
- data/ext/itsi_server/src/server/io_stream.rs +128 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +170 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +63 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +94 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +93 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +343 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +151 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +329 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +300 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +193 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +64 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +188 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +168 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +183 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +209 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +133 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +122 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +407 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +155 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +54 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +54 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +51 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +138 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +269 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +62 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +218 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +31 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +381 -0
- data/ext/itsi_server/src/server/mod.rs +14 -0
- data/ext/itsi_server/src/server/process_worker.rs +247 -0
- data/ext/itsi_server/src/server/redirect_type.rs +26 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +100 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +411 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +31 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +449 -0
- data/ext/itsi_server/src/server/signal.rs +129 -0
- data/ext/itsi_server/src/server/size_limited_incoming.rs +107 -0
- data/ext/itsi_server/src/server/thread_worker.rs +504 -0
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +270 -0
- data/ext/itsi_server/src/services/mime_types.rs +2896 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +89 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +609 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1400 -0
- data/ext/itsi_tracing/Cargo.lock +274 -0
- data/ext/itsi_tracing/Cargo.toml +17 -0
- data/ext/itsi_tracing/src/lib.rs +370 -0
- data/itsi-scheduler-100.png +0 -0
- data/lib/itsi/schedule_refinement.rb +96 -0
- data/lib/itsi/scheduler/3.1/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.2/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.3/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.4/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/4.0/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/native_extension.rb +34 -0
- data/lib/itsi/scheduler/version.rb +7 -0
- data/lib/itsi/scheduler.rb +153 -0
- data/vendor/rb-sys-build/.cargo-ok +1 -0
- data/vendor/rb-sys-build/.cargo_vcs_info.json +6 -0
- data/vendor/rb-sys-build/Cargo.lock +294 -0
- data/vendor/rb-sys-build/Cargo.toml +71 -0
- data/vendor/rb-sys-build/Cargo.toml.orig +32 -0
- data/vendor/rb-sys-build/LICENSE-APACHE +190 -0
- data/vendor/rb-sys-build/LICENSE-MIT +21 -0
- data/vendor/rb-sys-build/src/bindings/sanitizer.rs +185 -0
- data/vendor/rb-sys-build/src/bindings/stable_api.rs +247 -0
- data/vendor/rb-sys-build/src/bindings/wrapper.h +71 -0
- data/vendor/rb-sys-build/src/bindings.rs +280 -0
- data/vendor/rb-sys-build/src/cc.rs +421 -0
- data/vendor/rb-sys-build/src/lib.rs +12 -0
- data/vendor/rb-sys-build/src/rb_config/flags.rs +101 -0
- data/vendor/rb-sys-build/src/rb_config/library.rs +132 -0
- data/vendor/rb-sys-build/src/rb_config/search_path.rs +57 -0
- data/vendor/rb-sys-build/src/rb_config.rs +906 -0
- data/vendor/rb-sys-build/src/utils.rs +53 -0
- metadata +210 -0
|
@@ -0,0 +1,449 @@
|
|
|
1
|
+
use crate::{
|
|
2
|
+
ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
|
|
3
|
+
server::{
|
|
4
|
+
lifecycle_event::LifecycleEvent,
|
|
5
|
+
request_job::RequestJob,
|
|
6
|
+
serve_strategy::acceptor::{Acceptor, AcceptorArgs},
|
|
7
|
+
signal::{
|
|
8
|
+
send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime,
|
|
9
|
+
SHUTDOWN_REQUESTED,
|
|
10
|
+
},
|
|
11
|
+
thread_worker::{build_thread_workers, ThreadWorker},
|
|
12
|
+
},
|
|
13
|
+
};
|
|
14
|
+
use hyper_util::{
|
|
15
|
+
rt::{TokioExecutor, TokioTimer},
|
|
16
|
+
server::conn::auto::Builder,
|
|
17
|
+
};
|
|
18
|
+
use itsi_error::{ItsiError, Result};
|
|
19
|
+
use itsi_rb_helpers::{
|
|
20
|
+
call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
|
|
21
|
+
};
|
|
22
|
+
use itsi_tracing::{debug, error, info};
|
|
23
|
+
use magnus::{value::ReprValue, Value};
|
|
24
|
+
use nix::unistd::Pid;
|
|
25
|
+
use parking_lot::RwLock;
|
|
26
|
+
use std::sync::Arc;
|
|
27
|
+
use std::{
|
|
28
|
+
collections::HashMap,
|
|
29
|
+
sync::atomic::{AtomicBool, Ordering},
|
|
30
|
+
thread::sleep,
|
|
31
|
+
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
|
|
32
|
+
};
|
|
33
|
+
use tokio::{
|
|
34
|
+
runtime::{Builder as RuntimeBuilder, Runtime},
|
|
35
|
+
sync::watch::{self},
|
|
36
|
+
task::JoinSet,
|
|
37
|
+
};
|
|
38
|
+
use tracing::instrument;
|
|
39
|
+
|
|
40
|
+
/// Serve strategy for a single worker process: owns the hyper connection
/// builder, the shared server configuration, and per-thread status snapshots.
pub struct SingleMode {
    /// Index of this worker process. Worker 0 inherits the listener binds from
    /// the master process rather than rebinding.
    pub worker_id: usize,
    /// Pre-configured hyper HTTP/1 + HTTP/2 connection builder shared by all acceptors.
    pub executor: Builder<TokioExecutor>,
    /// Shared server configuration (params, hooks, listeners, lifecycle helpers).
    pub server_config: Arc<ItsiServerConfig>,
    /// Set when a restart has been requested; checked after the runtime exits in `run`.
    pub restart_requested: AtomicBool,
    /// Snapshot per thread-worker id: (requests processed, current request start time).
    pub status: RwLock<HashMap<u8, (u64, u64)>>,
}
|
|
47
|
+
|
|
48
|
+
/// Phase broadcast over the shutdown watch channel to acceptors and
/// in-flight connections.
#[derive(PartialEq, Debug)]
pub enum RunningPhase {
    /// Normal operation: accepting and serving connections.
    Running,
    /// Graceful shutdown requested: stop accepting, drain in-flight work.
    ShutdownPending,
    /// Final phase: everything should stop.
    Shutdown,
}
|
|
54
|
+
|
|
55
|
+
impl SingleMode {
|
|
56
|
+
    #[instrument(parent=None, skip_all)]
    /// Creates a new single-mode strategy: preloads the Ruby application and
    /// builds the hyper connection builder from the configured server params.
    ///
    /// # Errors
    /// Propagates any error raised while preloading Ruby.
    pub fn new(server_config: Arc<ItsiServerConfig>, worker_id: usize) -> Result<Self> {
        // Load the Ruby application up-front so boot errors surface before serving.
        server_config.server_params.read().preload_ruby()?;
        let executor = {
            let mut executor = Builder::new(TokioExecutor::new());
            let server_params = server_config.server_params.read();
            let mut http1_executor = executor.http1();

            // HTTP/1 tuning taken from server params.
            http1_executor
                .header_read_timeout(server_params.header_read_timeout)
                .pipeline_flush(server_params.pipeline_flush)
                .timer(TokioTimer::new());

            // `writev` is only overridden when explicitly configured.
            if let Some(writev) = server_params.writev {
                http1_executor.writev(writev);
            }

            // HTTP/2 tuning taken from server params.
            executor
                .http2()
                .max_concurrent_streams(server_params.max_concurrent_streams)
                .max_local_error_reset_streams(server_params.max_local_error_reset_streams)
                .max_header_list_size(server_params.max_header_list_size)
                .max_send_buf_size(server_params.max_send_buf_size)
                .enable_connect_protocol();
            executor
        };

        Ok(Self {
            worker_id,
            executor,
            server_config,
            restart_requested: AtomicBool::new(false),
            status: RwLock::new(HashMap::new()),
        })
    }
|
|
91
|
+
|
|
92
|
+
pub fn is_zero_worker(&self) -> bool {
|
|
93
|
+
self.worker_id == 0
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
pub fn build_runtime(&self) -> Runtime {
|
|
97
|
+
let mut builder: RuntimeBuilder = if self
|
|
98
|
+
.server_config
|
|
99
|
+
.server_params
|
|
100
|
+
.read()
|
|
101
|
+
.multithreaded_reactor
|
|
102
|
+
{
|
|
103
|
+
RuntimeBuilder::new_multi_thread()
|
|
104
|
+
} else {
|
|
105
|
+
RuntimeBuilder::new_current_thread()
|
|
106
|
+
};
|
|
107
|
+
builder
|
|
108
|
+
.thread_name("itsi-server-accept-loop")
|
|
109
|
+
.thread_stack_size(512 * 1024)
|
|
110
|
+
.max_blocking_threads(4)
|
|
111
|
+
.event_interval(16)
|
|
112
|
+
.global_queue_interval(64)
|
|
113
|
+
.max_io_events_per_tick(256)
|
|
114
|
+
.enable_all()
|
|
115
|
+
.build()
|
|
116
|
+
.expect("Failed to build Tokio runtime")
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
pub fn stop(&self) -> Result<()> {
|
|
120
|
+
SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
121
|
+
send_lifecycle_event(LifecycleEvent::Shutdown);
|
|
122
|
+
Ok(())
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
|
|
126
|
+
println!(" └─ Worker");
|
|
127
|
+
println!(
|
|
128
|
+
" - binds: {:?}",
|
|
129
|
+
self.server_config.server_params.read().binds
|
|
130
|
+
);
|
|
131
|
+
|
|
132
|
+
println!(
|
|
133
|
+
" ─ streaming body: {:?}",
|
|
134
|
+
self.server_config.server_params.read().streamable_body
|
|
135
|
+
);
|
|
136
|
+
println!(
|
|
137
|
+
" ─ multithreaded runtime: {:?}",
|
|
138
|
+
self.server_config
|
|
139
|
+
.server_params
|
|
140
|
+
.read()
|
|
141
|
+
.multithreaded_reactor
|
|
142
|
+
);
|
|
143
|
+
println!(
|
|
144
|
+
" ─ scheduler: {:?}",
|
|
145
|
+
self.server_config.server_params.read().scheduler_class
|
|
146
|
+
);
|
|
147
|
+
println!(
|
|
148
|
+
" ─ OOB GC Response threadhold: {:?}",
|
|
149
|
+
self.server_config
|
|
150
|
+
.server_params
|
|
151
|
+
.read()
|
|
152
|
+
.oob_gc_responses_threshold
|
|
153
|
+
);
|
|
154
|
+
for worker in thread_workers.iter() {
|
|
155
|
+
println!(" └─ - Thread : {:?}", worker.id);
|
|
156
|
+
println!(" - # Requests Processed: {:?}", worker.request_id);
|
|
157
|
+
println!(
|
|
158
|
+
" - Last Request Started: {:?} ago",
|
|
159
|
+
if worker.current_request_start.load(Ordering::Relaxed) == 0 {
|
|
160
|
+
Duration::from_secs(0)
|
|
161
|
+
} else {
|
|
162
|
+
SystemTime::now()
|
|
163
|
+
.duration_since(
|
|
164
|
+
UNIX_EPOCH
|
|
165
|
+
+ Duration::from_secs(
|
|
166
|
+
worker.current_request_start.load(Ordering::Relaxed),
|
|
167
|
+
),
|
|
168
|
+
)
|
|
169
|
+
.unwrap_or(Duration::from_secs(0))
|
|
170
|
+
}
|
|
171
|
+
);
|
|
172
|
+
call_with_gvl(|_| {
|
|
173
|
+
if let Some(thread) = worker.thread.read().as_ref() {
|
|
174
|
+
if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
|
|
175
|
+
println!(" - Backtrace:");
|
|
176
|
+
for line in backtrace {
|
|
177
|
+
println!(" - {}", line);
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
})
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
Ok(())
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
    /// Spawns a Ruby thread running a small current-thread Tokio runtime that:
    /// once per second snapshots each thread worker's (request count, current
    /// request start) into `self.status`, and reacts to lifecycle events
    /// (Restart/Reload -> restart, PrintInfo -> report, Shutdown or a closed
    /// channel -> exit the loop). Returns `None` if the thread isn't created.
    pub fn start_monitors(
        self: Arc<Self>,
        thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
    ) -> Option<magnus::Thread> {
        // Creating a Ruby thread requires the GVL; the monitor loop itself then
        // runs without it so it doesn't block other Ruby threads.
        call_with_gvl(move |_| {
            create_ruby_thread(move || {
                call_without_gvl(move || {
                    let monitor_runtime = RuntimeBuilder::new_current_thread()
                        .enable_all()
                        .build()
                        .unwrap();
                    let receiver = self.clone();
                    monitor_runtime.block_on({
                        let mut lifecycle_rx = subscribe_runtime_to_signals();
                        let receiver = receiver.clone();
                        let thread_workers = thread_workers.clone();
                        async move {
                            loop {
                                tokio::select! {
                                    // Periodic status sample (1s cadence).
                                    _ = tokio::time::sleep(Duration::from_secs(1)) => {
                                        let mut status_lock = receiver.status.write();
                                        thread_workers.iter().for_each(|worker| {
                                            let worker_entry = status_lock.entry(worker.id);
                                            let data = (
                                                worker.request_id.load(Ordering::Relaxed),
                                                worker.current_request_start.load(Ordering::Relaxed),
                                            );
                                            // NOTE(review): `or_insert` only records the FIRST
                                            // sample per worker id; later samples are discarded.
                                            // Confirm this isn't meant to be an update.
                                            worker_entry.or_insert(data);
                                        });
                                    }
                                    lifecycle_event = lifecycle_rx.recv() => {
                                        match lifecycle_event {
                                            Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => {
                                                receiver.restart().await.ok();
                                            }
                                            Ok(LifecycleEvent::Shutdown) => {
                                                break;
                                            }
                                            Ok(LifecycleEvent::PrintInfo) => {
                                                receiver.print_info(thread_workers.clone()).await.ok();
                                            }
                                            Err(e) => {
                                                debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e);
                                                break;
                                            }
                                            _ => {}
                                        }
                                    }
                                }
                            }
                        }
                    })
                })
            })
        })
    }
|
|
243
|
+
|
|
244
|
+
    #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
    /// Main entry point for a single worker: builds the thread workers, spawns
    /// the monitor thread, runs the accept loops until shutdown, then drains
    /// workers and optionally restarts.
    ///
    /// # Errors
    /// Fails if thread workers can't be built, the monitor thread can't start,
    /// or middleware initialization fails.
    pub fn run(self: Arc<Self>) -> Result<()> {
        // Build the Ruby thread workers plus the two job channels (blocking and
        // nonblocking) used to hand them requests.
        let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers(
            self.server_config.server_params.read().clone(),
            self.worker_id,
        )
        .inspect_err(|e| {
            // Surface the Ruby backtrace when worker construction fails in Ruby.
            if let Some(err_val) = e.value() {
                print_rb_backtrace(err_val);
            }
        })?;

        let worker_count = thread_workers.len();
        info!(
            threads = worker_count,
            binds = format!("{:?}", self.server_config.server_params.read().binds)
        );

        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
        let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
        let monitor_thread = self.clone().start_monitors(thread_workers.clone());

        // If we're on Linux with reuse_port enabled, we can use
        // kernel level load balancing across processes sharing a port.
        // To take advantage of this, these forks will rebind to the same port upon boot.
        // Worker 0 is special (this one just inherits the bind from the master process).
        let is_zero_worker = self.is_zero_worker();
        let should_rebind = !is_zero_worker && self.server_config.use_reuse_port_load_balancing();

        if monitor_thread.is_none() {
            error!("Failed to start monitor thread");
            return Err(ItsiError::new("Failed to start monitor thread"));
        }
        let monitor_thread = monitor_thread.unwrap();
        // A signal may have requested shutdown while we were booting.
        if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
            return Ok(());
        }
        let runtime = self.build_runtime();
        let result = runtime.block_on(async {
            let mut listener_task_set = JoinSet::new();
            let server_params = self.server_config.server_params.read().clone();
            if let Err(err) = server_params.initialize_middleware().await {
                error!("Failed to initialize middleware: {}", err);
                return Err(ItsiError::new("Failed to initialize middleware"));
            }
            // Take ownership of all configured listeners, converting each to a
            // Tokio listener (rebinding when reuse_port load balancing applies).
            let tokio_listeners = server_params
                .listeners
                .lock()
                .drain(..)
                .map(|list| Arc::new(list.into_tokio_listener(should_rebind)))
                .collect::<Vec<_>>();

            // One ACME task plus one accept loop per listener.
            tokio_listeners.iter().cloned().for_each(|listener| {
                let shutdown_sender = shutdown_sender.clone();
                let job_sender = job_sender.clone();
                let nonblocking_sender = nonblocking_sender.clone();

                let mut lifecycle_rx = subscribe_runtime_to_signals();
                let mut shutdown_receiver = shutdown_sender.subscribe();
                let mut acceptor = Acceptor {
                    acceptor_args: Arc::new(AcceptorArgs {
                        strategy: self.clone(),
                        listener_info: listener.listener_info(),
                        shutdown_receiver: shutdown_sender.subscribe(),
                        job_sender: job_sender.clone(),
                        nonblocking_sender: nonblocking_sender.clone(),
                        server_params: server_params.clone(),
                    }),
                    join_set: JoinSet::new(),
                };

                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
                let acme_task_listener_clone = listener.clone();

                // macOS only: when multiple workers share a port without
                // reuse_port+reuse_address, pause briefly after each accept —
                // NOTE(review): presumably to spread accepts across workers
                // since the kernel can't balance them; confirm.
                let mut after_accept_wait: Option<Duration> = None::<Duration>;

                if cfg!(target_os = "macos") {
                    after_accept_wait = if server_params.workers > 1 && !(server_params.socket_opts.reuse_port && server_params.socket_opts.reuse_address) {
                        Some(Duration::from_nanos(10 * server_params.workers as u64))
                    } else {
                        None
                    };
                };

                listener_task_set.spawn(async move {
                    acme_task_listener_clone
                        .spawn_acme_event_task(shutdown_rx_for_acme_task)
                        .await;
                });

                listener_task_set.spawn(async move {
                    loop {
                        // Process any pending signals before select
                        tokio::select! {
                            accept_result = listener.accept() => {
                                match accept_result {
                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
                                }
                                if cfg!(target_os = "macos") {
                                    if let Some(after_accept_wait) = after_accept_wait{
                                        tokio::time::sleep(after_accept_wait).await;
                                    }
                                }
                            },
                            // Watch channel flipped away from Running.
                            _ = shutdown_receiver.changed() => {
                                debug!("Shutdown requested via receiver");
                                break;
                            },
                            lifecycle_event = lifecycle_rx.recv() => {
                                match lifecycle_event {
                                    Ok(LifecycleEvent::Shutdown) => {
                                        debug!("Received LifecycleEvent::Shutdown");
                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
                                        break;
                                    },
                                    Err(e) => {
                                        debug!("Lifecycle channel closed: {:?}, exiting accept loop", e);
                                        break
                                    },
                                    _ => ()
                                }
                            }
                        }
                    }
                    // Drain all in-flight connections spawned by this acceptor.
                    acceptor.join().await;
                });
            });

            if self.is_single_mode() {
                self.invoke_hook("after_start");
            }

            // Block until every accept loop and ACME task has finished.
            while let Some(_res) = listener_task_set.join_next().await {}
            drop(tokio_listeners);

            Ok::<(), ItsiError>(())
        });

        debug!("Single mode runtime exited.");

        // Tell every thread worker (on both queues) to shut down.
        for _i in 0..thread_workers.len() {
            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
            nonblocking_sender
                .send_blocking(RequestJob::Shutdown)
                .unwrap();
        }
        if result.is_err() {
            send_lifecycle_event(LifecycleEvent::Shutdown);
        }

        shutdown_sender.send(RunningPhase::Shutdown).ok();
        runtime.shutdown_timeout(Duration::from_millis(100));
        unsubscribe_runtime();

        debug!("Shutdown timeout finished.");

        // Poll workers until all report shut down, up to the configured deadline.
        let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
        loop {
            if thread_workers
                .iter()
                .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
            {
                funcall_no_ret(monitor_thread, "join", ()).ok();
                break;
            }
            sleep(Duration::from_millis(50));
        }

        if self.is_single_mode() {
            self.invoke_hook("before_shutdown");
        }

        // A monitor-triggered restart re-enters run() on the same strategy.
        if self.restart_requested.load(Ordering::SeqCst) {
            self.restart_requested.store(false, Ordering::SeqCst);
            info!("Worker restarting");
            self.run()?;
        }

        debug!("Runtime has shut down");
        result
    }
|
|
426
|
+
|
|
427
|
+
pub fn is_single_mode(&self) -> bool {
|
|
428
|
+
self.server_config.server_params.read().workers == 1
|
|
429
|
+
}
|
|
430
|
+
|
|
431
|
+
pub fn invoke_hook(&self, hook_name: &str) {
|
|
432
|
+
if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
|
|
433
|
+
call_with_gvl(|_| hook.call::<_, Value>(()).ok());
|
|
434
|
+
}
|
|
435
|
+
}
|
|
436
|
+
    /// Restart the server while keeping connections open.
    ///
    /// Validates the new configuration first and silently returns `Ok` if it
    /// is invalid. Otherwise stops the file watcher, duplicates FDs
    /// (presumably the listener FDs for the exec'd process — confirm against
    /// `ItsiServerConfig`), and re-execs in place.
    pub async fn restart(&self) -> Result<()> {
        // Invalid config: refuse to restart, but don't error out.
        if !self.server_config.check_config().await {
            return Ok(());
        }
        if self.is_single_mode() {
            self.invoke_hook("before_restart");
        }
        self.server_config.stop_watcher()?;
        self.server_config.dup_fds()?;
        self.server_config.reload_exec()?;
        Ok(())
    }
|
|
449
|
+
}
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
use std::{
|
|
2
|
+
collections::VecDeque,
|
|
3
|
+
sync::atomic::{AtomicBool, AtomicI8, Ordering},
|
|
4
|
+
};
|
|
5
|
+
|
|
6
|
+
use nix::libc::{self, sighandler_t};
|
|
7
|
+
use parking_lot::Mutex;
|
|
8
|
+
use tokio::sync::broadcast;
|
|
9
|
+
use tracing::{debug, warn};
|
|
10
|
+
|
|
11
|
+
use super::lifecycle_event::LifecycleEvent;
|
|
12
|
+
|
|
13
|
+
/// Counter driven by `receive_signal`: shutdown signals add a net +1, every
/// other signal decrements; reaching >= 2 forces immediate shutdown.
pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
/// Set once shutdown has been requested, via signal or explicit stop.
pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
/// Broadcast sender the active runtime subscribes to for lifecycle events;
/// `None` while no runtime is subscribed.
pub static SIGNAL_HANDLER_CHANNEL: Mutex<Option<broadcast::Sender<LifecycleEvent>>> =
    Mutex::new(None);

/// Events delivered while no channel existed; drained on the next subscribe.
pub static PENDING_QUEUE: Mutex<VecDeque<LifecycleEvent>> = Mutex::new(VecDeque::new());
|
|
19
|
+
|
|
20
|
+
/// Returns a receiver for lifecycle events, creating the shared broadcast
/// channel on first use. Events queued while no channel existed are re-sent
/// from a helper thread shortly after creation.
pub fn subscribe_runtime_to_signals() -> broadcast::Receiver<LifecycleEvent> {
    let mut guard = SIGNAL_HANDLER_CHANNEL.lock();
    // Channel already exists: just hand out another receiver.
    if let Some(sender) = guard.as_ref() {
        return sender.subscribe();
    }
    let (sender, receiver) = broadcast::channel(32);
    let sender_clone = sender.clone();
    // Replay events that arrived before any subscriber existed. The short
    // sleep gives the caller time to start listening. NOTE(review): this is a
    // timing heuristic, not a guarantee — a slow caller could still miss the
    // replayed events; confirm this window is acceptable.
    std::thread::spawn(move || {
        std::thread::sleep(std::time::Duration::from_millis(10));
        for event in PENDING_QUEUE.lock().drain(..) {
            if let Err(e) = sender_clone.send(event) {
                eprintln!("Warning: Failed to send pending lifecycle event {:?}", e);
            }
        }
    });

    guard.replace(sender);

    receiver
}
|
|
40
|
+
|
|
41
|
+
pub fn unsubscribe_runtime() {
|
|
42
|
+
SIGNAL_HANDLER_CHANNEL.lock().take();
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/// Delivers a lifecycle event to the subscribed runtime, if any.
///
/// With no subscriber channel, the event is queued for replay on the next
/// `subscribe_runtime_to_signals` call. If the channel exists but all
/// receivers are gone, shutdown-class events still set `SHUTDOWN_REQUESTED`
/// so the request isn't lost; other events are only logged.
pub fn send_lifecycle_event(event: LifecycleEvent) {
    if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() {
        if let Err(e) = sender.send(event) {
            // `broadcast::Sender::send` errs only when no receivers remain;
            // the undelivered event is returned inside the error (`e.0`).
            if matches!(
                e.0,
                LifecycleEvent::Shutdown | LifecycleEvent::ForceShutdown
            ) {
                SHUTDOWN_REQUESTED.store(true, Ordering::SeqCst);
                warn!(
                    "Dropping shutdown lifecycle event after receiver closed: {:?}",
                    e
                );
            } else {
                eprintln!("Warning: Failed to send lifecycle event {:?}", e);
            }
        }
    } else {
        PENDING_QUEUE.lock().push_back(event);
    }
}
|
|
65
|
+
|
|
66
|
+
/// Signal handler installed by `reset_signal_handlers`: maps a raw signal
/// number to a `LifecycleEvent` and forwards it.
///
/// NOTE(review): this runs in signal-handler context yet takes locks and
/// logs via tracing — confirm that is acceptable for this process's signal
/// delivery model.
fn receive_signal(signum: i32, _: sighandler_t) {
    debug!("Received signal: {}", signum);
    // Every signal decrements the counter; shutdown signals below add 2, for
    // a net +1 per SIGINT/SIGTERM. NOTE(review): non-shutdown signals thereby
    // erode the count (it can go negative) — confirm the decay is intentional.
    SIGINT_COUNT.fetch_add(-1, Ordering::SeqCst);
    let event = match signum {
        libc::SIGTERM | libc::SIGINT => {
            debug!("Received shutdown signal (SIGTERM/SIGINT)");
            SHUTDOWN_REQUESTED.store(true, Ordering::SeqCst);
            SIGINT_COUNT.fetch_add(2, Ordering::SeqCst);
            // First shutdown signal -> graceful; repeated signals -> forced.
            if SIGINT_COUNT.load(Ordering::SeqCst) < 2 {
                debug!("First shutdown signal, requesting graceful shutdown");
                Some(LifecycleEvent::Shutdown)
            } else {
                warn!("Multiple shutdown signals received, forcing immediate shutdown");
                Some(LifecycleEvent::ForceShutdown)
            }
        }
        libc::SIGUSR2 => Some(LifecycleEvent::PrintInfo),
        libc::SIGUSR1 => Some(LifecycleEvent::Restart),
        libc::SIGHUP => Some(LifecycleEvent::Reload),
        libc::SIGTTIN => Some(LifecycleEvent::IncreaseWorkers),
        libc::SIGTTOU => Some(LifecycleEvent::DecreaseWorkers),
        libc::SIGCHLD => Some(LifecycleEvent::ChildTerminated),
        _ => None,
    };

    if let Some(event) = event {
        debug!("Signal {} mapped to lifecycle event: {:?}", signum, event);
        send_lifecycle_event(event);
    } else {
        debug!("Signal {} not mapped to any lifecycle event", signum);
    }
}
|
|
98
|
+
|
|
99
|
+
pub fn reset_signal_handlers() -> bool {
|
|
100
|
+
debug!("Resetting signal handlers");
|
|
101
|
+
SIGINT_COUNT.store(0, Ordering::SeqCst);
|
|
102
|
+
SHUTDOWN_REQUESTED.store(false, Ordering::SeqCst);
|
|
103
|
+
|
|
104
|
+
unsafe {
|
|
105
|
+
libc::signal(libc::SIGTERM, receive_signal as usize);
|
|
106
|
+
libc::signal(libc::SIGINT, receive_signal as usize);
|
|
107
|
+
libc::signal(libc::SIGUSR2, receive_signal as usize);
|
|
108
|
+
libc::signal(libc::SIGUSR1, receive_signal as usize);
|
|
109
|
+
libc::signal(libc::SIGHUP, receive_signal as usize);
|
|
110
|
+
libc::signal(libc::SIGTTIN, receive_signal as usize);
|
|
111
|
+
libc::signal(libc::SIGTTOU, receive_signal as usize);
|
|
112
|
+
libc::signal(libc::SIGCHLD, receive_signal as usize);
|
|
113
|
+
}
|
|
114
|
+
true
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
pub fn clear_signal_handlers() {
|
|
118
|
+
debug!("Clearing signal handlers");
|
|
119
|
+
unsafe {
|
|
120
|
+
libc::signal(libc::SIGTERM, libc::SIG_DFL);
|
|
121
|
+
libc::signal(libc::SIGINT, libc::SIG_DFL);
|
|
122
|
+
libc::signal(libc::SIGUSR2, libc::SIG_DFL);
|
|
123
|
+
libc::signal(libc::SIGUSR1, libc::SIG_DFL);
|
|
124
|
+
libc::signal(libc::SIGHUP, libc::SIG_DFL);
|
|
125
|
+
libc::signal(libc::SIGTTIN, libc::SIG_DFL);
|
|
126
|
+
libc::signal(libc::SIGTTOU, libc::SIG_DFL);
|
|
127
|
+
libc::signal(libc::SIGCHLD, libc::SIG_DFL);
|
|
128
|
+
}
|
|
129
|
+
}
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
use bytes::Buf;
|
|
2
|
+
use hyper::body::Body;
|
|
3
|
+
use hyper::body::Frame;
|
|
4
|
+
use hyper::body::SizeHint;
|
|
5
|
+
use std::error::Error;
|
|
6
|
+
use std::fmt;
|
|
7
|
+
use std::ops::Deref;
|
|
8
|
+
use std::pin::Pin;
|
|
9
|
+
use std::sync::atomic::AtomicUsize;
|
|
10
|
+
use std::sync::atomic::Ordering;
|
|
11
|
+
use std::task::Context;
|
|
12
|
+
use std::task::Poll;
|
|
13
|
+
use tracing::debug;
|
|
14
|
+
|
|
15
|
+
/// Error produced when a request body grows past the configured maximum size.
#[derive(Debug)]
pub struct MaxBodySizeReached;

impl fmt::Display for MaxBodySizeReached {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Maximum body size reached")
    }
}

impl Error for MaxBodySizeReached {}
|
|
25
|
+
|
|
26
|
+
/// Body wrapper that counts the bytes of every data frame it yields and
/// errors with `MaxBodySizeReached` once the running total exceeds `limit`.
#[derive(Debug)]
pub struct SizeLimitedIncoming<B> {
    /// The wrapped body.
    pub inner: B,
    /// Maximum cumulative body size in bytes; `usize::MAX` means "no limit".
    pub limit: AtomicUsize,
    // Bytes observed so far across all data frames.
    current: usize,
}
|
|
32
|
+
|
|
33
|
+
/// Lets a `SizeLimitedIncoming<B>` be used transparently wherever a `&B` is
/// expected.
impl<B> Deref for SizeLimitedIncoming<B> {
    type Target = B;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
|
|
40
|
+
|
|
41
|
+
impl<B> SizeLimitedIncoming<B> {
|
|
42
|
+
pub fn new(inner: B) -> Self {
|
|
43
|
+
Self {
|
|
44
|
+
inner,
|
|
45
|
+
limit: AtomicUsize::new(usize::MAX),
|
|
46
|
+
current: 0,
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
impl<B> Body for SizeLimitedIncoming<B>
where
    B: Body + Unpin,
    B::Data: Buf,
    // Ensure that the inner error converts into our boxed error type.
    B::Error: Into<Box<dyn Error + Send + Sync>>,
{
    type Data = B::Data;
    type Error = Box<dyn Error + Send + Sync>;

    /// Polls the inner body, adding each data frame's length to the running
    /// total. Once the total exceeds `self.limit`, yields
    /// `Err(MaxBodySizeReached)` instead of the frame. Non-data frames pass
    /// through untouched.
    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        // Pin the inner body.
        let inner = Pin::new(&mut self.inner);
        match inner.poll_frame(cx) {
            Poll::Ready(Some(Ok(frame))) => {
                // Use public methods since we cannot match on the private enum.
                if frame.is_data() {
                    match frame.into_data() {
                        Ok(data) => {
                            let len = data.remaining();
                            // Accumulate before the limit check, so the frame
                            // that crosses the limit is the one rejected.
                            self.current += len;
                            debug!(
                                target: "option::max_body",
                                "current: {}, limit: {}",
                                self.current, self.limit.load(Ordering::Relaxed)
                            );
                            if self.current > self.limit.load(Ordering::Relaxed) {
                                Poll::Ready(Some(Err(Box::new(MaxBodySizeReached))))
                            } else {
                                Poll::Ready(Some(Ok(Frame::data(data))))
                            }
                        }
                        // Should not occur if is_data() was true, but pass through if it does.
                        Err(frame) => Poll::Ready(Some(Ok(frame))),
                    }
                } else {
                    // For non-data frames (e.g. trailers), just pass them along.
                    Poll::Ready(Some(Ok(frame)))
                }
            }
            // Box the inner body's error into our error type.
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }

    /// Delegates end-of-stream detection to the wrapped body.
    fn is_end_stream(&self) -> bool {
        self.inner.is_end_stream()
    }

    /// Delegates size hinting to the wrapped body.
    fn size_hint(&self) -> SizeHint {
        self.inner.size_hint()
    }
}
|