itsi-server 0.1.1 → 0.1.3
This diff shows the contents of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/exe/itsi +88 -28
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +70 -0
- data/ext/itsi_error/src/lib.rs +10 -37
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +2 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
- data/ext/itsi_rb_helpers/src/lib.rs +90 -10
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/extconf.rb +6 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +38 -0
- data/ext/itsi_server/Cargo.toml +14 -2
- data/ext/itsi_server/extconf.rb +1 -1
- data/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
- data/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
- data/ext/itsi_server/src/body_proxy/mod.rs +2 -0
- data/ext/itsi_server/src/lib.rs +58 -7
- data/ext/itsi_server/src/request/itsi_request.rs +238 -104
- data/ext/itsi_server/src/response/itsi_response.rs +347 -0
- data/ext/itsi_server/src/response/mod.rs +1 -0
- data/ext/itsi_server/src/server/bind.rs +50 -20
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +11 -30
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +3 -50
- data/ext/itsi_server/src/server/itsi_server.rs +196 -134
- data/ext/itsi_server/src/server/lifecycle_event.rs +9 -0
- data/ext/itsi_server/src/server/listener.rs +184 -127
- data/ext/itsi_server/src/server/mod.rs +7 -1
- data/ext/itsi_server/src/server/process_worker.rs +196 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +254 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +241 -0
- data/ext/itsi_server/src/server/signal.rs +70 -0
- data/ext/itsi_server/src/server/thread_worker.rs +368 -0
- data/ext/itsi_server/src/server/tls.rs +42 -28
- data/ext/itsi_tracing/Cargo.toml +4 -0
- data/ext/itsi_tracing/src/lib.rs +36 -6
- data/lib/itsi/request.rb +30 -14
- data/lib/itsi/server/rack/handler/itsi.rb +25 -0
- data/lib/itsi/server/scheduler_mode.rb +6 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +82 -2
- data/lib/itsi/signals.rb +23 -0
- data/lib/itsi/stream_io.rb +38 -0
- metadata +38 -25
- data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
- data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs
@@ -0,0 +1,254 @@
use crate::server::{
    itsi_server::Server, lifecycle_event::LifecycleEvent, listener::Listener,
    process_worker::ProcessWorker,
};
use itsi_error::{ItsiError, Result};
use itsi_rb_helpers::{call_without_gvl, create_ruby_thread};
use itsi_tracing::{error, info, warn};
use nix::{
    libc::{self, exit},
    unistd::Pid,
};

use std::{
    sync::{atomic::AtomicUsize, Arc},
    time::{Duration, Instant},
};
use tokio::{
    runtime::{Builder as RuntimeBuilder, Runtime},
    sync::{broadcast, watch, Mutex},
    time::{self, sleep},
};
use tracing::instrument;
pub(crate) struct ClusterMode {
    pub listeners: Arc<Vec<Arc<Listener>>>,
    pub server: Arc<Server>,
    pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
    pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
}

static WORKER_ID: AtomicUsize = AtomicUsize::new(0);
static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
    parking_lot::Mutex::new(None);

impl ClusterMode {
    pub fn new(
        server: Arc<Server>,
        listeners: Arc<Vec<Arc<Listener>>>,
        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
    ) -> Self {
        if let Some(f) = server.before_fork.lock().take() {
            f();
        }
        let process_workers = (0..server.workers)
            .map(|_| ProcessWorker {
                worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
                ..Default::default()
            })
            .collect();

        Self {
            listeners,
            server,
            process_workers: parking_lot::Mutex::new(process_workers),
            lifecycle_channel,
        }
    }

    pub fn build_runtime(&self) -> Runtime {
        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
        builder
            .thread_name("itsi-server-accept-loop")
            .thread_stack_size(3 * 1024 * 1024)
            .enable_io()
            .enable_time()
            .build()
            .expect("Failed to build Tokio runtime")
    }

    #[allow(clippy::await_holding_lock)]
    pub async fn handle_lifecycle_event(
        self: Arc<Self>,
        lifecycle_event: LifecycleEvent,
    ) -> Result<()> {
        match lifecycle_event {
            LifecycleEvent::Start => Ok(()),
            LifecycleEvent::Shutdown => {
                self.shutdown().await?;
                Ok(())
            }
            LifecycleEvent::Restart => {
                for worker in self.process_workers.lock().iter() {
                    worker.reboot(self.clone()).await?;
                }
                Ok(())
            }
            LifecycleEvent::IncreaseWorkers => {
                let mut workers = self.process_workers.lock();
                let worker = ProcessWorker {
                    worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
                    ..Default::default()
                };
                let worker_clone = worker.clone();
                let self_clone = self.clone();
                create_ruby_thread(move || {
                    call_without_gvl(move || {
                        worker_clone.boot(self_clone).ok();
                    })
                });
                workers.push(worker);
                Ok(())
            }
            LifecycleEvent::DecreaseWorkers => {
                let worker = {
                    let mut workers = self.process_workers.lock();
                    workers.pop()
                };
                if let Some(dropped_worker) = worker {
                    dropped_worker.request_shutdown();
                    let force_kill_time =
                        Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
                    while dropped_worker.is_alive() && force_kill_time > Instant::now() {
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                    if dropped_worker.is_alive() {
                        dropped_worker.force_kill();
                    }
                };
                Ok(())
            }
            LifecycleEvent::ForceShutdown => {
                for worker in self.process_workers.lock().iter() {
                    worker.force_kill();
                }
                unsafe { exit(0) };
            }
        }
    }

    pub async fn shutdown(&self) -> Result<()> {
        let shutdown_timeout = self.server.shutdown_timeout;
        let workers = self.process_workers.lock().clone();

        workers.iter().for_each(|worker| worker.request_shutdown());

        let remaining_children = Arc::new(Mutex::new(workers.len()));
        let monitor_handle = {
            let remaining_children: Arc<Mutex<usize>> = Arc::clone(&remaining_children);
            let mut workers = workers.clone();
            tokio::spawn(async move {
                loop {
                    // Check if all workers have exited
                    let mut remaining = remaining_children.lock().await;
                    workers.retain(|worker| worker.is_alive());
                    *remaining = workers.len();
                    if *remaining == 0 {
                        break;
                    }
                    sleep(Duration::from_millis(100)).await;
                }
            })
        };

        tokio::select! {
            _ = monitor_handle => {
                info!("All children exited early, exit normally")
            }
            _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
                warn!("Graceful shutdown timeout reached, force killing remaining children");
                workers.iter().for_each(|worker| worker.force_kill());
            }
        }

        Err(ItsiError::Break())
    }

    pub fn receive_signal(signal: i32) {
        match signal {
            libc::SIGCHLD => {
                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
                    i.send(()).ok();
                });
            }
            _ => {
                // Handle other signals
            }
        }
    }

    pub fn stop(&self) -> Result<()> {
        unsafe { libc::signal(libc::SIGCHLD, libc::SIG_DFL) };

        for worker in self.process_workers.lock().iter() {
            if worker.is_alive() {
                worker.force_kill();
            }
        }

        Ok(())
    }

    #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
    pub fn run(self: Arc<Self>) -> Result<()> {
        info!("Starting in Cluster mode");
        self.process_workers
            .lock()
            .iter()
            .try_for_each(|worker| worker.boot(Arc::clone(&self)))?;

        let (sender, mut receiver) = watch::channel(());
        *CHILD_SIGNAL_SENDER.lock() = Some(sender);

        unsafe { libc::signal(libc::SIGCHLD, Self::receive_signal as usize) };

        let mut lifecycle_rx = self.lifecycle_channel.subscribe();
        let self_ref = self.clone();

        self.build_runtime().block_on(async {
            let self_ref = self_ref.clone();
            let mut memory_check_interval = time::interval(time::Duration::from_secs(2));
            loop {
                tokio::select! {
                    _ = receiver.changed() => {
                        let mut workers = self_ref.process_workers.lock();
                        workers.retain(|worker| {
                            worker.boot_if_dead(Arc::clone(&self_ref))
                        });
                        if workers.is_empty() {
                            warn!("No workers running. Send SIGTTIN to increase worker count");
                        }
                    }
                    _ = memory_check_interval.tick() => {
                        if let Some(memory_limit) = self_ref.server.worker_memory_limit {
                            let largest_worker = {
                                let workers = self_ref.process_workers.lock();
                                workers.iter().max_by(|wa, wb| wa.memory_usage().cmp(&wb.memory_usage())).cloned()
                            };
                            if let Some(largest_worker) = largest_worker {
                                if let Some(current_mem_usage) = largest_worker.memory_usage(){
                                    if current_mem_usage > memory_limit {
                                        largest_worker.reboot(self_ref.clone()).await.ok();
                                    }
                                }
                            }
                        }
                    }
                    lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
                        Ok(lifecycle_event) => {
                            if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await{
                                match e {
                                    ItsiError::Break() => break,
                                    _ => error!("Error in handle_lifecycle_event {:?}", e)
                                }
                            }

                        },
                        Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
                    }
                }
            }
        });

        Ok(())
    }
}
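The supervision loop in `ClusterMode::run` hinges on a SIGCHLD handler that nudges a `tokio::sync::watch` channel, so exited workers are re-forked without polling. A condensed sketch of just that pattern, with the Itsi-specific pieces replaced by placeholder names (`CHILD_EXIT_NOTIFIER`, `on_sigchld`, and `supervise_children` are illustrative, not part of the gem):

```rust
use nix::libc;
use tokio::sync::watch;

// Placeholder for the diff's CHILD_SIGNAL_SENDER: a sender the signal handler can reach.
static CHILD_EXIT_NOTIFIER: parking_lot::Mutex<Option<watch::Sender<()>>> =
    parking_lot::Mutex::new(None);

// Keep the handler tiny: just nudge the async side, much as receive_signal does above.
extern "C" fn on_sigchld(_signum: i32) {
    if let Some(tx) = CHILD_EXIT_NOTIFIER.lock().as_ref() {
        tx.send(()).ok();
    }
}

async fn supervise_children() {
    let (tx, mut rx) = watch::channel(());
    *CHILD_EXIT_NOTIFIER.lock() = Some(tx);
    unsafe { libc::signal(libc::SIGCHLD, on_sigchld as usize) };
    while rx.changed().await.is_ok() {
        // ClusterMode::run reacts at this point by retaining only workers for which
        // boot_if_dead() succeeds, i.e. re-forking any child process that has exited.
    }
}
```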
data/ext/itsi_server/src/server/serve_strategy/mod.rs
@@ -0,0 +1,27 @@
use cluster_mode::ClusterMode;
use itsi_error::Result;
use single_mode::SingleMode;
use std::sync::Arc;
pub mod cluster_mode;
pub mod single_mode;

pub(crate) enum ServeStrategy {
    Single(Arc<SingleMode>),
    Cluster(Arc<ClusterMode>),
}

impl ServeStrategy {
    pub fn run(&self) -> Result<()> {
        match self {
            ServeStrategy::Single(single_router) => single_router.clone().run(),
            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().run(),
        }
    }

    pub(crate) fn stop(&self) -> Result<()> {
        match self {
            ServeStrategy::Single(single_router) => single_router.clone().stop(),
            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().stop(),
        }
    }
}
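`ServeStrategy` is a plain enum dispatch over the two modes. A minimal sketch of how a caller might pick one, assuming the constructors shown in this diff and the `SIGNAL_HANDLER_CHANNEL` from `signal.rs` further below; the `build_strategy` helper and the `workers > 1` test are illustrative only, not the gem's actual wiring (which lives in `itsi_server.rs`):

```rust
use std::sync::Arc;

use itsi_error::Result;

use crate::server::{
    itsi_server::Server,
    listener::Listener,
    serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode, ServeStrategy},
    signal::SIGNAL_HANDLER_CHANNEL,
};

// Hypothetical helper: fork a cluster when more than one worker is requested,
// otherwise serve from a single process.
fn build_strategy(
    server: Arc<Server>,
    listeners: Arc<Vec<Arc<Listener>>>,
) -> Result<ServeStrategy> {
    let lifecycle_channel = SIGNAL_HANDLER_CHANNEL.0.clone();
    Ok(if server.workers > 1 {
        ServeStrategy::Cluster(Arc::new(ClusterMode::new(
            server,
            listeners,
            lifecycle_channel,
        )))
    } else {
        ServeStrategy::Single(Arc::new(SingleMode::new(
            server,
            listeners,
            lifecycle_channel,
        )?))
    })
}
```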
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
@@ -0,0 +1,241 @@
use crate::{
    request::itsi_request::ItsiRequest,
    server::{
        io_stream::IoStream,
        itsi_server::{RequestJob, Server},
        lifecycle_event::LifecycleEvent,
        listener::{Listener, TokioListener},
        thread_worker::{build_thread_workers, ThreadWorker},
    },
};
use http::Request;
use hyper::{body::Incoming, service::service_fn};
use hyper_util::{
    rt::{TokioExecutor, TokioIo, TokioTimer},
    server::conn::auto::Builder,
};
use itsi_error::{ItsiError, Result};
use itsi_tracing::{debug, error, info};
use nix::unistd::Pid;
use std::{
    num::NonZeroU8,
    pin::Pin,
    sync::Arc,
    time::{Duration, Instant},
};
use tokio::{
    runtime::{Builder as RuntimeBuilder, Runtime},
    sync::broadcast,
    task::JoinSet,
};
use tracing::instrument;

pub struct SingleMode {
    pub executor: Builder<TokioExecutor>,
    pub server: Arc<Server>,
    pub sender: async_channel::Sender<RequestJob>,
    pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
    pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
}

pub enum RunningPhase {
    Running,
    ShutdownPending,
    Shutdown,
}

impl SingleMode {
    #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
    pub(crate) fn new(
        server: Arc<Server>,
        listeners: Arc<Vec<Arc<Listener>>>,
        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
    ) -> Result<Self> {
        let (thread_workers, sender) = build_thread_workers(
            Pid::this(),
            NonZeroU8::try_from(server.threads).unwrap(),
            server.app,
            server.scheduler_class.clone(),
        )?;
        Ok(Self {
            executor: Builder::new(TokioExecutor::new()),
            listeners,
            server,
            sender,
            thread_workers,
            lifecycle_channel,
        })
    }

    pub fn build_runtime(&self) -> Runtime {
        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
        builder
            .thread_name("itsi-server-accept-loop")
            .thread_stack_size(3 * 1024 * 1024)
            .enable_io()
            .enable_time()
            .build()
            .expect("Failed to build Tokio runtime")
    }

    pub fn stop(&self) -> Result<()> {
        Ok(())
    }

    #[instrument(parent=None, skip(self))]
    pub fn run(self: Arc<Self>) -> Result<()> {
        let mut listener_task_set = JoinSet::new();
        let self_ref = Arc::new(self);
        self_ref.build_runtime().block_on(async {

            for listener in self_ref.listeners.clone().iter() {
                let listener = Arc::new(listener.to_tokio_listener());
                let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
                let self_ref = self_ref.clone();
                let listener = listener.clone();
                let (shutdown_sender, mut shutdown_receiver) = tokio::sync::watch::channel::<RunningPhase>(RunningPhase::Running);
                listener_task_set.spawn(async move {
                    let strategy = self_ref.clone();
                    loop {
                        tokio::select! {
                            accept_result = listener.accept() => match accept_result {
                                Ok(accept_result) => {
                                    if let Err(e) = strategy.serve_connection(accept_result, listener.clone(), shutdown_receiver.clone()).await {
                                        error!("Error in serve_connection {:?}", e)
                                    }
                                },
                                Err(e) => debug!("Listener.accept failed {:?}", e),
                            },
                            _ = shutdown_receiver.changed() => {
                                break;
                            }
                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
                                Ok(lifecycle_event) => {
                                    if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
                                        match e {
                                            ItsiError::Break() => break,
                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
                                        }
                                    }

                                },
                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
                            }
                        }
                    }
                    if let Ok(listener) = Arc::try_unwrap(listener){
                        listener.unbind();
                    }
                });

            }

            while let Some(_res) = listener_task_set.join_next().await {}
        });

        Ok(())
    }

    pub(crate) async fn serve_connection(
        &self,
        stream: IoStream,
        listener: Arc<TokioListener>,
        shutdown_channel: tokio::sync::watch::Receiver<RunningPhase>,
    ) -> Result<()> {
        let sender_clone = self.sender.clone();
        let addr = stream.addr();
        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
        let server = self.server.clone();
        let executor = self.executor.clone();
        let mut shutdown_channel_clone = shutdown_channel.clone();
        tokio::spawn(async move {
            let server = server.clone();
            let mut executor = executor.clone();
            let mut binding = executor.http1();
            let shutdown_channel = shutdown_channel_clone.clone();
            let mut serve = Box::pin(
                binding
                    .timer(TokioTimer::new())
                    .header_read_timeout(Duration::from_secs(1))
                    .serve_connection_with_upgrades(
                        io,
                        service_fn(move |hyper_request: Request<Incoming>| {
                            ItsiRequest::process_request(
                                hyper_request,
                                sender_clone.clone(),
                                server.clone(),
                                listener.clone(),
                                addr.clone(),
                                shutdown_channel.clone(),
                            )
                        }),
                    ),
            );

            tokio::select! {
                // Await the connection finishing naturally.
                res = &mut serve => {
                    match res{
                        Ok(()) => {
                            debug!("Connection closed normally")
                        },
                        Err(res) => {
                            debug!("Connection finished with error: {:?}", res)
                        }
                    }
                    serve.as_mut().graceful_shutdown();
                },
                // A lifecycle event triggers shutdown.
                _ = shutdown_channel_clone.changed() => {
                    // Initiate graceful shutdown.
                    serve.as_mut().graceful_shutdown();
                    // Now await the connection to finish shutting down.
                    if let Err(e) = serve.await {
                        debug!("Connection shutdown error: {:?}", e);
                    }
                }
            }
        });
        Ok(())
    }

    pub async fn handle_lifecycle_event(
        &self,
        lifecycle_event: LifecycleEvent,
        shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
    ) -> Result<()> {
        if let LifecycleEvent::Shutdown = lifecycle_event {
            shutdown_sender
                .send(RunningPhase::ShutdownPending)
                .expect("Failed to send shutdown pending signal");
            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
            for worker in &*self.thread_workers {
                worker.request_shutdown().await;
            }
            while Instant::now() < deadline {
                tokio::time::sleep(Duration::from_millis(50)).await;
                let alive_threads = self
                    .thread_workers
                    .iter()
                    .filter(|worker| worker.poll_shutdown(deadline))
                    .count();
                if alive_threads == 0 {
                    break;
                }
                tokio::time::sleep(Duration::from_millis(200)).await;
            }

            info!("Sending shutdown signal");
            shutdown_sender
                .send(RunningPhase::Shutdown)
                .expect("Failed to send shutdown signal");
            self.thread_workers.iter().for_each(|worker| {
                worker.poll_shutdown(deadline);
            });

            return Err(ItsiError::Break());
        }
        Ok(())
    }
}
data/ext/itsi_server/src/server/signal.rs
@@ -0,0 +1,70 @@
use std::sync::{atomic::AtomicI8, LazyLock};

use nix::libc::{self, sighandler_t};
use tokio::sync::{self, broadcast};

use super::lifecycle_event::LifecycleEvent;

pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
    broadcast::Sender<LifecycleEvent>,
    broadcast::Receiver<LifecycleEvent>,
)> = LazyLock::new(|| sync::broadcast::channel(5));

pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
fn receive_signal(signum: i32, _: sighandler_t) {
    SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
    match signum {
        libc::SIGTERM | libc::SIGINT => {
            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
                SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
            } else {
                // Not messing about. Force shutdown.
                SIGNAL_HANDLER_CHANNEL
                    .0
                    .send(LifecycleEvent::ForceShutdown)
                    .ok();
            }
        }
        libc::SIGUSR1 => {
            SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
        }
        libc::SIGTTIN => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::IncreaseWorkers)
                .ok();
        }
        libc::SIGTTOU => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::DecreaseWorkers)
                .ok();
        }
        _ => {}
    }
}

pub fn reset_signal_handlers() -> bool {
    SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
    unsafe {
        libc::signal(libc::SIGTERM, receive_signal as usize);
        libc::signal(libc::SIGINT, receive_signal as usize);
        libc::signal(libc::SIGUSR1, receive_signal as usize);
        libc::signal(libc::SIGUSR2, receive_signal as usize);
        libc::signal(libc::SIGTTIN, receive_signal as usize);
        libc::signal(libc::SIGTTOU, receive_signal as usize);
    }
    true
}

pub fn clear_signal_handlers() {
    unsafe {
        libc::signal(libc::SIGTERM, libc::SIG_DFL);
        libc::signal(libc::SIGINT, libc::SIG_DFL);
        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
        libc::signal(libc::SIGUSR2, libc::SIG_DFL);
        libc::signal(libc::SIGTTIN, libc::SIG_DFL);
        libc::signal(libc::SIGTTOU, libc::SIG_DFL);
    }
}
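Both serve strategies consume these events by subscribing to the channel before entering their accept/supervision loops. A small hedged sketch of that consumer side (the `watch_for_shutdown` function is illustrative; the real loops are the `tokio::select!` blocks in `cluster_mode.rs` and `single_mode.rs` above):

```rust
use crate::server::{
    lifecycle_event::LifecycleEvent,
    signal::{reset_signal_handlers, SIGNAL_HANDLER_CHANNEL},
};

// Illustrative consumer: subscribe first so no early signal is lost, then install
// the handlers and react to each broadcast LifecycleEvent until shutdown.
async fn watch_for_shutdown() {
    let mut rx = SIGNAL_HANDLER_CHANNEL.0.subscribe();
    reset_signal_handlers();
    while let Ok(event) = rx.recv().await {
        match event {
            LifecycleEvent::Shutdown | LifecycleEvent::ForceShutdown => break,
            _ => {
                // Restart / IncreaseWorkers / DecreaseWorkers are handled by the
                // ClusterMode and SingleMode lifecycle handlers shown earlier.
            }
        }
    }
}
```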