itsi-scheduler 0.2.22-aarch64-linux
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rubocop.yml +8 -0
- data/Cargo.lock +997 -0
- data/Cargo.toml +7 -0
- data/Rakefile +39 -0
- data/ext/itsi_acme/Cargo.toml +86 -0
- data/ext/itsi_acme/examples/high_level.rs +63 -0
- data/ext/itsi_acme/examples/high_level_warp.rs +52 -0
- data/ext/itsi_acme/examples/low_level.rs +87 -0
- data/ext/itsi_acme/examples/low_level_axum.rs +66 -0
- data/ext/itsi_acme/src/acceptor.rs +81 -0
- data/ext/itsi_acme/src/acme.rs +354 -0
- data/ext/itsi_acme/src/axum.rs +86 -0
- data/ext/itsi_acme/src/cache.rs +39 -0
- data/ext/itsi_acme/src/caches/boxed.rs +80 -0
- data/ext/itsi_acme/src/caches/composite.rs +69 -0
- data/ext/itsi_acme/src/caches/dir.rs +106 -0
- data/ext/itsi_acme/src/caches/mod.rs +11 -0
- data/ext/itsi_acme/src/caches/no.rs +78 -0
- data/ext/itsi_acme/src/caches/test.rs +136 -0
- data/ext/itsi_acme/src/config.rs +172 -0
- data/ext/itsi_acme/src/https_helper.rs +69 -0
- data/ext/itsi_acme/src/incoming.rs +142 -0
- data/ext/itsi_acme/src/jose.rs +161 -0
- data/ext/itsi_acme/src/lib.rs +142 -0
- data/ext/itsi_acme/src/resolver.rs +59 -0
- data/ext/itsi_acme/src/state.rs +424 -0
- data/ext/itsi_error/Cargo.lock +368 -0
- data/ext/itsi_error/Cargo.toml +12 -0
- data/ext/itsi_error/src/lib.rs +140 -0
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.lock +355 -0
- data/ext/itsi_rb_helpers/Cargo.toml +11 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
- data/ext/itsi_rb_helpers/src/lib.rs +232 -0
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/extconf.rb +11 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +320 -0
- data/ext/itsi_scheduler/src/lib.rs +39 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +94 -0
- data/ext/itsi_server/src/default_responses/mod.rs +14 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +154 -0
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +116 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +149 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +346 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +265 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +399 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +447 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +545 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +650 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +102 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/binds/bind.rs +204 -0
- data/ext/itsi_server/src/server/binds/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/binds/listener.rs +485 -0
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/binds/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/binds/tls.rs +278 -0
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/frame_stream.rs +143 -0
- data/ext/itsi_server/src/server/http_message_types.rs +230 -0
- data/ext/itsi_server/src/server/io_stream.rs +128 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +170 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +63 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +94 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +93 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +343 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +151 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +329 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +300 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +193 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +64 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +188 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +168 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +183 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +209 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +133 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +122 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +407 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +155 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +54 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +54 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +51 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +138 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +269 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +62 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +218 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +31 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +381 -0
- data/ext/itsi_server/src/server/mod.rs +14 -0
- data/ext/itsi_server/src/server/process_worker.rs +247 -0
- data/ext/itsi_server/src/server/redirect_type.rs +26 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +100 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +411 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +31 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +449 -0
- data/ext/itsi_server/src/server/signal.rs +129 -0
- data/ext/itsi_server/src/server/size_limited_incoming.rs +107 -0
- data/ext/itsi_server/src/server/thread_worker.rs +504 -0
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +270 -0
- data/ext/itsi_server/src/services/mime_types.rs +2896 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +89 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +609 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1400 -0
- data/ext/itsi_tracing/Cargo.lock +274 -0
- data/ext/itsi_tracing/Cargo.toml +17 -0
- data/ext/itsi_tracing/src/lib.rs +370 -0
- data/itsi-scheduler-100.png +0 -0
- data/lib/itsi/schedule_refinement.rb +96 -0
- data/lib/itsi/scheduler/3.1/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.2/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.3/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/3.4/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/4.0/itsi_scheduler.so +0 -0
- data/lib/itsi/scheduler/native_extension.rb +34 -0
- data/lib/itsi/scheduler/version.rb +7 -0
- data/lib/itsi/scheduler.rb +153 -0
- data/vendor/rb-sys-build/.cargo-ok +1 -0
- data/vendor/rb-sys-build/.cargo_vcs_info.json +6 -0
- data/vendor/rb-sys-build/Cargo.lock +294 -0
- data/vendor/rb-sys-build/Cargo.toml +71 -0
- data/vendor/rb-sys-build/Cargo.toml.orig +32 -0
- data/vendor/rb-sys-build/LICENSE-APACHE +190 -0
- data/vendor/rb-sys-build/LICENSE-MIT +21 -0
- data/vendor/rb-sys-build/src/bindings/sanitizer.rs +185 -0
- data/vendor/rb-sys-build/src/bindings/stable_api.rs +247 -0
- data/vendor/rb-sys-build/src/bindings/wrapper.h +71 -0
- data/vendor/rb-sys-build/src/bindings.rs +280 -0
- data/vendor/rb-sys-build/src/cc.rs +421 -0
- data/vendor/rb-sys-build/src/lib.rs +12 -0
- data/vendor/rb-sys-build/src/rb_config/flags.rs +101 -0
- data/vendor/rb-sys-build/src/rb_config/library.rs +132 -0
- data/vendor/rb-sys-build/src/rb_config/search_path.rs +57 -0
- data/vendor/rb-sys-build/src/rb_config.rs +906 -0
- data/vendor/rb-sys-build/src/utils.rs +53 -0
- metadata +210 -0
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
use super::serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode};
|
|
2
|
+
use core_affinity::CoreId;
|
|
3
|
+
use itsi_error::{ItsiError, Result};
|
|
4
|
+
use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread, fork};
|
|
5
|
+
use itsi_tracing::error;
|
|
6
|
+
use nix::{
|
|
7
|
+
errno::Errno,
|
|
8
|
+
sys::{
|
|
9
|
+
signal::{
|
|
10
|
+
kill,
|
|
11
|
+
Signal::{SIGKILL, SIGTERM, SIGUSR2},
|
|
12
|
+
},
|
|
13
|
+
wait::{waitpid, WaitPidFlag, WaitStatus},
|
|
14
|
+
},
|
|
15
|
+
unistd::{setpgid, Pid},
|
|
16
|
+
};
|
|
17
|
+
use parking_lot::Mutex;
|
|
18
|
+
use std::{
|
|
19
|
+
process::{self, exit},
|
|
20
|
+
sync::{Arc, LazyLock},
|
|
21
|
+
time::{Duration, Instant},
|
|
22
|
+
};
|
|
23
|
+
use sysinfo::System;
|
|
24
|
+
|
|
25
|
+
use tokio::{sync::watch, time::sleep};
|
|
26
|
+
use tracing::{info, instrument, warn};
|
|
27
|
+
|
|
28
|
+
/// Handle for one forked worker child process managed by cluster mode.
#[derive(Clone, Debug)]
pub struct ProcessWorker {
    // Stable index of this worker within the cluster; also used to pick a
    // CPU core in `boot` when core pinning is enabled.
    pub worker_id: usize,
    // PID of the currently running forked child, or None before boot /
    // after it has been cleared. Shared so clones observe the same child.
    pub child_pid: Arc<Mutex<Option<Pid>>>,
    // Creation time of this handle; compared against a 2s window in
    // `just_started` for crash-loop detection.
    pub started_at: Instant,
}
|
|
34
|
+
|
|
35
|
+
impl Default for ProcessWorker {
    /// Worker 0 with no child process and `started_at` set to "now".
    fn default() -> Self {
        Self {
            worker_id: 0,
            child_pid: Arc::new(Mutex::new(None)),
            started_at: Instant::now(),
        }
    }
}
|
|
44
|
+
|
|
45
|
+
pub static CORE_IDS: LazyLock<Vec<CoreId>> =
|
|
46
|
+
LazyLock::new(|| core_affinity::get_core_ids().unwrap());
|
|
47
|
+
|
|
48
|
+
impl ProcessWorker {
|
|
49
|
+
    /// Forks a new child process running a `SingleMode` server for this worker.
    ///
    /// If a previous child is still alive it is first asked to terminate with
    /// SIGTERM. In the parent, records the new child's PID and returns `Ok(())`.
    /// In the child, this function never returns: it runs the server and then
    /// calls `exit(0)`.
    #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]
    pub(crate) fn boot(&self, cluster_template: Arc<ClusterMode>) -> Result<()> {
        // Terminate and forget any previous child before replacing it.
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            if self.is_alive() {
                if let Err(e) = kill(pid, SIGTERM) {
                    info!("Failed to send SIGTERM to process {}: {}", pid, e);
                }
            }
            *self.child_pid.lock() = None;
        }

        // Fork while holding the GVL, passing the optional `after_fork` hook.
        // Per the branches below, `fork` yields Some(child_pid) in the parent
        // and None in the child.
        match call_with_gvl(|_ruby| {
            fork(
                cluster_template
                    .server_config
                    .server_params
                    .read()
                    .hooks
                    .get("after_fork")
                    .cloned(),
            )
        }) {
            Some(pid) => {
                // Parent: remember the child so later shutdown/liveness checks work.
                *self.child_pid.lock() = Some(Pid::from_raw(pid));
            }
            None => {
                // Child: become our own process-group leader so group-targeted
                // signals don't hit the parent.
                if let Err(e) = setpgid(
                    Pid::from_raw(process::id() as i32),
                    Pid::from_raw(process::id() as i32),
                ) {
                    error!("Failed to set process group ID: {}", e);
                }
                match SingleMode::new(cluster_template.server_config.clone(), self.worker_id) {
                    Ok(single_mode) => {
                        if cluster_template
                            .server_config
                            .server_params
                            .read()
                            .pin_worker_cores
                        {
                            // Pin to every second core (wrapping when workers
                            // outnumber cores). NOTE(review): presumably the
                            // stride of 2 leaves alternate cores free for
                            // sibling threads — confirm intent.
                            core_affinity::set_for_current(
                                CORE_IDS[(2 * self.worker_id) % CORE_IDS.len()],
                            );
                        }
                        Arc::new(single_mode).run().ok();
                    }
                    Err(e) => {
                        error!("Failed to boot into worker mode: {}", e);
                    }
                }
                // Child never returns to the caller.
                exit(0)
            }
        }
        Ok(())
    }
|
|
105
|
+
|
|
106
|
+
pub fn pid(&self) -> i32 {
|
|
107
|
+
if let Some(pid) = *self.child_pid.lock() {
|
|
108
|
+
return pid.as_raw();
|
|
109
|
+
}
|
|
110
|
+
0
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
    /// Memory usage of the child process as reported by sysinfo's
    /// `Process::memory` (units depend on the sysinfo version — bytes in
    /// recent releases; confirm against the pinned crate version).
    /// Returns `None` if there is no child or sysinfo cannot find it.
    pub(crate) fn memory_usage(&self) -> Option<u64> {
        if let Some(pid) = *self.child_pid.lock() {
            // NOTE(review): `System::new_all()` scans every process on the host
            // just to read one entry; a targeted refresh would be cheaper —
            // confirm the sysinfo API for the pinned version.
            let s = System::new_all();
            if let Some(process) = s.process(sysinfo::Pid::from(pid.as_raw() as usize)) {
                return Some(process.memory());
            }
        }
        None
    }
|
|
122
|
+
|
|
123
|
+
    /// Gracefully shuts down the current child, then boots a replacement on a
    /// fresh Ruby thread. Returns `Ok(true)` if the replacement's `boot`
    /// reported success, `Ok(false)` if it failed, and `Err` if the boot
    /// thread dropped the channel without reporting.
    pub(crate) async fn reboot(&self, cluster_template: Arc<ClusterMode>) -> Result<bool> {
        self.graceful_shutdown(cluster_template.clone()).await;
        let self_clone = self.clone();
        // The watch channel carries the boot outcome from the Ruby thread back
        // to this async context.
        let (booted_sender, mut booted_receiver) = watch::channel(false);
        // `boot` forks and may run Ruby hooks, so it must run on a Ruby thread
        // and outside the GVL.
        create_ruby_thread(move || {
            call_without_gvl(move || {
                if self_clone.boot(cluster_template).is_ok() {
                    booted_sender.send(true).ok()
                } else {
                    booted_sender.send(false).ok()
                };
            })
        });

        // Wait for the boot thread to publish a result; an error here means the
        // sender was dropped without sending (thread died before reporting).
        booted_receiver
            .changed()
            .await
            .map_err(|_| ItsiError::InternalServerError("Failed to boot worker".to_owned()))?;

        let guard = booted_receiver.borrow();
        let result = guard.to_owned();
        // Not very robust, we should check to see if the worker is actually listening before considering this successful.
        sleep(Duration::from_secs(1)).await;
        Ok(result)
    }
|
|
148
|
+
|
|
149
|
+
pub(crate) async fn graceful_shutdown(&self, cluster_template: Arc<ClusterMode>) {
|
|
150
|
+
let self_clone = self.clone();
|
|
151
|
+
self_clone.request_shutdown();
|
|
152
|
+
let force_kill_time = Instant::now()
|
|
153
|
+
+ Duration::from_secs_f64(
|
|
154
|
+
cluster_template
|
|
155
|
+
.server_config
|
|
156
|
+
.server_params
|
|
157
|
+
.read()
|
|
158
|
+
.shutdown_timeout,
|
|
159
|
+
);
|
|
160
|
+
while self_clone.is_alive() && force_kill_time > Instant::now() {
|
|
161
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
162
|
+
}
|
|
163
|
+
if self_clone.is_alive() {
|
|
164
|
+
self_clone.force_kill();
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
    /// Restarts the worker if its previously-booted child has died.
    ///
    /// Returns `false` only when the dead child is inside the crash-loop
    /// window (see `just_started`), signalling the caller not to keep
    /// restarting it. Returns `true` otherwise — including when the child is
    /// still alive or was never booted.
    pub(crate) fn boot_if_dead(&self, cluster_template: Arc<ClusterMode>) -> bool {
        // Only act when a child was recorded but is no longer running.
        if !self.is_alive() && self.child_pid.lock().is_some() {
            if self.just_started() {
                // Dying this soon after start looks like a crash loop.
                // NOTE(review): `started_at` is only set in `Default::default`
                // in the visible code — confirm it is refreshed on (re)boot,
                // otherwise this window is relative to handle creation.
                error!(
                    "Worker in crash loop {:?}. Refusing to restart",
                    self.child_pid.lock()
                );
                return false;
            } else {
                // Re-boot on a fresh Ruby thread, outside the GVL.
                let self_clone = self.clone();
                create_ruby_thread(move || {
                    call_without_gvl(move || {
                        self_clone.boot(cluster_template).ok();
                    })
                });
            }
        }
        true
    }
|
|
187
|
+
|
|
188
|
+
pub(crate) fn request_shutdown(&self) {
|
|
189
|
+
let child_pid = *self.child_pid.lock();
|
|
190
|
+
if let Some(pid) = child_pid {
|
|
191
|
+
if self.is_alive() {
|
|
192
|
+
if let Err(e) = kill(pid, SIGTERM) {
|
|
193
|
+
error!("Failed to send SIGTERM to process {}: {}", pid, e);
|
|
194
|
+
}
|
|
195
|
+
} else {
|
|
196
|
+
error!("Trying to shutdown a dead process");
|
|
197
|
+
}
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
pub(crate) fn force_kill(&self) {
|
|
202
|
+
let child_pid = *self.child_pid.lock();
|
|
203
|
+
if let Some(pid) = child_pid {
|
|
204
|
+
if self.is_alive() {
|
|
205
|
+
if let Err(e) = kill(pid, SIGKILL) {
|
|
206
|
+
error!("Failed to force kill process {}: {}", pid, e);
|
|
207
|
+
}
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
pub fn print_info(&self) -> Result<()> {
|
|
213
|
+
let child_pid = *self.child_pid.lock();
|
|
214
|
+
if let Some(pid) = child_pid {
|
|
215
|
+
println!("Worker {:?}, PID: {:?}", self.worker_id, pid);
|
|
216
|
+
if let Err(e) = kill(pid, SIGUSR2) {
|
|
217
|
+
error!("Failed to send SIGUSR2 to process {}: {}", pid, e);
|
|
218
|
+
}
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
Ok(())
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
pub(crate) fn just_started(&self) -> bool {
|
|
225
|
+
let now = Instant::now();
|
|
226
|
+
now.duration_since(self.started_at).as_millis() < 2000
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
    /// Returns whether the child process is currently running.
    ///
    /// First does a non-blocking `waitpid(WNOHANG)`, which both reaps a
    /// zombie child and reports an exit/signal death as "not alive"; then
    /// double-checks liveness with a null signal (`kill(pid, None)`), where
    /// only ESRCH ("no such process") counts as dead.
    pub(crate) fn is_alive(&self) -> bool {
        let child_pid = *self.child_pid.lock();
        if let Some(pid) = child_pid {
            match waitpid(pid, Some(WaitPidFlag::WNOHANG)) {
                Ok(WaitStatus::Exited(_, _)) | Ok(WaitStatus::Signaled(_, _, _)) => {
                    return false;
                }
                // StillAlive (and any other stop/continue status) falls through
                // to the kill(2) probe below.
                Ok(WaitStatus::StillAlive) | Ok(_) => {}
                Err(_) => return false,
            }
            match kill(pid, None) {
                Ok(_) => true,
                // Errors other than ESRCH (e.g. EPERM) mean the process exists
                // but we can't signal it — still counted as alive.
                Err(errno) => !matches!(errno, Errno::ESRCH),
            }
        } else {
            false
        }
    }
|
|
247
|
+
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
use http::StatusCode;
|
|
2
|
+
use serde::Deserialize;
|
|
3
|
+
|
|
4
|
+
/// Kind of HTTP redirect to issue, deserialized from the config strings
/// "permanent", "temporary", "found", or "moved_permanently".
/// The corresponding status codes are assigned in `status_code`.
#[derive(Debug, Clone, Deserialize, Default)]
pub enum RedirectType {
    /// 308 Permanent Redirect — the default when unspecified.
    #[serde(rename(deserialize = "permanent"))]
    #[default]
    Permanent,
    /// 307 Temporary Redirect.
    #[serde(rename(deserialize = "temporary"))]
    Temporary,
    /// 302 Found.
    #[serde(rename(deserialize = "found"))]
    Found,
    /// 301 Moved Permanently.
    #[serde(rename(deserialize = "moved_permanently"))]
    MovedPermanently,
}
|
|
16
|
+
|
|
17
|
+
impl RedirectType {
|
|
18
|
+
pub fn status_code(&self) -> StatusCode {
|
|
19
|
+
match self {
|
|
20
|
+
RedirectType::Permanent => StatusCode::PERMANENT_REDIRECT,
|
|
21
|
+
RedirectType::Temporary => StatusCode::TEMPORARY_REDIRECT,
|
|
22
|
+
RedirectType::Found => StatusCode::FOUND,
|
|
23
|
+
RedirectType::MovedPermanently => StatusCode::MOVED_PERMANENTLY,
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
use crate::ruby_types::{itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest};
|
|
2
|
+
use itsi_rb_helpers::HeapValue;
|
|
3
|
+
use magnus::block::Proc;
|
|
4
|
+
use std::sync::Arc;
|
|
5
|
+
|
|
6
|
+
/// A unit of work dispatched to worker threads: a request paired with the
/// Ruby `Proc` that should handle it, or a shutdown sentinel.
#[derive(Debug)]
pub enum RequestJob {
    /// An HTTP request plus the Ruby handler Proc to invoke for it.
    ProcessHttpRequest(ItsiHttpRequest, Arc<HeapValue<Proc>>),
    /// A gRPC call plus the Ruby handler Proc to invoke for it.
    ProcessGrpcRequest(ItsiGrpcCall, Arc<HeapValue<Proc>>),
    /// Instructs the receiving worker to stop processing jobs.
    Shutdown,
}
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
use hyper_util::rt::TokioIo;
|
|
2
|
+
use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration};
|
|
3
|
+
use tokio::task::JoinSet;
|
|
4
|
+
use tracing::debug;
|
|
5
|
+
|
|
6
|
+
use crate::{
|
|
7
|
+
ruby_types::itsi_server::itsi_server_config::ServerParams,
|
|
8
|
+
server::{binds::listener::ListenerInfo, io_stream::IoStream, request_job::RequestJob},
|
|
9
|
+
services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
|
|
10
|
+
};
|
|
11
|
+
|
|
12
|
+
use super::single_mode::{RunningPhase, SingleMode};
|
|
13
|
+
|
|
14
|
+
/// Serves accepted connections for one listener, tracking each in-flight
/// connection task so they can be awaited (or aborted) on shutdown.
pub struct Acceptor {
    // Shared per-listener context handed to every connection task.
    pub acceptor_args: Arc<AcceptorArgs>,
    // One spawned task per served connection; drained/aborted in `join`.
    pub join_set: JoinSet<()>,
}
|
|
18
|
+
|
|
19
|
+
// Convenience deref so `self.<field>` on an Acceptor reaches the shared
// AcceptorArgs (e.g. `self.shutdown_receiver`, `self.server_params`).
impl Deref for Acceptor {
    type Target = Arc<AcceptorArgs>;

    fn deref(&self) -> &Self::Target {
        &self.acceptor_args
    }
}
|
|
26
|
+
|
|
27
|
+
/// Shared, per-listener context cloned into every connection task.
pub struct AcceptorArgs {
    // The single-mode strategy; `serve_connection` uses its hyper executor.
    pub strategy: Arc<SingleMode>,
    // Details of the listener this acceptor serves.
    pub listener_info: ListenerInfo,
    // Watch channel of the running phase; a change triggers graceful
    // connection shutdown in `serve_connection`.
    pub shutdown_receiver: tokio::sync::watch::Receiver<RunningPhase>,
    // Channels for dispatching RequestJobs to workers. NOTE(review):
    // presumably `job_sender` feeds blocking workers and
    // `nonblocking_sender` feeds nonblocking ones — confirm at the call sites.
    pub job_sender: async_channel::Sender<RequestJob>,
    pub nonblocking_sender: async_channel::Sender<RequestJob>,
    // Server-wide parameters (e.g. `shutdown_timeout`, read in `join`).
    pub server_params: Arc<ServerParams>,
}
|
|
35
|
+
|
|
36
|
+
impl Acceptor {
|
|
37
|
+
    /// Spawns a task on the `JoinSet` that serves one accepted connection
    /// until it finishes naturally or a shutdown phase change forces a
    /// graceful close.
    pub(crate) async fn serve_connection(&mut self, stream: IoStream) {
        let addr = stream.addr();
        // hyper's executor needs a Tokio-flavored IO adapter over the stream.
        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
        let mut shutdown_channel = self.shutdown_receiver.clone();
        let acceptor_args = self.acceptor_args.clone();
        // Per-connection service handle; each request clones it below.
        let service = ItsiHttpService {
            inner: Arc::new(ItsiHttpServiceInner {
                acceptor_args: acceptor_args.clone(),
                addr,
            }),
        };

        self.join_set.spawn(async move {
            let executor = &acceptor_args.strategy.executor;
            let svc = hyper::service::service_fn(move |req| {
                let service = service.clone();
                async move { service.handle_request(req).await }
            });

            // Pinned so it can be polled from one select arm and then awaited
            // again after graceful_shutdown in the other.
            let mut serve = Box::pin(executor.serve_connection_with_upgrades(io, svc));

            tokio::select! {
                // Await the connection finishing naturally.
                res = &mut serve => {
                    match res {
                        Ok(()) => {
                            debug!("Connection closed normally");
                        },
                        Err(res) => {
                            debug!("Connection closed abruptly: {:?}", res);
                        }
                    }
                },
                // A lifecycle event triggers shutdown.
                _ = shutdown_channel.changed() => {
                    // Initiate graceful shutdown.
                    serve.as_mut().graceful_shutdown();

                    // Now await the connection to finish shutting down.
                    if let Err(e) = serve.await {
                        debug!("Connection shutdown error: {:?}", e);
                    }
                }
            }
        });
    }
|
|
83
|
+
|
|
84
|
+
pub async fn join(&mut self) {
|
|
85
|
+
// Join all acceptor tasks with timeout
|
|
86
|
+
|
|
87
|
+
let deadline = tokio::time::Instant::now()
|
|
88
|
+
+ Duration::from_secs_f64(self.server_params.shutdown_timeout);
|
|
89
|
+
let sleep_until = tokio::time::sleep_until(deadline);
|
|
90
|
+
tokio::select! {
|
|
91
|
+
_ = async {
|
|
92
|
+
while (self.join_set.join_next().await).is_some() {}
|
|
93
|
+
} => {},
|
|
94
|
+
_ = sleep_until => {
|
|
95
|
+
self.join_set.abort_all();
|
|
96
|
+
debug!("Shutdown timeout reached; abandoning remaining acceptor tasks.");
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
}
|