itsi-server 0.1.1 → 0.1.18
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +3937 -0
- data/Cargo.toml +7 -0
- data/README.md +4 -0
- data/Rakefile +8 -1
- data/_index.md +6 -0
- data/exe/itsi +141 -46
- data/ext/itsi_error/Cargo.toml +3 -0
- data/ext/itsi_error/src/lib.rs +98 -24
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +3 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
- data/ext/itsi_rb_helpers/src/lib.rs +140 -10
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +38 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +72 -14
- data/ext/itsi_server/extconf.rb +1 -1
- data/ext/itsi_server/src/default_responses/html/401.html +68 -0
- data/ext/itsi_server/src/default_responses/html/403.html +68 -0
- data/ext/itsi_server/src/default_responses/html/404.html +68 -0
- data/ext/itsi_server/src/default_responses/html/413.html +71 -0
- data/ext/itsi_server/src/default_responses/html/429.html +68 -0
- data/ext/itsi_server/src/default_responses/html/500.html +71 -0
- data/ext/itsi_server/src/default_responses/html/502.html +71 -0
- data/ext/itsi_server/src/default_responses/html/503.html +68 -0
- data/ext/itsi_server/src/default_responses/html/504.html +69 -0
- data/ext/itsi_server/src/default_responses/html/index.html +238 -0
- data/ext/itsi_server/src/default_responses/json/401.json +6 -0
- data/ext/itsi_server/src/default_responses/json/403.json +6 -0
- data/ext/itsi_server/src/default_responses/json/404.json +6 -0
- data/ext/itsi_server/src/default_responses/json/413.json +6 -0
- data/ext/itsi_server/src/default_responses/json/429.json +6 -0
- data/ext/itsi_server/src/default_responses/json/500.json +6 -0
- data/ext/itsi_server/src/default_responses/json/502.json +6 -0
- data/ext/itsi_server/src/default_responses/json/503.json +6 -0
- data/ext/itsi_server/src/default_responses/json/504.json +6 -0
- data/ext/itsi_server/src/default_responses/mod.rs +11 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +132 -40
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +109 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +143 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +391 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/binds/bind.rs +201 -0
- data/ext/itsi_server/src/server/binds/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/binds/listener.rs +432 -0
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/binds/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/binds/tls.rs +270 -0
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/http_message_types.rs +97 -0
- data/ext/itsi_server/src/server/io_stream.rs +105 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
- data/ext/itsi_server/src/server/mod.rs +12 -5
- data/ext/itsi_server/src/server/process_worker.rs +247 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +342 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +30 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +421 -0
- data/ext/itsi_server/src/server/signal.rs +76 -0
- data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
- data/ext/itsi_server/src/server/thread_worker.rs +475 -0
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
- data/ext/itsi_server/src/services/mime_types.rs +1416 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +83 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
- data/ext/itsi_tracing/Cargo.toml +5 -0
- data/ext/itsi_tracing/src/lib.rs +315 -7
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/http_request/response_status_shortcodes.rb +74 -0
- data/lib/itsi/http_request.rb +186 -0
- data/lib/itsi/http_response.rb +41 -0
- data/lib/itsi/passfile.rb +109 -0
- data/lib/itsi/server/config/dsl.rb +565 -0
- data/lib/itsi/server/config.rb +166 -0
- data/lib/itsi/server/default_app/default_app.rb +34 -0
- data/lib/itsi/server/default_app/index.html +115 -0
- data/lib/itsi/server/default_config/Itsi-rackup.rb +119 -0
- data/lib/itsi/server/default_config/Itsi.rb +107 -0
- data/lib/itsi/server/grpc/grpc_call.rb +246 -0
- data/lib/itsi/server/grpc/grpc_interface.rb +100 -0
- data/lib/itsi/server/grpc/reflection/v1/reflection_pb.rb +26 -0
- data/lib/itsi/server/grpc/reflection/v1/reflection_services_pb.rb +122 -0
- data/lib/itsi/server/rack/handler/itsi.rb +27 -0
- data/lib/itsi/server/rack_interface.rb +94 -0
- data/lib/itsi/server/route_tester.rb +107 -0
- data/lib/itsi/server/scheduler_interface.rb +21 -0
- data/lib/itsi/server/scheduler_mode.rb +10 -0
- data/lib/itsi/server/signal_trap.rb +29 -0
- data/lib/itsi/server/typed_handlers/param_parser.rb +200 -0
- data/lib/itsi/server/typed_handlers/source_parser.rb +55 -0
- data/lib/itsi/server/typed_handlers.rb +17 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +160 -9
- data/lib/itsi/standard_headers.rb +86 -0
- data/lib/ruby_lsp/itsi/addon.rb +111 -0
- data/lib/shell_completions/completions.rb +26 -0
- metadata +182 -25
- data/ext/itsi_server/src/request/itsi_request.rs +0 -143
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/server/bind.rs +0 -138
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -32
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -52
- data/ext/itsi_server/src/server/itsi_server.rs +0 -182
- data/ext/itsi_server/src/server/listener.rs +0 -218
- data/ext/itsi_server/src/server/tls.rs +0 -138
- data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
- data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
- data/lib/itsi/request.rb +0 -39
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs (new file, @@ -0,0 +1,421 @@):

```rust
use crate::{
    ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
    server::{
        binds::listener::ListenerInfo,
        io_stream::IoStream,
        lifecycle_event::LifecycleEvent,
        request_job::RequestJob,
        signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
        thread_worker::{build_thread_workers, ThreadWorker},
    },
    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
};
use hyper_util::{
    rt::{TokioExecutor, TokioIo, TokioTimer},
    server::conn::auto::Builder,
};
use itsi_error::{ItsiError, Result};
use itsi_rb_helpers::{
    call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
};
use itsi_tracing::{debug, error, info};
use magnus::value::ReprValue;
use nix::unistd::Pid;
use parking_lot::RwLock;
use std::{
    collections::HashMap,
    pin::Pin,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    thread::sleep,
    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
};
use tokio::{
    runtime::{Builder as RuntimeBuilder, Runtime},
    sync::{
        broadcast,
        watch::{self},
    },
    task::JoinSet,
};
use tracing::instrument;

pub struct SingleMode {
    pub executor: Builder<TokioExecutor>,
    pub server_config: Arc<ItsiServerConfig>,
    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
    pub restart_requested: AtomicBool,
    pub status: RwLock<HashMap<u8, (u64, u64)>>,
}

pub enum RunningPhase {
    Running,
    ShutdownPending,
    Shutdown,
}

impl SingleMode {
    #[instrument(parent=None, skip_all)]
    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
        server_config.server_params.read().preload_ruby()?;
        Ok(Self {
            executor: Builder::new(TokioExecutor::new()),
            server_config,
            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
            restart_requested: AtomicBool::new(false),
            status: RwLock::new(HashMap::new()),
        })
    }

    pub fn build_runtime(&self) -> Runtime {
        let mut builder: RuntimeBuilder = if self
            .server_config
            .server_params
            .read()
            .multithreaded_reactor
        {
            RuntimeBuilder::new_multi_thread()
        } else {
            RuntimeBuilder::new_current_thread()
        };
        builder
            .thread_name("itsi-server-accept-loop")
            .thread_stack_size(3 * 1024 * 1024)
            .enable_io()
            .enable_time()
            .build()
            .expect("Failed to build Tokio runtime")
    }

    pub fn stop(&self) -> Result<()> {
        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
        Ok(())
    }

    pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
        println!(" └─ Worker");
        println!(
            " - binds: {:?}",
            self.server_config.server_params.read().binds
        );

        println!(
            " ─ streaming body: {:?}",
            self.server_config.server_params.read().streamable_body
        );
        println!(
            " ─ multithreaded runtime: {:?}",
            self.server_config
                .server_params
                .read()
                .multithreaded_reactor
        );
        println!(
            " ─ scheduler: {:?}",
            self.server_config.server_params.read().scheduler_class
        );
        println!(
            " ─ OOB GC Response threadhold: {:?}",
            self.server_config
                .server_params
                .read()
                .oob_gc_responses_threshold
        );
        for worker in thread_workers.iter() {
            println!(" └─ - Thread : {:?}", worker.id);
            println!(" - # Requests Processed: {:?}", worker.request_id);
            println!(
                " - Last Request Started: {:?} ago",
                if worker.current_request_start.load(Ordering::Relaxed) == 0 {
                    Duration::from_secs(0)
                } else {
                    SystemTime::now()
                        .duration_since(
                            UNIX_EPOCH
                                + Duration::from_secs(
                                    worker.current_request_start.load(Ordering::Relaxed),
                                ),
                        )
                        .unwrap_or(Duration::from_secs(0))
                }
            );
            call_with_gvl(|_| {
                if let Some(thread) = worker.thread.read().as_ref() {
                    if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
                        println!(" - Backtrace:");
                        for line in backtrace {
                            println!(" - {}", line);
                        }
                    }
                }
            })
        }

        Ok(())
    }

    pub fn start_monitors(
        self: Arc<Self>,
        thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
    ) -> magnus::Thread {
        call_with_gvl(move |_| {
            create_ruby_thread(move || {
                call_without_gvl(move || {
                    let monitor_runtime = RuntimeBuilder::new_current_thread()
                        .enable_time()
                        .build()
                        .unwrap();
                    let receiver = self.clone();
                    monitor_runtime.block_on({
                        let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
                        let receiver = receiver.clone();
                        let thread_workers = thread_workers.clone();
                        async move {
                            loop {
                                tokio::select! {
                                    _ = tokio::time::sleep(Duration::from_secs(1)) => {
                                        let mut status_lock = receiver.status.write();
                                        thread_workers.iter().for_each(|worker| {
                                            let worker_entry = status_lock.entry(worker.id);
                                            let data = (
                                                worker.request_id.load(Ordering::Relaxed),
                                                worker.current_request_start.load(Ordering::Relaxed),
                                            );
                                            worker_entry.or_insert(data);
                                        });
                                    }
                                    lifecycle_event = lifecycle_rx.recv() => {
                                        match lifecycle_event {
                                            Ok(LifecycleEvent::Restart) => {
                                                receiver.restart().ok();
                                            }
                                            Ok(LifecycleEvent::Reload) => {
                                                receiver.reload().ok();
                                            }
                                            Ok(LifecycleEvent::Shutdown) => {
                                                break;
                                            }
                                            Ok(LifecycleEvent::PrintInfo) => {
                                                receiver.print_info(thread_workers.clone()).await.ok();
                                            }
                                            _ => {}
                                        }
                                    }
                                }
                            }
                        }
                    })
                })
            })
        })
    }

    #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
    pub fn run(self: Arc<Self>) -> Result<()> {
        let mut listener_task_set = JoinSet::new();
        let runtime = self.build_runtime();

        let (thread_workers, job_sender, nonblocking_sender) =
            build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
                .inspect_err(|e| {
                    if let Some(err_val) = e.value() {
                        print_rb_backtrace(err_val);
                    }
                })?;

        info!(
            threads = thread_workers.len(),
            binds = format!("{:?}", self.server_config.server_params.read().binds)
        );

        let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
        let thread = self.clone().start_monitors(thread_workers.clone());
        if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
            return Ok(());
        }
        runtime.block_on(
            async {
                let server_params = self.server_config.server_params.read().clone();
                server_params.middleware.get().unwrap().initialize_layers().await?;
                let tokio_listeners = server_params.listeners.lock()
                    .drain(..)
                    .map(|list| {
                        Arc::new(list.into_tokio_listener())
                    })
                    .collect::<Vec<_>>();

                for listener in tokio_listeners.iter() {
                    let mut lifecycle_rx = self.lifecycle_channel.subscribe();

                    let listener_info = Arc::new(listener.listener_info());
                    let self_ref = self.clone();
                    let listener = listener.clone();
                    let shutdown_sender = shutdown_sender.clone();
                    let job_sender = job_sender.clone();
                    let nonblocking_sender = nonblocking_sender.clone();
                    let workers_clone = thread_workers.clone();
                    let listener_clone = listener.clone();
                    let mut shutdown_receiver = shutdown_sender.subscribe();
                    let shutdown_receiver_clone = shutdown_receiver.clone();
                    listener_task_set.spawn(async move {
                        listener_clone.spawn_state_task(shutdown_receiver_clone).await;
                    });

                    listener_task_set.spawn(async move {
                        let strategy_clone = self_ref.clone();
                        let mut acceptor_task_set = JoinSet::new();
                        loop {
                            tokio::select! {
                                accept_result = listener.accept() => match accept_result {
                                    Ok(accept_result) => {
                                        let strategy = strategy_clone.clone();
                                        let listener_info = listener_info.clone();
                                        let shutdown_receiver = shutdown_receiver.clone();
                                        let job_sender = job_sender.clone();
                                        let nonblocking_sender = nonblocking_sender.clone();
                                        acceptor_task_set.spawn(async move {
                                            strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
                                        });
                                    },
                                    Err(e) => debug!("Listener.accept failed {:?}", e),
                                },
                                _ = shutdown_receiver.changed() => {
                                    break;
                                }
                                lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
                                    Ok(LifecycleEvent::Shutdown) => {
                                        shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
                                        tokio::time::sleep(Duration::from_millis(25)).await;
                                        for _i in 0..workers_clone.len() {
                                            job_sender.send(RequestJob::Shutdown).await.unwrap();
                                            nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
                                        }
                                        break;
                                    },
                                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
                                    _ => {}
                                }
                            }
                        }
                        while let Some(_res) = acceptor_task_set.join_next().await {}
                    });

                }

                while let Some(_res) = listener_task_set.join_next().await {}

                // Explicitly drop all listeners to ensure file descriptors are released
                drop(tokio_listeners);

                Ok::<(), ItsiError>(())
            })?;

        shutdown_sender.send(RunningPhase::Shutdown).ok();
        let deadline = Instant::now()
            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);

        runtime.shutdown_timeout(Duration::from_millis(100));

        loop {
            if thread_workers
                .iter()
                .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
            {
                funcall_no_ret(thread, "join", ()).ok();
                break;
            }
            sleep(Duration::from_millis(50));
        }

        if self.restart_requested.load(Ordering::SeqCst) {
            self.restart_requested.store(false, Ordering::SeqCst);
            info!("Worker restarting");
            self.run()?;
        }
        debug!("Runtime has shut down");
        Ok(())
    }

    pub(crate) async fn serve_connection(
        &self,
        stream: IoStream,
        job_sender: async_channel::Sender<RequestJob>,
        nonblocking_sender: async_channel::Sender<RequestJob>,
        listener: Arc<ListenerInfo>,
        shutdown_channel: watch::Receiver<RunningPhase>,
    ) {
        let addr = stream.addr();
        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
        let executor = self.executor.clone();
        let mut shutdown_channel_clone = shutdown_channel.clone();
        let mut executor = executor.clone();
        let mut binding = executor.http1();
        let shutdown_channel = shutdown_channel_clone.clone();

        let service = ItsiHttpService {
            inner: Arc::new(ItsiHttpServiceInner {
                sender: job_sender.clone(),
                nonblocking_sender: nonblocking_sender.clone(),
                server_params: self.server_config.server_params.read().clone(),
                listener,
                addr: addr.to_string(),
                shutdown_channel: shutdown_channel.clone(),
            }),
        };
        let mut serve = Box::pin(
            binding
                .timer(TokioTimer::new())
                .header_read_timeout(Duration::from_secs(1))
                .serve_connection_with_upgrades(io, service),
        );

        tokio::select! {
            // Await the connection finishing naturally.
            res = &mut serve => {
                match res{
                    Ok(()) => {
                        debug!("Connection closed normally")
                    },
                    Err(res) => {
                        debug!("Connection closed abruptly: {:?}", res)
                    }
                }
                serve.as_mut().graceful_shutdown();
            },
            // A lifecycle event triggers shutdown.
            _ = shutdown_channel_clone.changed() => {
                // Initiate graceful shutdown.
                serve.as_mut().graceful_shutdown();

                // Now await the connection to finish shutting down.
                if let Err(e) = serve.await {
                    debug!("Connection shutdown error: {:?}", e);
                }
            }
        }
    }

    /// Attempts to reload the config "live"
    /// Not that when running in single mode this will not unload
    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
    pub fn reload(&self) -> Result<()> {
        let should_reexec = self.server_config.clone().reload(false)?;
        if should_reexec {
            self.server_config.dup_fds()?;
            self.server_config.reload_exec()?;
        }
        self.restart_requested.store(true, Ordering::SeqCst);
        self.stop()?;
        self.server_config.server_params.read().preload_ruby()?;
        Ok(())
    }

    /// Restart the server while keeping connections open.
    pub fn restart(&self) -> Result<()> {
        self.server_config.dup_fds()?;
        self.server_config.reload_exec()?;
        Ok(())
    }
}
```
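For orientation, a minimal, hypothetical sketch of how this single-process strategy could be driven, based only on the signatures above (`SingleMode::new` and `run`). The `run_single` helper and its call site are illustrative assumptions, not part of the gem:

```rust
// Hypothetical usage sketch (not part of the diff): construct the strategy from a
// prepared ItsiServerConfig, then block on the accept loop until shutdown.
fn run_single(config: Arc<ItsiServerConfig>) -> Result<()> {
    let mode = Arc::new(SingleMode::new(config)?);
    // `run` takes `self: Arc<Self>` and returns once a Shutdown lifecycle event
    // has been processed and the thread workers have drained.
    mode.run()
}
```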
data/ext/itsi_server/src/server/signal.rs (new file, @@ -0,0 +1,76 @@):

```rust
use std::sync::{
    atomic::{AtomicBool, AtomicI8},
    LazyLock,
};

use nix::libc::{self, sighandler_t};
use tokio::sync::{self, broadcast};

use super::lifecycle_event::LifecycleEvent;

pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
    broadcast::Sender<LifecycleEvent>,
    broadcast::Receiver<LifecycleEvent>,
)> = LazyLock::new(|| sync::broadcast::channel(5));

pub fn send_lifecycle_event(event: LifecycleEvent) {
    SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
}

fn receive_signal(signum: i32, _: sighandler_t) {
    SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
    let event = match signum {
        libc::SIGTERM | libc::SIGINT => {
            SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
                Some(LifecycleEvent::Shutdown)
            } else {
                // Not messing about. Force shutdown.
                Some(LifecycleEvent::ForceShutdown)
            }
        }
        libc::SIGUSR2 => Some(LifecycleEvent::PrintInfo),
        libc::SIGUSR1 => Some(LifecycleEvent::Restart),
        libc::SIGHUP => Some(LifecycleEvent::Reload),
        libc::SIGTTIN => Some(LifecycleEvent::IncreaseWorkers),
        libc::SIGTTOU => Some(LifecycleEvent::DecreaseWorkers),
        libc::SIGCHLD => Some(LifecycleEvent::ChildTerminated),
        _ => None,
    };

    if let Some(event) = event {
        send_lifecycle_event(event);
    }
}

pub fn reset_signal_handlers() -> bool {
    SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
    SHUTDOWN_REQUESTED.store(false, std::sync::atomic::Ordering::SeqCst);
    unsafe {
        libc::signal(libc::SIGTERM, receive_signal as usize);
        libc::signal(libc::SIGINT, receive_signal as usize);
        libc::signal(libc::SIGUSR2, receive_signal as usize);
        libc::signal(libc::SIGUSR1, receive_signal as usize);
        libc::signal(libc::SIGHUP, receive_signal as usize);
        libc::signal(libc::SIGTTIN, receive_signal as usize);
        libc::signal(libc::SIGTTOU, receive_signal as usize);
        libc::signal(libc::SIGCHLD, receive_signal as usize);
    }
    true
}

pub fn clear_signal_handlers() {
    unsafe {
        libc::signal(libc::SIGTERM, libc::SIG_DFL);
        libc::signal(libc::SIGINT, libc::SIG_DFL);
        libc::signal(libc::SIGUSR2, libc::SIG_DFL);
        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
        libc::signal(libc::SIGHUP, libc::SIG_DFL);
        libc::signal(libc::SIGTTIN, libc::SIG_DFL);
        libc::signal(libc::SIGTTOU, libc::SIG_DFL);
        libc::signal(libc::SIGCHLD, libc::SIG_DFL);
    }
}
```
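A minimal, hypothetical sketch of the intended flow, using only the functions defined above (`reset_signal_handlers`, `send_lifecycle_event`). The `request_shutdown` wrapper is an illustrative assumption:

```rust
// Hypothetical sketch (not part of the diff): install the handlers, then trigger
// the same shutdown path a SIGTERM/SIGINT would take.
fn request_shutdown() {
    // Maps SIGTERM/SIGINT to Shutdown (or ForceShutdown on repeated signals),
    // SIGHUP to Reload, SIGUSR1 to Restart, and so on.
    reset_signal_handlers();

    // Broadcast a lifecycle event directly on SIGNAL_HANDLER_CHANNEL.
    send_lifecycle_event(LifecycleEvent::Shutdown);
}
```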
data/ext/itsi_server/src/server/size_limited_incoming.rs (new file, @@ -0,0 +1,101 @@):

```rust
use bytes::Buf;
use hyper::body::Body;
use hyper::body::Frame;
use hyper::body::SizeHint;
use std::error::Error;
use std::fmt;
use std::ops::Deref;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::task::Context;
use std::task::Poll;

/// Custom error to indicate that the maximum body size was exceeded.
#[derive(Debug)]
pub struct MaxBodySizeReached;
impl fmt::Display for MaxBodySizeReached {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Maximum body size reached")
    }
}

impl Error for MaxBodySizeReached {}

#[derive(Debug)]
pub struct SizeLimitedIncoming<B> {
    pub inner: B,
    pub limit: AtomicUsize,
    current: usize,
}

impl<B> Deref for SizeLimitedIncoming<B> {
    type Target = B;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<B> SizeLimitedIncoming<B> {
    pub fn new(inner: B) -> Self {
        Self {
            inner,
            limit: AtomicUsize::new(usize::MAX),
            current: 0,
        }
    }
}

impl<B> Body for SizeLimitedIncoming<B>
where
    B: Body + Unpin,
    B::Data: Buf,
    // Ensure that the inner error converts into our boxed error type.
    B::Error: Into<Box<dyn Error + Send + Sync>>,
{
    type Data = B::Data;
    type Error = Box<dyn Error + Send + Sync>;

    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        // Pin the inner body.
        let inner = Pin::new(&mut self.inner);
        match inner.poll_frame(cx) {
            Poll::Ready(Some(Ok(frame))) => {
                // Use public methods since we cannot match on the private enum.
                if frame.is_data() {
                    match frame.into_data() {
                        Ok(data) => {
                            let len = data.remaining();
                            self.current += len;
                            if self.current > self.limit.load(Ordering::Relaxed) {
                                Poll::Ready(Some(Err(Box::new(MaxBodySizeReached))))
                            } else {
                                Poll::Ready(Some(Ok(Frame::data(data))))
                            }
                        }
                        // Should not occur if is_data() was true, but pass through if it does.
                        Err(frame) => Poll::Ready(Some(Ok(frame))),
                    }
                } else {
                    // For non-data frames (e.g. trailers), just pass them along.
                    Poll::Ready(Some(Ok(frame)))
                }
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }

    fn is_end_stream(&self) -> bool {
        self.inner.is_end_stream()
    }

    fn size_hint(&self) -> SizeHint {
        self.inner.size_hint()
    }
}
```
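A minimal, hypothetical sketch of how the wrapper above could be applied: `new` starts with a `usize::MAX` limit, and because `limit` is a public `AtomicUsize`, a middleware layer (such as the `max_body` middleware listed earlier) can lower it per request. The `wrap_request` helper is an illustrative assumption:

```rust
// Hypothetical usage sketch (not part of the diff): cap an incoming body so that
// reads past `max_bytes` yield a MaxBodySizeReached error from poll_frame.
use std::sync::atomic::Ordering;

fn wrap_request<B>(body: B, max_bytes: usize) -> SizeLimitedIncoming<B> {
    let wrapped = SizeLimitedIncoming::new(body); // limit defaults to usize::MAX
    wrapped.limit.store(max_bytes, Ordering::Relaxed);
    wrapped
}
```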