itsi-scheduler 0.1.5 → 0.1.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of itsi-scheduler has been flagged as possibly problematic by the registry.
- checksums.yaml +4 -4
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +90 -22
- data/README.md +5 -0
- data/_index.md +7 -0
- data/ext/itsi_error/Cargo.toml +1 -0
- data/ext/itsi_error/src/lib.rs +106 -7
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_rb_helpers/Cargo.toml +1 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
- data/ext/itsi_rb_helpers/src/lib.rs +59 -9
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +1 -1
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +72 -28
- data/ext/itsi_server/src/default_responses/mod.rs +11 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +113 -75
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
- data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
- data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +56 -24
- data/ext/itsi_server/src/server/{listener.rs → binds/listener.rs} +218 -113
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +55 -17
- data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +109 -28
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/http_message_types.rs +97 -0
- data/ext/itsi_server/src/server/io_stream.rs +2 -1
- data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
- data/ext/itsi_server/src/server/mod.rs +6 -5
- data/ext/itsi_server/src/server/process_worker.rs +65 -14
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +137 -49
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +338 -164
- data/ext/itsi_server/src/server/signal.rs +32 -26
- data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
- data/ext/itsi_server/src/server/thread_worker.rs +214 -107
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
- data/ext/itsi_server/src/services/mime_types.rs +1416 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +83 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
- data/ext/itsi_tracing/Cargo.toml +1 -0
- data/ext/itsi_tracing/src/lib.rs +312 -34
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +2 -2
- metadata +93 -21
- data/ext/itsi_error/src/from.rs +0 -71
- data/ext/itsi_server/extconf.rb +0 -6
- data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/ext/itsi_server/src/request/itsi_request.rs +0 -277
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/response/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
- data/ext/itsi_server/src/server/itsi_server.rs +0 -244
- /data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
data/ext/itsi_server/src/server/signal.rs

@@ -1,59 +1,63 @@
-use std::sync::{
+use std::sync::{
+    atomic::{AtomicBool, AtomicI8},
+    LazyLock,
+};
 
 use nix::libc::{self, sighandler_t};
 use tokio::sync::{self, broadcast};
 
 use super::lifecycle_event::LifecycleEvent;
 
+pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
+pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
 pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
     broadcast::Sender<LifecycleEvent>,
     broadcast::Receiver<LifecycleEvent>,
 )> = LazyLock::new(|| sync::broadcast::channel(5));
 
-pub
+pub fn send_lifecycle_event(event: LifecycleEvent) {
+    SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
+}
+
 fn receive_signal(signum: i32, _: sighandler_t) {
     SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
-    match signum {
+    let event = match signum {
         libc::SIGTERM | libc::SIGINT => {
+            SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
-
+                Some(LifecycleEvent::Shutdown)
            } else {
                // Not messing about. Force shutdown.
-
-                    .0
-                    .send(LifecycleEvent::ForceShutdown)
-                    .ok();
+                Some(LifecycleEvent::ForceShutdown)
            }
        }
-        libc::
-
-
-        libc::SIGTTIN =>
-
-
-
-
-
-
-
-            .0
-            .send(LifecycleEvent::DecreaseWorkers)
-            .ok();
-        }
-        _ => {}
+        libc::SIGUSR2 => Some(LifecycleEvent::PrintInfo),
+        libc::SIGUSR1 => Some(LifecycleEvent::Restart),
+        libc::SIGHUP => Some(LifecycleEvent::Reload),
+        libc::SIGTTIN => Some(LifecycleEvent::IncreaseWorkers),
+        libc::SIGTTOU => Some(LifecycleEvent::DecreaseWorkers),
+        libc::SIGCHLD => Some(LifecycleEvent::ChildTerminated),
+        _ => None,
+    };
+
+    if let Some(event) = event {
+        send_lifecycle_event(event);
     }
 }
 
 pub fn reset_signal_handlers() -> bool {
     SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
+    SHUTDOWN_REQUESTED.store(false, std::sync::atomic::Ordering::SeqCst);
     unsafe {
         libc::signal(libc::SIGTERM, receive_signal as usize);
         libc::signal(libc::SIGINT, receive_signal as usize);
-        libc::signal(libc::SIGUSR1, receive_signal as usize);
         libc::signal(libc::SIGUSR2, receive_signal as usize);
+        libc::signal(libc::SIGUSR1, receive_signal as usize);
+        libc::signal(libc::SIGHUP, receive_signal as usize);
         libc::signal(libc::SIGTTIN, receive_signal as usize);
         libc::signal(libc::SIGTTOU, receive_signal as usize);
+        libc::signal(libc::SIGCHLD, receive_signal as usize);
     }
     true
 }
@@ -62,9 +66,11 @@ pub fn clear_signal_handlers() {
     unsafe {
         libc::signal(libc::SIGTERM, libc::SIG_DFL);
         libc::signal(libc::SIGINT, libc::SIG_DFL);
-        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
         libc::signal(libc::SIGUSR2, libc::SIG_DFL);
+        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
+        libc::signal(libc::SIGHUP, libc::SIG_DFL);
         libc::signal(libc::SIGTTIN, libc::SIG_DFL);
         libc::signal(libc::SIGTTOU, libc::SIG_DFL);
+        libc::signal(libc::SIGCHLD, libc::SIG_DFL);
     }
 }
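The rewritten handler funnels every signal into the shared broadcast channel through the new send_lifecycle_event helper, so shutdown, reload, and worker-scaling events all reach subscribers the same way. Below is a minimal sketch of such a subscriber, using a simplified stand-in for the crate's LifecycleEvent enum; the listener task and the main wiring are illustrative, not code from the gem.

use tokio::sync::broadcast;

// Simplified stand-in for the LifecycleEvent enum referenced in the diff above.
#[derive(Clone, Debug)]
enum LifecycleEvent {
    Shutdown,
    ForceShutdown,
    Restart,
    Reload,
    IncreaseWorkers,
    DecreaseWorkers,
    PrintInfo,
    ChildTerminated,
}

// A subscriber drains events from its broadcast receiver until shutdown.
async fn lifecycle_listener(mut rx: broadcast::Receiver<LifecycleEvent>) {
    while let Ok(event) = rx.recv().await {
        match event {
            LifecycleEvent::Shutdown | LifecycleEvent::ForceShutdown => break,
            other => println!("lifecycle event: {:?}", other),
        }
    }
}

#[tokio::main]
async fn main() {
    // channel(5) mirrors the capacity used by SIGNAL_HANDLER_CHANNEL.
    let (tx, rx) = broadcast::channel::<LifecycleEvent>(5);
    let listener = tokio::spawn(lifecycle_listener(rx));
    tx.send(LifecycleEvent::PrintInfo).ok();
    tx.send(LifecycleEvent::Shutdown).ok();
    listener.await.ok();
}

Against the real crate, the receiver would come from SIGNAL_HANDLER_CHANNEL.0.subscribe() rather than a locally created channel.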
data/ext/itsi_server/src/server/size_limited_incoming.rs (new file)

@@ -0,0 +1,101 @@
+use bytes::Buf;
+use hyper::body::Body;
+use hyper::body::Frame;
+use hyper::body::SizeHint;
+use std::error::Error;
+use std::fmt;
+use std::ops::Deref;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::task::Context;
+use std::task::Poll;
+
+/// Custom error to indicate that the maximum body size was exceeded.
+#[derive(Debug)]
+pub struct MaxBodySizeReached;
+impl fmt::Display for MaxBodySizeReached {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Maximum body size reached")
+    }
+}
+
+impl Error for MaxBodySizeReached {}
+
+#[derive(Debug)]
+pub struct SizeLimitedIncoming<B> {
+    pub inner: B,
+    pub limit: AtomicUsize,
+    current: usize,
+}
+
+impl<B> Deref for SizeLimitedIncoming<B> {
+    type Target = B;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<B> SizeLimitedIncoming<B> {
+    pub fn new(inner: B) -> Self {
+        Self {
+            inner,
+            limit: AtomicUsize::new(usize::MAX),
+            current: 0,
+        }
+    }
+}
+
+impl<B> Body for SizeLimitedIncoming<B>
+where
+    B: Body + Unpin,
+    B::Data: Buf,
+    // Ensure that the inner error converts into our boxed error type.
+    B::Error: Into<Box<dyn Error + Send + Sync>>,
+{
+    type Data = B::Data;
+    type Error = Box<dyn Error + Send + Sync>;
+
+    fn poll_frame(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
+        // Pin the inner body.
+        let inner = Pin::new(&mut self.inner);
+        match inner.poll_frame(cx) {
+            Poll::Ready(Some(Ok(frame))) => {
+                // Use public methods since we cannot match on the private enum.
+                if frame.is_data() {
+                    match frame.into_data() {
+                        Ok(data) => {
+                            let len = data.remaining();
+                            self.current += len;
+                            if self.current > self.limit.load(Ordering::Relaxed) {
+                                Poll::Ready(Some(Err(Box::new(MaxBodySizeReached))))
+                            } else {
+                                Poll::Ready(Some(Ok(Frame::data(data))))
+                            }
+                        }
+                        // Should not occur if is_data() was true, but pass through if it does.
+                        Err(frame) => Poll::Ready(Some(Ok(frame))),
+                    }
+                } else {
+                    // For non-data frames (e.g. trailers), just pass them along.
+                    Poll::Ready(Some(Ok(frame)))
+                }
+            }
+            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
+            Poll::Ready(None) => Poll::Ready(None),
+            Poll::Pending => Poll::Pending,
+        }
+    }
+
+    fn is_end_stream(&self) -> bool {
+        self.inner.is_end_stream()
+    }
+
+    fn size_hint(&self) -> SizeHint {
+        self.inner.size_hint()
+    }
+}
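SizeLimitedIncoming wraps any hyper body, counts the bytes of each data frame, and fails the stream with MaxBodySizeReached once the running total passes the limit stored in its public AtomicUsize. A rough usage sketch follows, assuming the type above is in scope and that the http-body-util crate is available to drive the body; the 8-byte limit and test payload are illustrative.

use std::sync::atomic::Ordering;

use bytes::Bytes;
use http_body_util::{BodyExt, Full};

#[tokio::main]
async fn main() {
    // Wrap a fixed 21-byte body but only allow 8 bytes through.
    let body = SizeLimitedIncoming::new(Full::new(Bytes::from_static(b"hello world, too long")));
    body.limit.store(8, Ordering::Relaxed);

    // Collecting the body drives poll_frame; the running total trips the limit
    // and the stream ends with a MaxBodySizeReached error.
    match body.collect().await {
        Ok(collected) => println!("read {} bytes", collected.to_bytes().len()),
        Err(err) => println!("body rejected: {err}"),
    }
}

In the server itself the limit is presumably adjusted per route by the new max_body middleware listed above, but that wiring is not part of this hunk.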
data/ext/itsi_server/src/server/thread_worker.rs

@@ -1,9 +1,8 @@
-use
-use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
+use async_channel::Sender;
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
 };
-use itsi_tracing::{debug, error,
+use itsi_tracing::{debug, error, warn};
 use magnus::{
     error::Result,
     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
@@ -12,28 +11,36 @@ use magnus::{
 use nix::unistd::Pid;
 use parking_lot::{Mutex, RwLock};
 use std::{
-    num::NonZeroU8,
     ops::Deref,
     sync::{
-        atomic::{AtomicBool, Ordering},
+        atomic::{AtomicBool, AtomicU64, Ordering},
         Arc,
     },
     thread,
-    time::{Duration, Instant},
+    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
+
+use crate::ruby_types::{
+    itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
+    itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+};
+
+use super::request_job::RequestJob;
 pub struct ThreadWorker {
-    pub
-    pub
+    pub params: Arc<ServerParams>,
+    pub id: u8,
+    pub name: String,
+    pub request_id: AtomicU64,
+    pub current_request_start: AtomicU64,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
-    pub sender:
+    pub sender: Sender<RequestJob>,
     pub thread: RwLock<Option<HeapValue<Thread>>>,
     pub terminated: Arc<AtomicBool>,
     pub scheduler_class: Option<Opaque<Value>>,
 }
 
-static ID_CALL: LazyId = LazyId::new("call");
 static ID_ALIVE: LazyId = LazyId::new("alive?");
 static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
 static ID_SCHEDULE: LazyId = LazyId::new("schedule");
@@ -47,47 +54,71 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 });
 
 pub struct TerminateWakerSignal(bool);
+type ThreadWorkerBuildResult = Result<(
+    Arc<Vec<Arc<ThreadWorker>>>,
+    Sender<RequestJob>,
+    Sender<RequestJob>,
+)>;
+
+#[instrument(name = "boot", parent=None, skip(params, pid))]
+pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
+    let blocking_thread_count = params.threads;
+    let nonblocking_thread_count = params.scheduler_threads;
+
+    let (blocking_sender, blocking_receiver) =
+        async_channel::bounded((blocking_thread_count as u16 * 30) as usize);
+    let blocking_receiver_ref = Arc::new(blocking_receiver);
+    let blocking_sender_ref = blocking_sender;
+    let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
+
+    let mut workers = (1..=blocking_thread_count)
+        .map(|id| {
+            ThreadWorker::new(
+                params.clone(),
+                id,
+                format!("{:?}#{:?}", pid, id),
+                blocking_receiver_ref.clone(),
+                blocking_sender_ref.clone(),
+                if nonblocking_thread_count.is_some() {
+                    None
+                } else {
+                    scheduler_class
+                },
+            )
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    let nonblocking_sender_ref = if let (Some(nonblocking_thread_count), Some(scheduler_class)) =
+        (nonblocking_thread_count, scheduler_class)
+    {
+        let (nonblocking_sender, nonblocking_receiver) =
+            async_channel::bounded((nonblocking_thread_count as u16 * 30) as usize);
+        let nonblocking_receiver_ref = Arc::new(nonblocking_receiver);
+        let nonblocking_sender_ref = nonblocking_sender.clone();
+        for id in 0..nonblocking_thread_count {
+            workers.push(ThreadWorker::new(
+                params.clone(),
+                id,
+                format!("{:?}#{:?}", pid, id),
+                nonblocking_receiver_ref.clone(),
+                nonblocking_sender_ref.clone(),
+                Some(scheduler_class),
+            )?)
+        }
+        nonblocking_sender
+    } else {
+        blocking_sender_ref.clone()
+    };
 
-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
-pub fn build_thread_workers(
-    pid: Pid,
-    threads: NonZeroU8,
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
-    let (sender, receiver) = async_channel::bounded(20);
-    let receiver_ref = Arc::new(receiver);
-    let sender_ref = sender;
-    let (app, scheduler_class) = load_app(app, scheduler_class)?;
     Ok((
-        Arc::new(
-
-
-            info!(pid = pid.as_raw(), id, "Thread");
-            ThreadWorker::new(
-                format!("{:?}#{:?}", pid, id),
-                app,
-                receiver_ref.clone(),
-                sender_ref.clone(),
-                scheduler_class,
-            )
-        })
-        .collect::<Result<Vec<_>>>()?,
-        ),
-        sender_ref,
+        Arc::new(workers),
+        blocking_sender_ref,
+        nonblocking_sender_ref,
     ))
 }
 
-pub fn
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
+pub fn load_scheduler_class(scheduler_class: Option<String>) -> Result<Option<Opaque<Value>>> {
     call_with_gvl(|ruby| {
-        let app = app.get_inner_with(&ruby);
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
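build_thread_workers now provisions two pools: blocking workers sized by params.threads, plus an optional pool of scheduler-backed workers sized by params.scheduler_threads, each fed from its own bounded async_channel with room for roughly 30 queued jobs per thread. When no scheduler pool is configured, the nonblocking sender returned to the caller is just a clone of the blocking one. A small, self-contained sketch of that capacity rule and fallback is below; the RequestJob stand-in and the thread counts are illustrative, not taken from the gem.

use async_channel::{bounded, Receiver, Sender};

// Stand-in for the crate's RequestJob enum, just so the sketch compiles.
#[derive(Debug)]
enum RequestJob {
    ProcessHttpRequest,
    Shutdown,
}

// Mirrors the sizing rule in build_thread_workers: about 30 queued jobs per
// worker thread before senders start to apply backpressure.
fn job_channel(thread_count: u8) -> (Sender<RequestJob>, Receiver<RequestJob>) {
    bounded((thread_count as u16 * 30) as usize)
}

fn main() {
    let blocking_threads: u8 = 4;
    let scheduler_threads: Option<u8> = None;

    let (blocking_sender, blocking_receiver) = job_channel(blocking_threads);

    // With no dedicated scheduler pool, scheduler-friendly work falls back to
    // the blocking channel, as the diff does by cloning blocking_sender_ref.
    let (nonblocking_sender, _nonblocking_receiver) = match scheduler_threads {
        Some(count) => job_channel(count),
        None => (blocking_sender.clone(), blocking_receiver.clone()),
    };

    println!(
        "blocking capacity = {:?}, nonblocking capacity = {:?}",
        blocking_sender.capacity(),
        nonblocking_sender.capacity()
    );
}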
@@ -96,76 +127,75 @@ pub fn load_app(
         } else {
             None
         };
-        Ok(
+        Ok(scheduler_class)
     })
 }
 impl ThreadWorker {
     pub fn new(
-
-
+        params: Arc<ServerParams>,
+        id: u8,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
-        sender:
+        sender: Sender<RequestJob>,
         scheduler_class: Option<Opaque<Value>>,
-    ) -> Result<Self
-        let
+    ) -> Result<Arc<Self>> {
+        let worker = Arc::new(Self {
+            params,
             id,
-
+            request_id: AtomicU64::new(0),
+            current_request_start: AtomicU64::new(0),
+            name,
             receiver,
             sender,
             thread: RwLock::new(None),
             terminated: Arc::new(AtomicBool::new(false)),
             scheduler_class,
-        };
-        worker.run()?;
+        });
+        worker.clone().run()?;
         Ok(worker)
     }
 
-    #[instrument(skip(self), fields(id = self.id))]
-    pub async fn request_shutdown(&self) {
-        match self.sender.send(RequestJob::Shutdown).await {
-            Ok(_) => {}
-            Err(err) => error!("Failed to send shutdown request: {}", err),
-        };
-        info!("Requesting shutdown");
-    }
-
     #[instrument(skip(self, deadline), fields(id = self.id))]
     pub fn poll_shutdown(&self, deadline: Instant) -> bool {
-
-        if
-
-
-
-            kill_threads(vec![thread.as_value()]);
-        }
-        if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
-            return true;
-        }
-        info!("Thread has shut down");
+        if let Some(thread) = self.thread.read().deref() {
+            if Instant::now() > deadline {
+                warn!("Worker shutdown timed out. Killing thread");
+                self.terminated.store(true, Ordering::SeqCst);
+                kill_threads(vec![thread.as_value()]);
             }
-
+            debug!("Checking thread status");
+            if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                return true;
+            }
+            debug!("Thread has shut down");
+        }
+        self.thread.write().take();
 
-
-        })
+        false
     }
 
-    pub fn run(
-        let
-        let app = self.app;
+    pub fn run(self: Arc<Self>) -> Result<()> {
+        let name = self.name.clone();
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let params = self.params.clone();
+        let self_ref = self.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-
-
+                        if let Err(err) = self_ref.fiber_accept_loop(
+                            params,
+                            name,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-
+                        self_ref.accept_loop(params, name, receiver, terminated);
                     }
                 })
                 .into(),
@@ -176,11 +206,12 @@ impl ThreadWorker {
     }
 
     pub fn build_scheduler_proc(
-
+        self: Arc<Self>,
         leader: &Arc<Mutex<Option<RequestJob>>>,
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -197,6 +228,7 @@ impl ThreadWorker {
         let receiver = receiver.clone();
         let terminated = terminated.clone();
         let waker_sender = waker_sender.clone();
+        let self_ref = self.clone();
         let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);
 
         static MAX_BATCH_SIZE: i32 = 25;
@@ -204,8 +236,11 @@ impl ThreadWorker {
         let mut idle_counter = 0;
         if let Some(v) = leader_clone.lock().take() {
             match v {
-                RequestJob::
-                    batch.push(RequestJob::
+                RequestJob::ProcessHttpRequest(itsi_request, app_proc) => {
+                    batch.push(RequestJob::ProcessHttpRequest(itsi_request, app_proc))
+                }
+                RequestJob::ProcessGrpcRequest(itsi_request, app_proc) => {
+                    batch.push(RequestJob::ProcessGrpcRequest(itsi_request, app_proc))
                 }
                 RequestJob::Shutdown => {
                     waker_sender.send(TerminateWakerSignal(true)).unwrap();
@@ -224,12 +259,38 @@ impl ThreadWorker {
         let shutdown_requested = call_with_gvl(|_| {
             for req in batch.drain(..) {
                 match req {
-                    RequestJob::
+                    RequestJob::ProcessHttpRequest(request, app_proc) => {
+                        self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                        self_ref.current_request_start.store(
+                            SystemTime::now()
+                                .duration_since(UNIX_EPOCH)
+                                .unwrap()
+                                .as_secs(),
+                            Ordering::Relaxed,
+                        );
                         let response = request.response.clone();
-                        if let Err(err) =
-
-
-
+                        if let Err(err) = server.funcall::<_, _, Value>(
+                            *ID_SCHEDULE,
+                            (app_proc.as_value(), request),
+                        ) {
+                            ItsiHttpRequest::internal_error(ruby, response, err)
+                        }
+                    }
+                    RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                        self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                        self_ref.current_request_start.store(
+                            SystemTime::now()
+                                .duration_since(UNIX_EPOCH)
+                                .unwrap()
+                                .as_secs(),
+                            Ordering::Relaxed,
+                        );
+                        let response = request.stream.clone();
+                        if let Err(err) = server.funcall::<_, _, Value>(
+                            *ID_SCHEDULE,
+                            (app_proc.as_value(), request),
+                        ) {
+                            ItsiGrpcCall::internal_error(ruby, response, err)
                         }
                     }
                     RequestJob::Shutdown => return true,
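Before handing each job to the Ruby scheduler, the worker now bumps request_id and records the wall-clock start of the request, in whole seconds since the epoch, in current_request_start. Both are plain atomics, so another thread can cheaply inspect how long the in-flight request has been running; the monitoring-style reader below is a hypothetical illustration of that, not code from the gem.

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

// Same bookkeeping the worker does before dispatching a job: bump a counter
// and record the wall-clock start of the request in whole seconds.
fn record_request_start(request_id: &AtomicU64, current_request_start: &AtomicU64) {
    request_id.fetch_add(1, Ordering::Relaxed);
    current_request_start.store(
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs(),
        Ordering::Relaxed,
    );
}

// Hypothetical reader (e.g. a status or monitoring hook): how long has the
// current request on this worker been running?
fn seconds_in_flight(current_request_start: &AtomicU64) -> u64 {
    let started = current_request_start.load(Ordering::Relaxed);
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    now.saturating_sub(started)
}

fn main() {
    let request_id = AtomicU64::new(0);
    let current_request_start = AtomicU64::new(0);
    record_request_start(&request_id, &current_request_start);
    println!(
        "request #{} has been running for {}s",
        request_id.load(Ordering::Relaxed),
        seconds_in_flight(&current_request_start)
    );
}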
@@ -244,10 +305,15 @@ impl ThreadWorker {
         }
 
         let yield_result = if receiver.is_empty() {
+            let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                idle_counter == 0
+            } else {
+                false
+            };
             waker_sender.send(TerminateWakerSignal(false)).unwrap();
-            idle_counter = (idle_counter + 1) % 100;
             call_with_gvl(|ruby| {
-                if
+                if should_gc {
                     ruby.gc_start();
                 }
                 scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
@@ -263,10 +329,11 @@ impl ThreadWorker {
         })
     }
 
-    #[instrument(skip_all, fields(thread_worker=
+    #[instrument(skip_all, fields(thread_worker=name))]
     pub fn fiber_accept_loop(
-
-
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         scheduler_class: Opaque<Value>,
         terminated: Arc<AtomicBool>,
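Both accept loops now take the out-of-band GC threshold from ServerParams: when the queue is empty, an idle counter advances modulo the threshold and a GC pass is requested only when it wraps to zero, replacing the previously hard-coded every-100-iterations behaviour. The pacing logic on its own is sketched below in a small illustrative harness; the OobGcPacer wrapper is not a type from the crate.

// Out-of-band GC pacing as in the diff: only when the worker is idle, and only
// every `threshold` idle iterations, is a GC cycle requested.
struct OobGcPacer {
    idle_counter: u64,
    threshold: Option<u64>,
}

impl OobGcPacer {
    fn should_gc(&mut self, queue_is_empty: bool) -> bool {
        if !queue_is_empty {
            return false;
        }
        match self.threshold {
            Some(threshold) => {
                self.idle_counter = (self.idle_counter + 1) % threshold;
                self.idle_counter == 0
            }
            None => false,
        }
    }
}

fn main() {
    // With a threshold of 3, GC fires on every third consecutive idle pass.
    let mut pacer = OobGcPacer { idle_counter: 0, threshold: Some(3) };
    let passes: Vec<bool> = (0..6).map(|_| pacer.should_gc(true)).collect();
    println!("{passes:?}"); // [false, false, true, false, false, true]
}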
@@ -274,10 +341,15 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let
-        let scheduler_proc =
-
-
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = self.build_scheduler_proc(
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            params.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -338,25 +410,60 @@
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
         id: String,
-        app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let
+        let mut idle_counter = 0;
+        let self_ref = self.clone();
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        call_with_gvl(|_ruby| {
+                            ruby.gc_start();
+                        });
+                    }
+                };
+            }
             match receiver.recv_blocking() {
-                Ok(RequestJob::
+                Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
+                    call_with_gvl(|_ruby| {
+                        request.process(&ruby, app_proc).ok();
+                    });
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
+                }
+                Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby,
-                    })
+                        request.process(&ruby, app_proc).ok();
+                    });
+                    if terminated.load(Ordering::Relaxed) {
+                        break;
+                    }
                 }
                 Ok(RequestJob::Shutdown) => {
-                    debug!("Shutting down thread worker");
                     break;
                 }
                 Err(_) => {