itsi-scheduler 0.1.5 → 0.2.2
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/Cargo.lock +120 -52
- data/README.md +57 -24
- data/Rakefile +0 -4
- data/ext/itsi_acme/Cargo.toml +86 -0
- data/ext/itsi_acme/examples/high_level.rs +63 -0
- data/ext/itsi_acme/examples/high_level_warp.rs +52 -0
- data/ext/itsi_acme/examples/low_level.rs +87 -0
- data/ext/itsi_acme/examples/low_level_axum.rs +66 -0
- data/ext/itsi_acme/src/acceptor.rs +81 -0
- data/ext/itsi_acme/src/acme.rs +354 -0
- data/ext/itsi_acme/src/axum.rs +86 -0
- data/ext/itsi_acme/src/cache.rs +39 -0
- data/ext/itsi_acme/src/caches/boxed.rs +80 -0
- data/ext/itsi_acme/src/caches/composite.rs +69 -0
- data/ext/itsi_acme/src/caches/dir.rs +106 -0
- data/ext/itsi_acme/src/caches/mod.rs +11 -0
- data/ext/itsi_acme/src/caches/no.rs +78 -0
- data/ext/itsi_acme/src/caches/test.rs +136 -0
- data/ext/itsi_acme/src/config.rs +172 -0
- data/ext/itsi_acme/src/https_helper.rs +69 -0
- data/ext/itsi_acme/src/incoming.rs +142 -0
- data/ext/itsi_acme/src/jose.rs +161 -0
- data/ext/itsi_acme/src/lib.rs +142 -0
- data/ext/itsi_acme/src/resolver.rs +59 -0
- data/ext/itsi_acme/src/state.rs +424 -0
- data/ext/itsi_error/Cargo.toml +1 -0
- data/ext/itsi_error/src/lib.rs +106 -7
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_rb_helpers/Cargo.toml +1 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
- data/ext/itsi_rb_helpers/src/lib.rs +63 -12
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/Cargo.toml +1 -1
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +9 -3
- data/ext/itsi_scheduler/src/lib.rs +1 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +73 -29
- data/ext/itsi_server/src/default_responses/mod.rs +11 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +114 -75
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
- data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +362 -0
- data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +233 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +565 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +86 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +59 -24
- data/ext/itsi_server/src/server/binds/listener.rs +444 -0
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +57 -19
- data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +120 -31
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/http_message_types.rs +97 -0
- data/ext/itsi_server/src/server/io_stream.rs +2 -1
- data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +170 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +63 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +94 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +94 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +343 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +151 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +316 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +301 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +193 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +64 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +192 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +171 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +198 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +209 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +116 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +411 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +142 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +54 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +51 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +187 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +173 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +31 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +381 -0
- data/ext/itsi_server/src/server/mod.rs +7 -5
- data/ext/itsi_server/src/server/process_worker.rs +65 -14
- data/ext/itsi_server/src/server/redirect_type.rs +26 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +150 -50
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +399 -165
- data/ext/itsi_server/src/server/signal.rs +33 -26
- data/ext/itsi_server/src/server/size_limited_incoming.rs +107 -0
- data/ext/itsi_server/src/server/thread_worker.rs +218 -107
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +257 -0
- data/ext/itsi_server/src/services/mime_types.rs +1416 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +83 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +580 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1340 -0
- data/ext/itsi_tracing/Cargo.toml +1 -0
- data/ext/itsi_tracing/src/lib.rs +362 -33
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/itsi-scheduler-100.png +0 -0
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +11 -6
- metadata +117 -24
- data/CHANGELOG.md +0 -5
- data/CODE_OF_CONDUCT.md +0 -132
- data/LICENSE.txt +0 -21
- data/ext/itsi_error/src/from.rs +0 -71
- data/ext/itsi_server/extconf.rb +0 -6
- data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/ext/itsi_server/src/request/itsi_request.rs +0 -277
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/response/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
- data/ext/itsi_server/src/server/itsi_server.rs +0 -244
- data/ext/itsi_server/src/server/listener.rs +0 -327
- /data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
data/ext/itsi_server/src/server/thread_worker.rs:

```diff
@@ -1,9 +1,9 @@
-use
-use
+use async_channel::Sender;
+use itsi_error::ItsiError;
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
 };
-use itsi_tracing::{debug, error,
+use itsi_tracing::{debug, error, warn};
 use magnus::{
     error::Result,
     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
@@ -12,28 +12,36 @@ use magnus::{
 use nix::unistd::Pid;
 use parking_lot::{Mutex, RwLock};
 use std::{
-    num::NonZeroU8,
     ops::Deref,
     sync::{
-        atomic::{AtomicBool, Ordering},
+        atomic::{AtomicBool, AtomicU64, Ordering},
         Arc,
     },
     thread,
-    time::{Duration, Instant},
+    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
+
+use crate::ruby_types::{
+    itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
+    itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+};
+
+use super::request_job::RequestJob;
 pub struct ThreadWorker {
-    pub
-    pub
+    pub params: Arc<ServerParams>,
+    pub id: u8,
+    pub name: String,
+    pub request_id: AtomicU64,
+    pub current_request_start: AtomicU64,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
-    pub sender:
+    pub sender: Sender<RequestJob>,
     pub thread: RwLock<Option<HeapValue<Thread>>>,
     pub terminated: Arc<AtomicBool>,
     pub scheduler_class: Option<Opaque<Value>>,
 }
 
-static ID_CALL: LazyId = LazyId::new("call");
 static ID_ALIVE: LazyId = LazyId::new("alive?");
 static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
 static ID_SCHEDULE: LazyId = LazyId::new("schedule");
@@ -47,47 +55,71 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 });
 
 pub struct TerminateWakerSignal(bool);
+type ThreadWorkerBuildResult = Result<(
+    Arc<Vec<Arc<ThreadWorker>>>,
+    Sender<RequestJob>,
+    Sender<RequestJob>,
+)>;
+
+#[instrument(name = "boot", parent=None, skip(params, pid))]
+pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
+    let blocking_thread_count = params.threads;
+    let nonblocking_thread_count = params.scheduler_threads;
+
+    let (blocking_sender, blocking_receiver) =
+        async_channel::bounded((blocking_thread_count as u16 * 30) as usize);
+    let blocking_receiver_ref = Arc::new(blocking_receiver);
+    let blocking_sender_ref = blocking_sender;
+    let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
+
+    let mut workers = (1..=blocking_thread_count)
+        .map(|id| {
+            ThreadWorker::new(
+                params.clone(),
+                id,
+                format!("{:?}#{:?}", pid, id),
+                blocking_receiver_ref.clone(),
+                blocking_sender_ref.clone(),
+                if nonblocking_thread_count.is_some() {
+                    None
+                } else {
+                    scheduler_class
+                },
+            )
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    let nonblocking_sender_ref = if let (Some(nonblocking_thread_count), Some(scheduler_class)) =
+        (nonblocking_thread_count, scheduler_class)
+    {
+        let (nonblocking_sender, nonblocking_receiver) =
+            async_channel::bounded((nonblocking_thread_count as u16 * 30) as usize);
+        let nonblocking_receiver_ref = Arc::new(nonblocking_receiver);
+        let nonblocking_sender_ref = nonblocking_sender.clone();
+        for id in 0..nonblocking_thread_count {
+            workers.push(ThreadWorker::new(
+                params.clone(),
+                id,
+                format!("{:?}#{:?}", pid, id),
+                nonblocking_receiver_ref.clone(),
+                nonblocking_sender_ref.clone(),
+                Some(scheduler_class),
+            )?)
+        }
+        nonblocking_sender
+    } else {
+        blocking_sender_ref.clone()
+    };
 
-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
-pub fn build_thread_workers(
-    pid: Pid,
-    threads: NonZeroU8,
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
-    let (sender, receiver) = async_channel::bounded(20);
-    let receiver_ref = Arc::new(receiver);
-    let sender_ref = sender;
-    let (app, scheduler_class) = load_app(app, scheduler_class)?;
     Ok((
-        Arc::new(
-
-
-            info!(pid = pid.as_raw(), id, "Thread");
-            ThreadWorker::new(
-                format!("{:?}#{:?}", pid, id),
-                app,
-                receiver_ref.clone(),
-                sender_ref.clone(),
-                scheduler_class,
-            )
-        })
-        .collect::<Result<Vec<_>>>()?,
-        ),
-        sender_ref,
+        Arc::new(workers),
+        blocking_sender_ref,
+        nonblocking_sender_ref,
     ))
 }
 
-pub fn
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
+pub fn load_scheduler_class(scheduler_class: Option<String>) -> Result<Option<Opaque<Value>>> {
     call_with_gvl(|ruby| {
-        let app = app.get_inner_with(&ruby);
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -96,78 +128,80 @@ pub fn load_app(
         } else {
             None
         };
-        Ok(
+        Ok(scheduler_class)
     })
 }
 impl ThreadWorker {
     pub fn new(
-
-
+        params: Arc<ServerParams>,
+        id: u8,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
-        sender:
+        sender: Sender<RequestJob>,
         scheduler_class: Option<Opaque<Value>>,
-    ) -> Result<Self
-        let
+    ) -> Result<Arc<Self>> {
+        let worker = Arc::new(Self {
+            params,
             id,
-
+            request_id: AtomicU64::new(0),
+            current_request_start: AtomicU64::new(0),
+            name,
             receiver,
             sender,
             thread: RwLock::new(None),
             terminated: Arc::new(AtomicBool::new(false)),
             scheduler_class,
-        };
-        worker.run()?;
+        });
+        worker.clone().run()?;
         Ok(worker)
     }
 
-    #[instrument(skip(self), fields(id = self.id))]
-    pub async fn request_shutdown(&self) {
-        match self.sender.send(RequestJob::Shutdown).await {
-            Ok(_) => {}
-            Err(err) => error!("Failed to send shutdown request: {}", err),
-        };
-        info!("Requesting shutdown");
-    }
-
     #[instrument(skip(self, deadline), fields(id = self.id))]
     pub fn poll_shutdown(&self, deadline: Instant) -> bool {
-
-        if
-
-
-
-            kill_threads(vec![thread.as_value()]);
-            }
-            if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
-                return true;
-            }
-            info!("Thread has shut down");
+        if let Some(thread) = self.thread.read().deref() {
+            if Instant::now() > deadline {
+                debug!("Worker shutdown timed out. Killing thread {:?}", thread);
+                self.terminated.store(true, Ordering::SeqCst);
+                kill_threads(vec![thread.as_value()]);
             }
-
+            if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                return true;
+            }
+            debug!("Thread has shut down");
+        }
+        self.thread.write().take();
 
-
-        })
+        false
     }
 
-    pub fn run(
-        let
-        let app = self.app;
+    pub fn run(self: Arc<Self>) -> Result<()> {
+        let name = self.name.clone();
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let params = self.params.clone();
+        let self_ref = self.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
+                    debug!("Ruby thread worker started");
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-
-
+                        if let Err(err) = self_ref.fiber_accept_loop(
+                            params,
+                            name,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-
+                        self_ref.accept_loop(params, name, receiver, terminated);
                     }
                 })
+                .ok_or_else(|| {
+                    ItsiError::InternalServerError("Failed to create Ruby thread".to_owned())
+                })?
                 .into(),
            );
            Ok::<(), magnus::Error>(())
@@ -176,11 +210,12 @@ impl ThreadWorker {
     }
 
     pub fn build_scheduler_proc(
-
+        self: Arc<Self>,
         leader: &Arc<Mutex<Option<RequestJob>>>,
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -197,6 +232,7 @@ impl ThreadWorker {
             let receiver = receiver.clone();
             let terminated = terminated.clone();
             let waker_sender = waker_sender.clone();
+            let self_ref = self.clone();
             let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);
 
             static MAX_BATCH_SIZE: i32 = 25;
@@ -204,8 +240,11 @@ impl ThreadWorker {
             let mut idle_counter = 0;
             if let Some(v) = leader_clone.lock().take() {
                 match v {
-                    RequestJob::
-                        batch.push(RequestJob::
+                    RequestJob::ProcessHttpRequest(itsi_request, app_proc) => {
+                        batch.push(RequestJob::ProcessHttpRequest(itsi_request, app_proc))
+                    }
+                    RequestJob::ProcessGrpcRequest(itsi_request, app_proc) => {
+                        batch.push(RequestJob::ProcessGrpcRequest(itsi_request, app_proc))
                     }
                     RequestJob::Shutdown => {
                         waker_sender.send(TerminateWakerSignal(true)).unwrap();
@@ -224,12 +263,38 @@ impl ThreadWorker {
             let shutdown_requested = call_with_gvl(|_| {
                 for req in batch.drain(..) {
                     match req {
-                        RequestJob::
+                        RequestJob::ProcessHttpRequest(request, app_proc) => {
+                            self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                            self_ref.current_request_start.store(
+                                SystemTime::now()
+                                    .duration_since(UNIX_EPOCH)
+                                    .unwrap()
+                                    .as_secs(),
+                                Ordering::Relaxed,
+                            );
                             let response = request.response.clone();
-                            if let Err(err) =
-
-
-
+                            if let Err(err) = server.funcall::<_, _, Value>(
+                                *ID_SCHEDULE,
+                                (app_proc.as_value(), request),
+                            ) {
+                                ItsiHttpRequest::internal_error(ruby, response, err)
+                            }
+                        }
+                        RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                            self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                            self_ref.current_request_start.store(
+                                SystemTime::now()
+                                    .duration_since(UNIX_EPOCH)
+                                    .unwrap()
+                                    .as_secs(),
+                                Ordering::Relaxed,
+                            );
+                            let response = request.stream.clone();
+                            if let Err(err) = server.funcall::<_, _, Value>(
+                                *ID_SCHEDULE,
+                                (app_proc.as_value(), request),
+                            ) {
+                                ItsiGrpcCall::internal_error(ruby, response, err)
                             }
                         }
                         RequestJob::Shutdown => return true,
@@ -244,10 +309,15 @@ impl ThreadWorker {
             }
 
             let yield_result = if receiver.is_empty() {
+                let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    idle_counter == 0
+                } else {
+                    false
+                };
                 waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                idle_counter = (idle_counter + 1) % 100;
                 call_with_gvl(|ruby| {
-                    if
+                    if should_gc {
                         ruby.gc_start();
                     }
                     scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
@@ -263,10 +333,11 @@ impl ThreadWorker {
         })
     }
 
-    #[instrument(skip_all, fields(thread_worker=
+    #[instrument(skip_all, fields(thread_worker=name))]
     pub fn fiber_accept_loop(
-
-
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         scheduler_class: Opaque<Value>,
         terminated: Arc<AtomicBool>,
@@ -274,10 +345,15 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let
-        let scheduler_proc =
-
-
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = self.build_scheduler_proc(
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            params.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -338,25 +414,60 @@ impl ThreadWorker {
 
     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
         id: String,
-        app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let
+        let mut idle_counter = 0;
+        let self_ref = self.clone();
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        call_with_gvl(|_ruby| {
+                            ruby.gc_start();
+                        });
+                    }
+                };
+            }
             match receiver.recv_blocking() {
-                Ok(RequestJob::
+                Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
+                    call_with_gvl(|_ruby| {
+                        request.process(&ruby, app_proc).ok();
+                    });
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
+                }
+                Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby,
-                    })
+                        request.process(&ruby, app_proc).ok();
+                    });
+                    if terminated.load(Ordering::Relaxed) {
+                        break;
+                    }
                 }
                 Ok(RequestJob::Shutdown) => {
-                    debug!("Shutting down thread worker");
                     break;
                 }
                 Err(_) => {
```
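The worker loops in this diff dispatch on `RequestJob` values pulled from a bounded `async_channel`; the new `server/request_job.rs` (+11 lines in the file list) is not shown here, but the match arms imply its shape. A minimal sketch, assuming `HeapValue<Proc>` as the handler wrapper (the variant names come from the diff; the payload types are an assumption, not the file's contents):

```rust
// Hypothetical reconstruction of server/request_job.rs, inferred from the
// match arms in thread_worker.rs above; the real definition may differ.
use itsi_rb_helpers::HeapValue;
use magnus::block::Proc;

use crate::ruby_types::{itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest};

pub enum RequestJob {
    // An HTTP request paired with the Ruby app proc that handles it.
    ProcessHttpRequest(ItsiHttpRequest, HeapValue<Proc>),
    // A gRPC call paired with its handler proc.
    ProcessGrpcRequest(ItsiGrpcCall, HeapValue<Proc>),
    // Poison pill: tells a worker thread to exit its accept loop.
    Shutdown,
}
```

Note that both accept loops bump `request_id` and stamp `current_request_start` (Unix seconds) before handing a job to Ruby, which presumably gives the server a cheap way to spot requests that have been in flight too long.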
data/ext/itsi_server/src/services/cache_store.rs (new file):

```diff
@@ -0,0 +1,74 @@
+use async_trait::async_trait;
+use redis::aio::ConnectionManager;
+use redis::{Client, RedisError, Script};
+use std::sync::Arc;
+use std::time::Duration;
+
+#[derive(Debug)]
+pub enum CacheError {
+    RedisError(RedisError),
+    // Other error variants as needed.
+}
+/// A general-purpose cache trait with an atomic “increment with timeout” operation.
+#[async_trait]
+pub trait CacheStore: Send + Sync + std::fmt::Debug {
+    /// Increments the counter associated with `key` and sets (or extends) its expiration.
+    /// Returns the new counter value.
+    async fn increment(&self, key: &str, timeout: Duration) -> Result<u64, CacheError>;
+}
+
+/// A Redis-backed cache store using an async connection manager.
+/// This uses a TLS-enabled connection when the URL is prefixed with "rediss://".
+#[derive(Clone)]
+pub struct RedisCacheStore {
+    connection: Arc<ConnectionManager>,
+}
+
+impl std::fmt::Debug for RedisCacheStore {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RedisCacheStore").finish()
+    }
+}
+
+impl RedisCacheStore {
+    /// Constructs a new RedisCacheStore.
+    ///
+    /// Use a connection URL like "rediss://host:port" to enable TLS (with rustls under the hood).
+    /// This constructor is async because it sets up the connection manager.
+    pub async fn new(connection_url: &str) -> Result<Self, CacheError> {
+        let client = Client::open(connection_url).map_err(CacheError::RedisError)?;
+        let connection_manager = ConnectionManager::new(client)
+            .await
+            .map_err(CacheError::RedisError)?;
+        Ok(Self {
+            connection: Arc::new(connection_manager),
+        })
+    }
+}
+
+#[async_trait]
+impl CacheStore for RedisCacheStore {
+    async fn increment(&self, key: &str, timeout: Duration) -> Result<u64, CacheError> {
+        let timeout_secs = timeout.as_secs();
+        // Lua script to:
+        // 1. INCR the key.
+        // 2. If the key doesn't have a TTL, set it.
+        let script = r#"
+            local current = redis.call('INCR', KEYS[1])
+            if redis.call('TTL', KEYS[1]) < 0 then
+                redis.call('EXPIRE', KEYS[1], ARGV[1])
+            end
+            return current
+        "#;
+        let script = Script::new(script);
+        // The ConnectionManager is cloneable and can be used concurrently.
+        let mut connection = (*self.connection).clone();
+        let value: i64 = script
+            .key(key)
+            .arg(timeout_secs)
+            .invoke_async(&mut connection)
+            .await
+            .map_err(CacheError::RedisError)?;
+        Ok(value as u64)
+    }
+}
```
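Since `increment` is the trait's only operation, a short caller sketch shows how this store composes with the new rate limiter; the key prefix and threshold below are placeholders, not values taken from the package:

```rust
use std::time::Duration;

// Hypothetical caller: count hits per client IP in a 60-second fixed window,
// using the CacheStore trait and CacheError type defined in the diff above.
async fn over_limit(store: &dyn CacheStore, client_ip: &str) -> Result<bool, CacheError> {
    let key = format!("rate_limit:{client_ip}");
    // One round trip: the Lua script INCRs the key and only sets a TTL when
    // the key has none, so the window expiry is established exactly once.
    let hits = store.increment(&key, Duration::from_secs(60)).await?;
    Ok(hits > 100) // placeholder threshold
}
```

Because later increments never extend the TTL, the window is fixed rather than sliding.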