itsi-scheduler 0.1.5 → 0.1.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of itsi-scheduler has been flagged as potentially problematic; see the registry's advisory page for this version for details.
- checksums.yaml +4 -4
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +90 -22
- data/README.md +5 -0
- data/_index.md +7 -0
- data/ext/itsi_error/Cargo.toml +1 -0
- data/ext/itsi_error/src/lib.rs +106 -7
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_rb_helpers/Cargo.toml +1 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
- data/ext/itsi_rb_helpers/src/lib.rs +59 -9
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +1 -1
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +72 -28
- data/ext/itsi_server/src/default_responses/mod.rs +11 -0
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +113 -75
- data/ext/itsi_server/src/prelude.rs +2 -0
- data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
- data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
- data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
- data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +56 -24
- data/ext/itsi_server/src/server/{listener.rs → binds/listener.rs} +218 -113
- data/ext/itsi_server/src/server/binds/mod.rs +4 -0
- data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +55 -17
- data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +109 -28
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/http_message_types.rs +97 -0
- data/ext/itsi_server/src/server/io_stream.rs +2 -1
- data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
- data/ext/itsi_server/src/server/mod.rs +6 -5
- data/ext/itsi_server/src/server/process_worker.rs +65 -14
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +137 -49
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +338 -164
- data/ext/itsi_server/src/server/signal.rs +32 -26
- data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
- data/ext/itsi_server/src/server/thread_worker.rs +214 -107
- data/ext/itsi_server/src/services/cache_store.rs +74 -0
- data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
- data/ext/itsi_server/src/services/mime_types.rs +1416 -0
- data/ext/itsi_server/src/services/mod.rs +6 -0
- data/ext/itsi_server/src/services/password_hasher.rs +83 -0
- data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
- data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
- data/ext/itsi_tracing/Cargo.toml +1 -0
- data/ext/itsi_tracing/src/lib.rs +312 -34
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/scheduler/version.rb +1 -1
- data/lib/itsi/scheduler.rb +2 -2
- metadata +93 -21
- data/ext/itsi_error/src/from.rs +0 -71
- data/ext/itsi_server/extconf.rb +0 -6
- data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/ext/itsi_server/src/request/itsi_request.rs +0 -277
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/response/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
- data/ext/itsi_server/src/server/itsi_server.rs +0 -244
- /data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
@@ -0,0 +1,225 @@
|
|
1
|
+
use derive_more::Debug;
|
2
|
+
use globset::{Glob, GlobSet, GlobSetBuilder};
|
3
|
+
use magnus::error::Result;
|
4
|
+
use nix::unistd::{close, fork, pipe, read};
|
5
|
+
use notify::{event::ModifyKind, EventKind, RecommendedWatcher};
|
6
|
+
use notify::{Event, RecursiveMode, Watcher};
|
7
|
+
use std::path::Path;
|
8
|
+
use std::sync::mpsc::Sender;
|
9
|
+
use std::time::{Duration, Instant};
|
10
|
+
use std::{collections::HashSet, fs};
|
11
|
+
use std::{
|
12
|
+
os::fd::{AsRawFd, IntoRawFd, OwnedFd},
|
13
|
+
path::PathBuf,
|
14
|
+
process::Command,
|
15
|
+
sync::mpsc,
|
16
|
+
thread::{self},
|
17
|
+
};
|
18
|
+
|
19
|
+
/// Represents a set of patterns and commands.
#[derive(Debug, Clone)]
struct PatternGroup {
    // Canonical directory the filesystem watch is registered on.
    base_dir: PathBuf,
    // Compiled matcher for `pattern`, applied to paths relative to `base_dir`.
    glob_set: GlobSet,
    // The original pattern string; also compared literally against relative paths.
    pattern: String,
    // Command lines (argv vectors) to run when the group triggers.
    commands: Vec<Vec<String>>,
    // Last time this group's commands ran, used for debouncing.
    last_triggered: Option<Instant>,
}
|
28
|
+
|
29
|
+
/// Extracts the base directory from a wildcard pattern by taking the portion up to the first
/// component that contains a wildcard character, then canonicalizing it.
///
/// For a literal (wildcard-free) pattern that names an existing directory the directory
/// itself is returned; for an existing file, its parent. Otherwise the leading
/// wildcard-free components are used, defaulting to `"."` when that prefix is empty or
/// does not exist. Unlike a naive implementation, canonicalization failures fall back
/// gracefully instead of panicking (the path may disappear between the metadata check
/// and canonicalization).
fn extract_and_canonicalize_base_dir(pattern: &str) -> PathBuf {
    // Fast path: no wildcard characters at all — treat the pattern as a literal path.
    if !(pattern.contains('*') || pattern.contains('?') || pattern.contains('[')) {
        if let Ok(metadata) = fs::metadata(pattern) {
            if metadata.is_dir() {
                // Canonicalize may still fail (e.g. the dir was removed after the
                // metadata call); fall through to the component walk rather than panic.
                if let Ok(canonical) = fs::canonicalize(pattern) {
                    return canonical;
                }
            } else if metadata.is_file() {
                if let Ok(canonical) = fs::canonicalize(pattern) {
                    if let Some(parent) = canonical.parent() {
                        return parent.to_path_buf();
                    }
                }
            }
        }
    }

    // Accumulate leading path components until the first one containing a wildcard.
    let mut base = PathBuf::new();
    for comp in Path::new(pattern).components() {
        let comp_str = comp.as_os_str().to_string_lossy();
        if comp_str.contains('*') || comp_str.contains('?') || comp_str.contains('[') {
            break;
        }
        base.push(comp);
    }

    // If no base was built, default to "."
    let base = if base.as_os_str().is_empty() || !base.exists() {
        PathBuf::from(".")
    } else {
        base
    };

    // Canonicalize to get the absolute path; keep the relative base on failure.
    fs::canonicalize(&base).unwrap_or(base)
}
|
67
|
+
|
68
|
+
/// Minimum time between triggering the same pattern group (debounce time)
const DEBOUNCE_DURATION: Duration = Duration::from_millis(500);

/// Forks a dedicated file-watcher process for the given `(pattern, commands)`
/// groups and returns the write end of a control pipe to the parent.
///
/// The child watches each pattern's base directory recursively and, when a
/// matching event arrives, runs that group's commands sequentially (waiting on
/// each). The child exits as soon as the parent's end of the pipe is closed,
/// so closing the returned fd is the shutdown signal. Returns `Err` if the
/// pipe, the fork, or glob compilation fails.
pub fn watch_groups(pattern_groups: Vec<(String, Vec<Vec<String>>)>) -> Result<Option<OwnedFd>> {
    // Control pipe: parent keeps the write end, child reads until EOF.
    let (r_fd, w_fd): (OwnedFd, OwnedFd) = pipe().map_err(|e| {
        magnus::Error::new(
            magnus::exception::standard_error(),
            format!("Failed to create watcher pipe: {}", e),
        )
    })?;

    let fork_result = unsafe {
        // SAFETY: fork() is unsafe by signature; the child immediately takes
        // its own control flow below and never returns to Ruby.
        fork().map_err(|e| {
            magnus::Error::new(
                magnus::exception::standard_error(),
                format!("Failed to fork file watcher: {}", e),
            )
        })
    }?;

    if fork_result.is_child() {
        // Child only reads; close the inherited write end.
        let _ = close(w_fd.into_raw_fd());
        // Watchdog thread: exit the child as soon as the parent closes (or
        // breaks) its end of the control pipe.
        thread::spawn(move || {
            let mut buf = [0u8; 1];
            loop {
                match read(r_fd.as_raw_fd(), &mut buf) {
                    Ok(0) => {
                        // EOF: parent closed the pipe.
                        std::process::exit(0);
                    }
                    Ok(_) => {}
                    Err(_) => {
                        std::process::exit(0);
                    }
                }
            }
        });

        // Compile each (pattern, commands) pair into a PatternGroup.
        let mut groups = Vec::new();
        for (pattern, commands) in pattern_groups.into_iter() {
            let base_dir = extract_and_canonicalize_base_dir(&pattern);
            let glob = Glob::new(&pattern).map_err(|e| {
                magnus::Error::new(
                    magnus::exception::standard_error(),
                    format!("Failed to create watch glob: {}", e),
                )
            })?;
            let glob_set = GlobSetBuilder::new().add(glob).build().map_err(|e| {
                magnus::Error::new(
                    magnus::exception::standard_error(),
                    format!("Failed to create watch glob set: {}", e),
                )
            })?;
            groups.push(PatternGroup {
                base_dir,
                glob_set,
                pattern,
                commands,
                last_triggered: None,
            });
        }

        // Create a channel and a watcher.
        let (tx, rx) = mpsc::channel::<notify::Result<Event>>();
        let sender = tx.clone();
        fn event_fn(sender: Sender<notify::Result<Event>>) -> impl Fn(notify::Result<Event>) {
            move |res| match res {
                Ok(event) => {
                    sender.send(Ok(event)).unwrap();
                }
                Err(e) => println!("watch error: {:?}", e),
            }
        }

        // Watch each distinct base directory exactly once.
        let mut watched_dirs = HashSet::new();
        let mut watcher: RecommendedWatcher =
            notify::recommended_watcher(event_fn(sender)).expect("Failed to create watcher");
        for group in &groups {
            if watched_dirs.insert(group.base_dir.clone()) {
                watcher
                    .watch(&group.base_dir, RecursiveMode::Recursive)
                    .expect("Failed to add watch");
            }
        }

        // Main event loop.
        for res in rx {
            match res {
                Ok(event) => {
                    // NOTE(review): this keeps ONLY Modify(Metadata(_)) events
                    // and discards creates, data writes and removals — this
                    // looks inverted for a change watcher; confirm intent.
                    if !matches!(event.kind, EventKind::Modify(ModifyKind::Metadata(_))) {
                        continue;
                    }
                    let now = Instant::now();
                    for group in &mut groups {
                        for path in event.paths.iter() {
                            // Match the path relative to the group's base dir,
                            // either via the glob or as a literal pattern string.
                            if let Ok(rel_path) = path.strip_prefix(&group.base_dir) {
                                if group.glob_set.is_match(rel_path)
                                    || rel_path.to_str().is_some_and(|s| s == group.pattern)
                                {
                                    // Check if we should debounce this event
                                    if let Some(last_triggered) = group.last_triggered {
                                        if now.duration_since(last_triggered) < DEBOUNCE_DURATION {
                                            // Skip this event as we've recently triggered for this pattern
                                            continue;
                                        }
                                    }

                                    // Update the last triggered time
                                    group.last_triggered = Some(now);

                                    // Execute the commands for this group.
                                    for command in &group.commands {
                                        if command.is_empty() {
                                            continue;
                                        }
                                        // command is an argv vector: [program, args...]
                                        let mut cmd = Command::new(&command[0]);
                                        if command.len() > 1 {
                                            cmd.args(&command[1..]);
                                        }
                                        match cmd.spawn() {
                                            Ok(mut child) => {
                                                // Run commands sequentially: wait for
                                                // each before starting the next.
                                                if let Err(e) = child.wait() {
                                                    eprintln!(
                                                        "Command {:?} failed: {:?}",
                                                        command, e
                                                    );
                                                }
                                            }
                                            Err(e) => {
                                                eprintln!(
                                                    "Failed to execute command {:?}: {:?}",
                                                    command, e
                                                );
                                            }
                                        }
                                    }
                                    // Trigger each group at most once per event.
                                    break;
                                }
                            }
                        }
                    }
                }
                Err(e) => println!("Watch error: {:?}", e),
            }
        }

        // Clean up the watches.
        // NOTE(review): `tx` is never dropped above, so the rx loop cannot end
        // and this cleanup is effectively unreachable; the child normally
        // exits via the pipe watchdog thread instead.
        for group in &groups {
            watcher
                .unwatch(&group.base_dir)
                .expect("Failed to remove watch");
        }
        drop(watcher);
        std::process::exit(0);
    } else {
        // Parent: close the read end and hand the write end to the caller.
        let _ = close(r_fd.into_raw_fd());
        Ok(Some(w_fd))
    }
}
|
@@ -0,0 +1,375 @@
|
|
1
|
+
use super::file_watcher::{self};
|
2
|
+
use crate::{
|
3
|
+
ruby_types::ITSI_SERVER_CONFIG,
|
4
|
+
server::{
|
5
|
+
binds::{bind::Bind, listener::Listener},
|
6
|
+
middleware_stack::MiddlewareSet,
|
7
|
+
},
|
8
|
+
};
|
9
|
+
use derive_more::Debug;
|
10
|
+
use itsi_rb_helpers::{call_with_gvl, print_rb_backtrace, HeapValue};
|
11
|
+
use itsi_tracing::{set_format, set_level, set_target};
|
12
|
+
use magnus::{
|
13
|
+
block::Proc,
|
14
|
+
error::Result,
|
15
|
+
value::{LazyId, ReprValue},
|
16
|
+
RArray, RHash, Ruby, Symbol, Value,
|
17
|
+
};
|
18
|
+
use nix::{
|
19
|
+
fcntl::{fcntl, FcntlArg, FdFlag},
|
20
|
+
unistd::{close, dup},
|
21
|
+
};
|
22
|
+
use parking_lot::{Mutex, RwLock};
|
23
|
+
use std::{
|
24
|
+
collections::HashMap,
|
25
|
+
os::fd::{AsRawFd, OwnedFd, RawFd},
|
26
|
+
path::PathBuf,
|
27
|
+
sync::{Arc, OnceLock},
|
28
|
+
time::Duration,
|
29
|
+
};
|
30
|
+
// Bind address used when the user supplies none.
static DEFAULT_BIND: &str = "http://localhost:3000";
// Lazily-resolved Ruby method IDs invoked on the Itsi::Server config object.
static ID_BUILD_CONFIG: LazyId = LazyId::new("build_config");
static ID_RELOAD_EXEC: LazyId = LazyId::new("reload_exec");

/// Top-level server configuration: the raw CLI parameters plus the
/// materialized `ServerParams`, retaining enough inputs to rebuild the
/// parameters on reload.
#[derive(Debug, Clone)]
pub struct ItsiServerConfig {
    /// CLI parameter hash, kept on the Ruby heap so reloads can re-combine it.
    pub cli_params: Arc<HeapValue<RHash>>,
    /// Path to the Itsi config file, if one was given.
    pub itsifile_path: Option<PathBuf>,
    /// Optional Ruby proc that contributes additional configuration.
    pub itsi_config_proc: Arc<Option<HeapValue<Proc>>>,
    /// Current effective parameters; swapped wholesale on reload.
    #[debug(skip)]
    pub server_params: Arc<RwLock<Arc<ServerParams>>>,
    /// Write end of the file-watcher control pipe (present only when
    /// `notify_watchers` was configured).
    pub watcher_fd: Arc<Option<OwnedFd>>,
}
|
43
|
+
|
44
|
+
/// Fully-resolved server parameters, built by `from_rb_hash` from the merged
/// CLI / Itsifile / config-proc hash assembled on the Ruby side.
#[derive(Debug)]
pub struct ServerParams {
    /// Cluster params
    pub workers: u8,
    // Optional per-worker memory limit; enforcement lives outside this file
    // (units not visible here — presumably bytes; confirm at the use site).
    pub worker_memory_limit: Option<u64>,
    // When true, startup logging is suppressed (see ItsiServer::start).
    pub silence: bool,
    // Graceful-shutdown timeout (fractional value — presumably seconds; confirm).
    pub shutdown_timeout: f64,
    // Lifecycle hook procs keyed by hook name.
    pub hooks: HashMap<String, HeapValue<Proc>>,
    // Whether to load the app before forking workers (affects reload strategy).
    pub preload: bool,

    // Per-request timeout; None disables it.
    pub request_timeout: Option<Duration>,
    // (pattern, commands) groups for the forked file watcher, if configured.
    pub notify_watchers: Option<Vec<(String, Vec<Vec<String>>)>>,
    /// Worker params
    pub threads: u8,
    pub scheduler_threads: Option<u8>,
    pub streamable_body: bool,
    pub multithreaded_reactor: bool,
    pub pin_worker_cores: bool,
    // Fully-qualified Ruby class name of the fiber scheduler, if any.
    pub scheduler_class: Option<String>,
    // Trigger out-of-band GC after this many responses; None disables it.
    pub oob_gc_responses_threshold: Option<u64>,
    // Ruby proc that produces the middleware definition; invoked in preload_ruby.
    pub middleware_loader: HeapValue<Proc>,
    // Set exactly once by preload_ruby.
    pub middleware: OnceLock<MiddlewareSet>,
    pub binds: Vec<Bind>,
    #[debug(skip)]
    pub(crate) listeners: Mutex<Vec<Listener>>,
    // Listener address string -> raw fd, used to pass sockets across exec/fork.
    listener_info: Mutex<HashMap<String, i32>>,
}
|
71
|
+
|
72
|
+
impl ServerParams {
    /// Eagerly loads the Ruby side of the configuration: requires the Itsi
    /// scheduler when it is the selected scheduler class, invokes the
    /// middleware-loader proc, and stores the resulting `MiddlewareSet`.
    ///
    /// Takes the GVL for the duration of the Ruby calls. Errors if the loader
    /// proc raises (the Ruby backtrace is printed first) or if the middleware
    /// was already set.
    pub fn preload_ruby(self: &Arc<Self>) -> Result<()> {
        call_with_gvl(|ruby| -> Result<()> {
            if self
                .scheduler_class
                .as_ref()
                .is_some_and(|t| t == "Itsi::Scheduler")
            {
                ruby.require("itsi/scheduler")?;
            }
            let middleware = MiddlewareSet::new(
                self.middleware_loader
                    .call::<_, Option<Value>>(())
                    .inspect_err(|e| {
                        // Surface the Ruby backtrace before propagating the error.
                        if let Some(err_value) = e.value() {
                            print_rb_backtrace(err_value);
                        }
                    })?
                    .map(|mw| mw.into()),
            )?;
            self.middleware.set(middleware).map_err(|_| {
                magnus::Error::new(
                    magnus::exception::runtime_error(),
                    "Failed to set middleware",
                )
            })?;
            Ok(())
        })?;
        Ok(())
    }

    /// Builds a `ServerParams` from the combined configuration hash produced
    /// by the Ruby `build_config` method.
    ///
    /// Side effects: applies log level/target/format immediately, and either
    /// inherits pre-existing listener fds (from the "listeners" JSON entry,
    /// which is removed from the hash) or binds fresh listeners.
    fn from_rb_hash(rb_param_hash: RHash) -> Result<ServerParams> {
        // Default worker count: one per CPU.
        let workers = rb_param_hash
            .fetch::<_, Option<u8>>("workers")?
            .unwrap_or(num_cpus::get() as u8);
        let worker_memory_limit: Option<u64> = rb_param_hash.fetch("worker_memory_limit")?;
        let silence: bool = rb_param_hash.fetch("silence")?;
        // Default: use a multithreaded reactor only in single-worker mode.
        let multithreaded_reactor: bool = rb_param_hash
            .fetch::<_, Option<bool>>("multithreaded_reactor")?
            .unwrap_or(workers == 1);
        let pin_worker_cores: bool = rb_param_hash
            .fetch::<_, Option<bool>>("pin_worker_cores")?
            .unwrap_or(true);
        let shutdown_timeout: f64 = rb_param_hash.fetch("shutdown_timeout")?;

        // Hooks arrive as a Ruby hash of name => proc; pairs that aren't
        // (Value, Proc) are silently skipped.
        let hooks: Option<RHash> = rb_param_hash.fetch("hooks")?;
        let hooks = hooks
            .map(|rhash| -> Result<HashMap<String, HeapValue<Proc>>> {
                let mut hook_map: HashMap<String, HeapValue<Proc>> = HashMap::new();
                for pair in rhash.enumeratorize::<_, ()>("each", ()) {
                    if let Some(pair_value) = RArray::from_value(pair?) {
                        if let (Ok(key), Ok(value)) =
                            (pair_value.entry::<Value>(0), pair_value.entry::<Proc>(1))
                        {
                            hook_map.insert(key.to_string(), HeapValue::from(value));
                        }
                    }
                }
                Ok(hook_map)
            })
            .transpose()?
            .unwrap_or_default();
        let preload: bool = rb_param_hash.fetch("preload")?;
        // Request timeout is given in whole seconds.
        let request_timeout: Option<u64> = rb_param_hash.fetch("request_timeout")?;
        let request_timeout = request_timeout.map(Duration::from_secs);

        let notify_watchers: Option<Vec<(String, Vec<Vec<String>>)>> =
            rb_param_hash.fetch("notify_watchers")?;
        let threads: u8 = rb_param_hash.fetch("threads")?;
        let scheduler_threads: Option<u8> = rb_param_hash.fetch("scheduler_threads")?;
        let streamable_body: bool = rb_param_hash.fetch("streamable_body")?;
        let scheduler_class: Option<String> = rb_param_hash.fetch("scheduler_class")?;
        let oob_gc_responses_threshold: Option<u64> =
            rb_param_hash.fetch("oob_gc_responses_threshold")?;
        let middleware_loader: Proc = rb_param_hash.fetch("middleware_loader")?;
        let log_level: Option<String> = rb_param_hash.fetch("log_level")?;
        let log_target: Option<String> = rb_param_hash.fetch("log_target")?;
        let log_format: Option<String> = rb_param_hash.fetch("log_format")?;

        // Logging settings take effect immediately, as a side effect of parsing.
        if let Some(level) = log_level {
            set_level(&level);
        }

        if let Some(target) = log_target {
            set_target(&target);
        }

        if let Some(format) = log_format {
            set_format(&format);
        }

        let binds: Option<Vec<String>> = rb_param_hash.fetch("binds")?;
        let binds = binds
            .unwrap_or_else(|| vec![DEFAULT_BIND.to_string()])
            .into_iter()
            .map(|s| s.parse())
            .collect::<itsi_error::Result<Vec<Bind>>>()?;

        // If a previous process left listener fds behind (serialized as JSON
        // under "listeners"), inherit those fds for matching binds; otherwise
        // bind fresh listeners. The entry is deleted so it isn't reused.
        let listeners = if let Some(preexisting_listeners) =
            rb_param_hash.delete::<_, Option<String>>("listeners")?
        {
            let bind_to_fd_map: HashMap<String, i32> = serde_json::from_str(&preexisting_listeners)
                .map_err(|e| {
                    magnus::Error::new(
                        magnus::exception::standard_error(),
                        format!("Invalid listener info: {}", e),
                    )
                })?;

            binds
                .iter()
                .cloned()
                .map(|bind| {
                    if let Some(fd) = bind_to_fd_map.get(&bind.listener_address_string()) {
                        Listener::inherit_fd(bind, *fd)
                    } else {
                        Listener::try_from(bind)
                    }
                })
                .collect::<std::result::Result<Vec<Listener>, _>>()?
                .into_iter()
                .collect::<Vec<_>>()
        } else {
            binds
                .iter()
                .cloned()
                .map(Listener::try_from)
                .collect::<std::result::Result<Vec<Listener>, _>>()?
                .into_iter()
                .collect::<Vec<_>>()
        };

        // Record address -> fd for every listener so a later exec can inherit them.
        let listener_info = listeners
            .iter()
            .map(|listener| {
                listener.handover().map_err(|e| {
                    magnus::Error::new(magnus::exception::runtime_error(), e.to_string())
                })
            })
            .collect::<Result<HashMap<String, i32>>>()?;

        Ok(ServerParams {
            workers,
            worker_memory_limit,
            silence,
            multithreaded_reactor,
            pin_worker_cores,
            shutdown_timeout,
            hooks,
            preload,
            request_timeout,
            notify_watchers,
            threads,
            scheduler_threads,
            streamable_body,
            scheduler_class,
            oob_gc_responses_threshold,
            binds,
            listener_info: Mutex::new(listener_info),
            listeners: Mutex::new(listeners),
            middleware_loader: middleware_loader.into(),
            // Populated later by preload_ruby.
            middleware: OnceLock::new(),
        })
    }
}
|
237
|
+
|
238
|
+
impl ItsiServerConfig {
    /// Builds the initial configuration by combining CLI params, the optional
    /// Itsifile, and the optional config proc, then (if `notify_watchers` is
    /// configured) forks the file-watcher process and keeps its control fd.
    pub fn new(
        ruby: &Ruby,
        cli_params: RHash,
        itsifile_path: Option<PathBuf>,
        itsi_config_proc: Option<Proc>,
    ) -> Result<Self> {
        let itsi_config_proc = Arc::new(itsi_config_proc.map(HeapValue::from));
        let server_params = Self::combine_params(
            ruby,
            cli_params,
            itsifile_path.as_ref(),
            itsi_config_proc.clone(),
        )?;
        // Drop any inherited "listeners" entry from the retained CLI params so
        // it can't leak into a later reload.
        cli_params.delete::<_, Value>(Symbol::new("listeners"))?;

        let watcher_fd = if let Some(watchers) = server_params.notify_watchers.clone() {
            file_watcher::watch_groups(watchers)?
        } else {
            None
        };

        Ok(ItsiServerConfig {
            cli_params: Arc::new(cli_params.into()),
            server_params: RwLock::new(server_params.clone()).into(),
            itsi_config_proc,
            itsifile_path,
            watcher_fd: watcher_fd.into(),
        })
    }

    /// Rebuilds `server_params` from the original inputs and swaps them in.
    ///
    /// Returns `Ok(true)` when the caller must re-exec the process to apply
    /// the reload (single mode, or cluster mode with preload enabled);
    /// `Ok(false)` when cycling workers suffices.
    pub fn reload(self: Arc<Self>, cluster_worker: bool) -> Result<bool> {
        let server_params = call_with_gvl(|ruby| {
            Self::combine_params(
                &ruby,
                self.cli_params.cloned(),
                self.itsifile_path.as_ref(),
                self.itsi_config_proc.clone(),
            )
        })?;

        // Note: mode is judged from the *current* params, not the new ones.
        let is_single_mode = self.server_params.read().workers == 1;

        let requires_exec = if !is_single_mode && !server_params.preload {
            // In cluster mode children are cycled during a reload
            // and if preload is disabled, will get a clean memory slate,
            // so we don't need to exec.
            false
        } else {
            // In non-cluster mode, or when preloading is enabled, we shouldn't try to
            // reload inside the existing process (as new code may conflict with old),
            // and should re-exec instead.
            true
        };

        *self.server_params.write() = server_params.clone();
        Ok(requires_exec && (cluster_worker || is_single_mode))
    }

    /// Invokes the Ruby `build_config` method with (CLI params, Itsifile path,
    /// config proc) and materializes the result into `ServerParams`.
    fn combine_params(
        ruby: &Ruby,
        cli_params: RHash,
        itsifile_path: Option<&PathBuf>,
        itsi_config_proc: Arc<Option<HeapValue<Proc>>>,
    ) -> Result<Arc<ServerParams>> {
        let inner = itsi_config_proc
            .as_ref()
            .clone()
            .map(|hv| hv.clone().inner());
        let rb_param_hash: RHash = ruby.get_inner_ref(&ITSI_SERVER_CONFIG).funcall(
            *ID_BUILD_CONFIG,
            (cli_params, itsifile_path.cloned(), inner),
        )?;
        Ok(Arc::new(ServerParams::from_rb_hash(rb_param_hash)?))
    }

    /// Clears FD_CLOEXEC on `fd` so it survives an exec (used for listener
    /// fds handed to the re-exec'd process).
    fn clear_cloexec(fd: RawFd) -> nix::Result<()> {
        let current_flags = fcntl(fd, FcntlArg::F_GETFD)?;
        let mut flags = FdFlag::from_bits_truncate(current_flags);
        // Remove the FD_CLOEXEC flag
        flags.remove(FdFlag::FD_CLOEXEC);
        // Set the new flags back on the file descriptor
        fcntl(fd, FcntlArg::F_SETFD(flags))?;
        Ok(())
    }

    /// Duplicates every tracked listener fd (clearing close-on-exec on each
    /// duplicate) and replaces the listener-info map with the duplicated fds,
    /// in preparation for an exec.
    pub fn dup_fds(self: &Arc<Self>) -> Result<()> {
        let binding = self.server_params.read();
        let mut listener_info_guard = binding.listener_info.lock();
        let dupped_fd_map = listener_info_guard
            .iter()
            .map(|(str, fd)| {
                let dupped_fd = dup(*fd).map_err(|errno| {
                    magnus::Error::new(
                        magnus::exception::standard_error(),
                        format!("Errno {} while trying to dup {}", errno, fd),
                    )
                })?;
                Self::clear_cloexec(dupped_fd).map_err(|e| {
                    magnus::Error::new(
                        magnus::exception::standard_error(),
                        format!("Failed to clear cloexec flag for fd {}: {}", dupped_fd, e),
                    )
                })?;
                Ok((str.clone(), dupped_fd))
            })
            .collect::<Result<HashMap<String, i32>>>()?;
        *listener_info_guard = dupped_fd_map;
        Ok(())
    }

    /// Closes the file-watcher control pipe, signalling the watcher child
    /// process to exit.
    ///
    /// NOTE(review): this closes the raw fd while the `OwnedFd` remains owned
    /// by `self.watcher_fd`, so the fd is closed again when the Arc drops —
    /// a potential double-close; confirm this is benign here.
    pub fn stop_watcher(self: &Arc<Self>) -> Result<()> {
        if let Some(r_fd) = self.watcher_fd.as_ref() {
            close(r_fd.as_raw_fd()).ok();
        }
        Ok(())
    }

    /// Serializes the current listener fds to JSON, stops the file watcher,
    /// and asks Ruby's `reload_exec` to re-exec the process with those
    /// listeners inherited.
    pub fn reload_exec(self: &Arc<Self>) -> Result<()> {
        let listener_json =
            serde_json::to_string(&self.server_params.read().listener_info.lock().clone())
                .map_err(|e| {
                    magnus::Error::new(
                        magnus::exception::standard_error(),
                        format!("Invalid listener info: {}", e),
                    )
                })?;

        self.stop_watcher()?;
        call_with_gvl(|ruby| -> Result<()> {
            ruby.get_inner_ref(&ITSI_SERVER_CONFIG)
                .funcall::<_, _, Value>(*ID_RELOAD_EXEC, (listener_json,))?;
            Ok(())
        })?;
        Ok(())
    }
}
|
@@ -0,0 +1,83 @@
|
|
1
|
+
use crate::server::{
|
2
|
+
lifecycle_event::LifecycleEvent,
|
3
|
+
serve_strategy::{cluster_mode::ClusterMode, single_mode::SingleMode, ServeStrategy},
|
4
|
+
signal::{clear_signal_handlers, reset_signal_handlers, send_lifecycle_event},
|
5
|
+
};
|
6
|
+
use itsi_rb_helpers::{call_without_gvl, print_rb_backtrace};
|
7
|
+
use itsi_server_config::ItsiServerConfig;
|
8
|
+
use itsi_tracing::{error, run_silently};
|
9
|
+
use magnus::{block::Proc, error::Result, RHash, Ruby};
|
10
|
+
use parking_lot::Mutex;
|
11
|
+
use std::{path::PathBuf, sync::Arc};
|
12
|
+
use tracing::{info, instrument};
|
13
|
+
mod file_watcher;
pub mod itsi_server_config;

/// Ruby-visible `Itsi::Server` object: a cheap, cloneable handle around the
/// shared server configuration.
#[magnus::wrap(class = "Itsi::Server", free_immediately, size)]
#[derive(Clone)]
pub struct ItsiServer {
    // Mutex guards config swaps; the inner Arc keeps reads cheap after locking.
    pub config: Arc<Mutex<Arc<ItsiServerConfig>>>,
}
|
20
|
+
|
21
|
+
impl ItsiServer {
|
22
|
+
pub fn new(
|
23
|
+
ruby: &Ruby,
|
24
|
+
cli_params: RHash,
|
25
|
+
itsifile_path: Option<PathBuf>,
|
26
|
+
itsi_config_proc: Option<Proc>,
|
27
|
+
) -> Result<Self> {
|
28
|
+
Ok(Self {
|
29
|
+
config: Arc::new(Mutex::new(Arc::new(ItsiServerConfig::new(
|
30
|
+
ruby,
|
31
|
+
cli_params,
|
32
|
+
itsifile_path,
|
33
|
+
itsi_config_proc,
|
34
|
+
)?))),
|
35
|
+
})
|
36
|
+
}
|
37
|
+
|
38
|
+
pub fn stop(&self) -> Result<()> {
|
39
|
+
send_lifecycle_event(LifecycleEvent::Shutdown);
|
40
|
+
Ok(())
|
41
|
+
}
|
42
|
+
|
43
|
+
#[instrument(skip(self))]
|
44
|
+
pub fn start(&self) -> Result<()> {
|
45
|
+
let result = if self.config.lock().server_params.read().silence {
|
46
|
+
run_silently(|| self.build_and_run_strategy())
|
47
|
+
} else {
|
48
|
+
info!("Itsi - Rolling into action. ⚪💨");
|
49
|
+
self.build_and_run_strategy()
|
50
|
+
};
|
51
|
+
if let Err(e) = result {
|
52
|
+
if let Some(err_value) = e.value() {
|
53
|
+
print_rb_backtrace(err_value);
|
54
|
+
}
|
55
|
+
return Err(e);
|
56
|
+
}
|
57
|
+
Ok(())
|
58
|
+
}
|
59
|
+
|
60
|
+
pub(crate) fn build_strategy(&self) -> Result<ServeStrategy> {
|
61
|
+
let server_config = self.config.lock();
|
62
|
+
Ok(if server_config.server_params.read().workers > 1 {
|
63
|
+
ServeStrategy::Cluster(Arc::new(ClusterMode::new(server_config.clone())))
|
64
|
+
} else {
|
65
|
+
ServeStrategy::Single(Arc::new(SingleMode::new(server_config.clone())?))
|
66
|
+
})
|
67
|
+
}
|
68
|
+
|
69
|
+
fn build_and_run_strategy(&self) -> Result<()> {
|
70
|
+
reset_signal_handlers();
|
71
|
+
call_without_gvl(move || -> Result<()> {
|
72
|
+
let strategy = self.build_strategy()?;
|
73
|
+
if let Err(e) = strategy.clone().run() {
|
74
|
+
error!("Error running server: {}", e);
|
75
|
+
strategy.stop()?;
|
76
|
+
}
|
77
|
+
Ok(())
|
78
|
+
})?;
|
79
|
+
clear_signal_handlers();
|
80
|
+
info!("Server stopped");
|
81
|
+
Ok(())
|
82
|
+
}
|
83
|
+
}
|