itsi-server 0.2.17 → 0.2.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +1 -1
- data/exe/itsi +2 -4
- data/ext/itsi_scheduler/Cargo.toml +1 -1
- data/ext/itsi_server/Cargo.toml +1 -1
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +422 -101
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +36 -10
- data/ext/itsi_server/src/server/binds/listener.rs +3 -3
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +5 -9
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +10 -2
- data/lib/itsi/server/config/dsl.rb +6 -5
- data/lib/itsi/server/config/middleware/compression.md +3 -3
- data/lib/itsi/server/config/middleware/endpoint/controller.md +1 -1
- data/lib/itsi/server/config/middleware/proxy.md +2 -2
- data/lib/itsi/server/config/options/auto_reload_config.rb +7 -6
- data/lib/itsi/server/config/options/include.md +1 -0
- data/lib/itsi/server/config/options/include.rb +13 -11
- data/lib/itsi/server/config/options/reuse_port.rb +2 -4
- data/lib/itsi/server/config.rb +1 -1
- data/lib/itsi/server/default_config/Itsi.rb +9 -5
- data/lib/itsi/server/version.rb +1 -1
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: deb929709a8dccdf23ec4cdbf7c5880277b9d12f23a1f2d962ed06dcb51c9223
+  data.tar.gz: 137a2e85d1e3d57cdfc007c004b3b385c2db2e90f9b1d613c5d4e0bb0bad129f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f72ed322a1dd7a738f28c13943394ca34f4c095bb9221770f02cdf918fa06c12ccb5332e4302f1ca837853afb4e3380710d82f6274c6316a68e1922bbeddcbd1
+  data.tar.gz: 4570d5e0bb716b56703cd6f8a33e9ddf6684c01c18e3b7a35039e292d26aa17620737aeb887034f4cfadd5d0f1a5edcadb6239e2d163974c4087caf580abf7ce
data/Cargo.lock
CHANGED
data/exe/itsi
CHANGED
@@ -4,7 +4,6 @@
 require "itsi/server"
 require "optparse"
 
-
 COMMANDS = {
   "init" => "Initialize a new Itsi.rb server configuration file",
   "status" => "Show the status of the server",
@@ -119,7 +118,6 @@ parser = OptionParser.new do |opts|
    options[:shutdown_timeout] = shutdown_timeout
  end
 
-
  opts.on("--stream-body", TrueClass, "Stream body frames (default: false for best compatibility)") do |stream_body|
    options[:stream_body] = stream_body
  end
@@ -159,7 +157,7 @@ parser = OptionParser.new do |opts|
   end
 end
 
-if ENV[
+if ENV["COMP_LINE"] || ARGV.include?("--completion")
  puts COMMANDS.keys
  exit
 end
@@ -173,7 +171,7 @@ end
 
 case (command = ARGV.shift)
 when *COMMANDS.keys
-  required_arity = Itsi::Server.method(command).parameters&.select{|c| c.first == :req }&.length&.succ || 2
+  required_arity = Itsi::Server.method(command).parameters&.select { |c| c.first == :req }&.length&.succ || 2
  case required_arity
  when 1 then Itsi::Server.send(command)
  when 2 then Itsi::Server.send(command, options)
data/ext/itsi_server/Cargo.toml
CHANGED
data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs
CHANGED
@@ -1,69 +1,136 @@
 use derive_more::Debug;
 use globset::{Glob, GlobSet, GlobSetBuilder};
 use magnus::error::Result;
-use nix::unistd::{close, fork, pipe, read};
+use nix::unistd::{close, dup, fork, pipe, read, write};
 use notify::event::ModifyKind;
-use notify::{Event, RecursiveMode, Watcher};
-use
+use notify::{Event, EventKind, RecursiveMode, Watcher};
+use parking_lot::Mutex;
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
 use std::path::Path;
-use std::
+use std::path::PathBuf;
+use std::process::Command;
+
+use std::sync::{mpsc, Arc};
+use std::thread;
 use std::time::{Duration, Instant};
-use
-
-    os::fd::{AsRawFd, IntoRawFd, OwnedFd},
-    path::PathBuf,
-    process::Command,
-    sync::mpsc,
-    thread::{self},
-};
-use tracing::debug;
-
-/// Represents a set of patterns and commands.
+use tracing::{error, info};
+
 #[derive(Debug, Clone)]
 struct PatternGroup {
     base_dir: PathBuf,
     glob_set: GlobSet,
-    pattern: String,
     commands: Vec<Vec<String>>,
+    pattern: String,
     last_triggered: Option<Instant>,
 }
 
-
-
-
-
-
-
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum WatcherCommand {
+    Stop,
+    ConfigError,
+    Continue,
+}
+
+#[derive(Debug)]
+pub struct WatcherPipes {
+    pub read_fd: OwnedFd,
+    pub write_fd: OwnedFd,
+}
+
+impl AsRawFd for WatcherPipes {
+    fn as_raw_fd(&self) -> RawFd {
+        self.read_fd.as_raw_fd()
     }
+}
+
+impl Drop for WatcherPipes {
+    fn drop(&mut self) {
+        let _ = send_watcher_command(&self.write_fd, WatcherCommand::Stop);
+        let _ = close(self.read_fd.as_raw_fd());
+        let _ = close(self.write_fd.as_raw_fd());
+    }
+}
 
+fn extract_and_canonicalize_base_dir(pattern: &str) -> (PathBuf, String) {
     let path = Path::new(pattern);
     let mut base = PathBuf::new();
+    let mut remaining_components = Vec::new();
+    let mut found_glob = false;
+
     for comp in path.components() {
         let comp_str = comp.as_os_str().to_string_lossy();
-        if
-
+        if !found_glob
+            && (comp_str.contains('*') || comp_str.contains('?') || comp_str.contains('['))
+        {
+            found_glob = true;
+            remaining_components.push(comp_str.to_string());
+        } else if found_glob {
+            remaining_components.push(comp_str.to_string());
         } else {
             base.push(comp);
         }
     }
-
-    let base = if base.as_os_str().is_empty()
+
+    let base = if base.as_os_str().is_empty() {
         PathBuf::from(".")
     } else {
         base
     };
+    let base = fs::canonicalize(&base).unwrap_or(base);
+    let remaining_pattern = remaining_components.join("/");
 
-
+    (base, remaining_pattern)
 }
 
-
-const
+const DEBOUNCE_DURATION: Duration = Duration::from_millis(300);
+const EVENT_DEDUP_DURATION: Duration = Duration::from_millis(50);
+const AUTO_RECOVERY_TIMEOUT: Duration = Duration::from_secs(5);
+
+fn serialize_command(cmd: WatcherCommand) -> u8 {
+    match cmd {
+        WatcherCommand::Stop => 0,
+        WatcherCommand::ConfigError => 1,
+        WatcherCommand::Continue => 2,
+    }
+}
+
+fn deserialize_command(byte: u8) -> Option<WatcherCommand> {
+    match byte {
+        0 => Some(WatcherCommand::Stop),
+        1 => Some(WatcherCommand::ConfigError),
+        2 => Some(WatcherCommand::Continue),
+        _ => None,
+    }
+}
+
+pub fn send_watcher_command(fd: &OwnedFd, cmd: WatcherCommand) -> Result<()> {
+    let buf = [serialize_command(cmd)];
+    match write(fd, &buf) {
+        Ok(_) => Ok(()),
+        Err(e) => Err(magnus::Error::new(
+            magnus::exception::standard_error(),
+            format!("Failed to send command to watcher: {}", e),
+        )),
+    }
+}
+
+pub fn watch_groups(
+    pattern_groups: Vec<(String, Vec<Vec<String>>)>,
+) -> Result<Option<WatcherPipes>> {
+    // Create bidirectional pipes for communication
+    let (parent_read_fd, child_write_fd): (OwnedFd, OwnedFd) = pipe().map_err(|e| {
+        magnus::Error::new(
+            magnus::exception::standard_error(),
+            format!("Failed to create parent read pipe: {}", e),
+        )
+    })?;
 
-
-    let (r_fd, w_fd): (OwnedFd, OwnedFd) = pipe().map_err(|e| {
+    let (child_read_fd, parent_write_fd): (OwnedFd, OwnedFd) = pipe().map_err(|e| {
         magnus::Error::new(
             magnus::exception::standard_error(),
-            format!("Failed to create
+            format!("Failed to create child read pipe: {}", e),
         )
     })?;
 
@@ -77,17 +144,41 @@ pub fn watch_groups(pattern_groups: Vec<(String, Vec<Vec<String>>)>) -> Result<O
     }?;
 
     if fork_result.is_child() {
-
+        // Child process - close the parent ends of the pipes
+        let _ = close(parent_read_fd.into_raw_fd());
+        let _ = close(parent_write_fd.into_raw_fd());
+
+        let _child_read_fd_clone =
+            unsafe { OwnedFd::from_raw_fd(dup(child_read_fd.as_raw_fd()).unwrap()) };
+        let child_write_fd_clone =
+            unsafe { OwnedFd::from_raw_fd(dup(child_write_fd.as_raw_fd()).unwrap()) };
+
+        let command_channel = Arc::new(Mutex::new(None));
+        let command_channel_clone = command_channel.clone();
+
+        // Thread to read commands from parent
         thread::spawn(move || {
            let mut buf = [0u8; 1];
            loop {
-                match read(
+                match read(child_read_fd.as_raw_fd(), &mut buf) {
                    Ok(0) => {
+                        info!("Parent closed command pipe, exiting watcher");
                        std::process::exit(0);
                    }
-                    Ok(_) => {
-
-
+                    Ok(_) => {
+                        if let Some(cmd) = deserialize_command(buf[0]) {
+                            info!("Received command from parent: {:?}", cmd);
+                            *command_channel_clone.lock() = Some(cmd);
+
+                            if matches!(cmd, WatcherCommand::Stop) {
+                                info!("Received stop command, exiting watcher");
+                                std::process::exit(0);
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        error!("Error reading from command pipe: {}", e);
+                        std::process::exit(1);
                    }
                }
            }
@@ -95,11 +186,19 @@ pub fn watch_groups(pattern_groups: Vec<(String, Vec<Vec<String>>)>) -> Result<O
 
        let mut groups = Vec::new();
        for (pattern, commands) in pattern_groups.into_iter() {
-            let base_dir = extract_and_canonicalize_base_dir(&pattern);
-
+            let (base_dir, remaining_pattern) = extract_and_canonicalize_base_dir(&pattern);
+            info!(
+                "Watching base directory {:?} with pattern {:?} (original: {:?})",
+                base_dir, remaining_pattern, pattern
+            );
+
+            let glob = Glob::new(&remaining_pattern).map_err(|e| {
                magnus::Error::new(
                    magnus::exception::standard_error(),
-                    format!(
+                    format!(
+                        "Failed to create watch glob for pattern '{}': {}",
+                        remaining_pattern, e
+                    ),
                )
            })?;
            let glob_set = GlobSetBuilder::new().add(glob).build().map_err(|e| {
@@ -108,117 +207,339 @@ pub fn watch_groups(pattern_groups: Vec<(String, Vec<Vec<String>>)>) -> Result<O
                    format!("Failed to create watch glob set: {}", e),
                )
            })?;
+
            groups.push(PatternGroup {
                base_dir,
                glob_set,
-                pattern,
                commands,
+                pattern: remaining_pattern,
                last_triggered: None,
            });
        }
 
-        // Create a channel and a watcher
+        // Create a channel and a watcher
        let (tx, rx) = mpsc::channel::<notify::Result<Event>>();
+        let startup_time = Instant::now();
        let sender = tx.clone();
-
-
-
-
-
-
+
+        let event_fn = move |res: notify::Result<Event>| {
+            if let Ok(event) = res {
+                sender.send(Ok(event)).unwrap_or_else(|e| {
+                    error!("Failed to send event: {}", e);
+                });
+            } else if let Err(e) = res {
+                error!("Watch error: {:?}", e);
            }
-        }
+        };
+
+        let mut watched_paths = HashSet::new();
+        let mut watcher = notify::recommended_watcher(event_fn).expect("Failed to create watcher");
 
-        let mut watched_dirs = HashSet::new();
-        let mut watcher: RecommendedWatcher =
-            notify::recommended_watcher(event_fn(sender)).expect("Failed to create watcher");
        for group in &groups {
-            if
-
+            if watched_paths.insert(group.base_dir.clone()) {
+                let recursive = if group.pattern.is_empty() {
+                    RecursiveMode::NonRecursive
+                } else {
+                    RecursiveMode::Recursive
+                };
+
                watcher
-                    .watch(&group.base_dir,
+                    .watch(&group.base_dir, recursive)
                    .expect("Failed to add watch");
            }
        }
 
-
-
+        // Wait briefly to avoid initial event storm
+        thread::sleep(Duration::from_millis(100));
+
+        // State management
+        let mut recent_events: HashMap<(PathBuf, EventKind), Instant> = HashMap::new();
+        let restart_state = Arc::new(Mutex::new(None::<Instant>));
+
+        // Main event loop
        for res in rx {
            match res {
                Ok(event) => {
                    if !matches!(event.kind, EventKind::Modify(ModifyKind::Data(_))) {
                        continue;
                    }
-
+
                    let now = Instant::now();
+
+                    // Skip startup events
+                    if now.duration_since(startup_time) < Duration::from_millis(500) {
+                        continue;
+                    }
+
+                    // Deduplicate events
+                    let mut should_process = true;
+                    for path in &event.paths {
+                        let event_key = (path.clone(), event.kind);
+                        if let Some(&last_seen) = recent_events.get(&event_key) {
+                            if now.duration_since(last_seen) < EVENT_DEDUP_DURATION {
+                                should_process = false;
+                                break;
+                            }
+                        }
+                        recent_events.insert(event_key, now);
+                    }
+
+                    if !should_process {
+                        continue;
+                    }
+
+                    // Clean up old entries
+                    recent_events
+                        .retain(|_, &mut time| now.duration_since(time) < Duration::from_secs(1));
+
+                    // Check restart state
+                    let should_skip = {
+                        let state = restart_state.lock();
+                        if let Some(restart_time) = *state {
+                            now.duration_since(restart_time) < Duration::from_millis(500)
+                        } else {
+                            false
+                        }
+                    };
+
+                    if should_skip {
+                        continue;
+                    }
+
+                    // Process commands from parent
+                    let command_to_process = {
+                        let mut command_guard = command_channel.lock();
+                        let cmd = *command_guard;
+                        *command_guard = None;
+                        cmd
+                    };
+
+                    if let Some(cmd) = command_to_process {
+                        match cmd {
+                            WatcherCommand::ConfigError => {
+                                info!("Received config error notification, resuming file watching");
+                                *restart_state.lock() = None;
+                                for group in &mut groups {
+                                    group.last_triggered = None;
+                                }
+                                recent_events.clear();
+                            }
+                            WatcherCommand::Continue => {
+                                info!("Received continue notification, resuming file watching");
+                                *restart_state.lock() = None;
+                            }
+                            WatcherCommand::Stop => { /* Handled in command thread */ }
+                        }
+                    }
+
+                    // Process file events
                    for group in &mut groups {
+                        // Apply debounce
+                        if let Some(last_triggered) = group.last_triggered {
+                            if now.duration_since(last_triggered) < DEBOUNCE_DURATION {
+                                continue;
+                            }
+                        }
+
                        for path in event.paths.iter() {
-
-
-
-
-
-
-
-
-
-
-
+                            let matches = if group.pattern.is_empty() {
+                                path == &group.base_dir
+                            } else if let Ok(rel_path) = path.strip_prefix(&group.base_dir) {
+                                group.glob_set.is_match(rel_path)
+                            } else {
+                                false
+                            };
+
+                            if matches {
+                                group.last_triggered = Some(now);
+
+                                // Execute commands
+                                for command in &group.commands {
+                                    if command.is_empty() {
+                                        continue;
                                    }
 
-                                    //
-
+                                    // Check for shell command or restart/reload
+                                    let is_shell_command = command.len() == 1
+                                        && (command[0].contains("&&")
+                                            || command[0].contains("||")
+                                            || command[0].contains("|")
+                                            || command[0].contains(";"));
+
+                                    let is_restart = command
+                                        .windows(2)
+                                        .any(|w| w[0] == "itsi" && w[1] == "restart")
+                                        || (is_shell_command
+                                            && command[0].contains("itsi restart"));
+
+                                    let is_reload = command
+                                        .windows(2)
+                                        .any(|w| w[0] == "itsi" && w[1] == "reload")
+                                        || (is_shell_command && command[0].contains("itsi reload"));
+
+                                    // Handle restart/reload
+                                    if is_restart || is_reload {
+                                        let cmd_type =
+                                            if is_restart { "restart" } else { "reload" };
+                                        let mut should_run = false;
+
+                                        {
+                                            let mut state = restart_state.lock();
+                                            if let Some(last_time) = *state {
+                                                if now.duration_since(last_time)
+                                                    < Duration::from_secs(3)
+                                                {
+                                                    info!(
+                                                        "Ignoring {} command - too soon",
+                                                        cmd_type
+                                                    );
+                                                } else {
+                                                    *state = Some(now);
+                                                    should_run = true;
+                                                }
+                                            } else {
+                                                *state = Some(now);
+                                                should_run = true;
+                                            }
+                                        }
 
-
-                                    for command in &group.commands {
-                                        if command.is_empty() {
+                                        if !should_run {
                                            continue;
                                        }
+
+                                        // Notify parent (optional)
+                                        let _ = write(&child_write_fd_clone, &[3]);
+                                    }
+
+                                    // Build and execute command
+                                    let mut cmd = if is_shell_command {
+                                        let mut shell_cmd = Command::new("sh");
+                                        shell_cmd.arg("-c").arg(command.join(" "));
+                                        shell_cmd
+                                    } else {
                                        let mut cmd = Command::new(&command[0]);
                                        if command.len() > 1 {
                                            cmd.args(&command[1..]);
                                        }
-
-
-
-
-
-
-
-
-                                            eprintln!(
-                                                "Command {:?} failed: {:?}",
-                                                command, e
-                                            );
-                                        }
+                                        cmd
+                                    };
+
+                                    match cmd.spawn() {
+                                        Ok(mut child) => {
+                                            if let Err(e) = child.wait() {
+                                                error!("Command {:?} failed: {:?}", command, e);
                                            }
-
-
-
-
-
+
+                                            if is_restart || is_reload {
+                                                info!("Itsi command submitted, waiting for parent response");
+
+                                                // Set auto-recovery timer
+                                                let restart_state_clone =
+                                                    Arc::clone(&restart_state);
+                                                let now_clone = now;
+                                                thread::spawn(move || {
+                                                    thread::sleep(AUTO_RECOVERY_TIMEOUT);
+                                                    let mut state = restart_state_clone.lock();
+                                                    if let Some(restart_time) = *state {
+                                                        if now_clone.duration_since(restart_time)
+                                                            < Duration::from_secs(1)
+                                                        {
+                                                            info!("Auto-recovering from potential restart failure");
+                                                            *state = None;
+                                                        }
+                                                    }
+                                                });
                                            }
                                        }
+                                        Err(e) => {
+                                            error!(
+                                                "Failed to execute command {:?}: {:?}",
+                                                command, e
+                                            );
+                                        }
                                    }
-                                    break;
                                }
+                                break;
                            }
                        }
                    }
                }
-                Err(e) =>
+                Err(e) => error!("Watch error: {:?}", e),
            }
        }
 
-        // Clean up
-        for group in &groups {
-            watcher
-                .unwatch(&group.base_dir)
-                .expect("Failed to remove watch");
-        }
+        // Clean up
        drop(watcher);
        std::process::exit(0);
    } else {
-
-
+        // Parent process - close the child ends of the pipes
+        let _ = close(child_read_fd.into_raw_fd());
+        let _ = close(child_write_fd.into_raw_fd());
+
+        // Create a paired structure to return
+        let watcher_pipes = WatcherPipes {
+            read_fd: parent_read_fd,
+            write_fd: parent_write_fd,
+        };
+
+        Ok(Some(watcher_pipes))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::env;
+
+    use super::*;
+
+    #[test]
+    fn test_extract_patterns() {
+        // Save current dir to restore later
+        let original_dir = env::current_dir().unwrap();
+
+        // Create a temp dir and work from there for consistent results
+        let temp_dir = env::temp_dir().join("itsi_test_patterns");
+        let _ = fs::create_dir_all(&temp_dir);
+        env::set_current_dir(&temp_dir).unwrap();
+
+        // Test glob patterns
+        let (base, pattern) = extract_and_canonicalize_base_dir("assets/*/**.tsx");
+        assert!(base.ends_with("assets"));
+        assert_eq!(pattern, "*/**.tsx");
+
+        let (base, pattern) = extract_and_canonicalize_base_dir("./assets/*/**.tsx");
+        assert!(base.ends_with("assets"));
+        assert_eq!(pattern, "*/**.tsx");
+
+        // Test non-glob patterns - exact files should have empty pattern
+        let (base, pattern) = extract_and_canonicalize_base_dir("foo/bar.txt");
+        assert!(base.ends_with("bar.txt"));
+        assert_eq!(pattern, "");
+
+        // Test current directory patterns
+        let (base, pattern) = extract_and_canonicalize_base_dir("*.txt");
+        assert_eq!(base, temp_dir.canonicalize().unwrap());
+        assert_eq!(pattern, "*.txt");
+
+        // Test file in current directory
+        let (base, pattern) = extract_and_canonicalize_base_dir("test.txt");
+        assert!(base.ends_with("test.txt"));
+        assert_eq!(pattern, "");
+
+        // Restore original directory and clean up
+        env::set_current_dir(original_dir).unwrap();
+        let _ = fs::remove_dir_all(&temp_dir);
+    }
+
+    #[test]
+    fn test_watcher_commands() {
+        assert_eq!(serialize_command(WatcherCommand::Stop), 0);
+        assert_eq!(serialize_command(WatcherCommand::ConfigError), 1);
+        assert_eq!(serialize_command(WatcherCommand::Continue), 2);
+
+        assert_eq!(deserialize_command(0), Some(WatcherCommand::Stop));
+        assert_eq!(deserialize_command(1), Some(WatcherCommand::ConfigError));
+        assert_eq!(deserialize_command(2), Some(WatcherCommand::Continue));
+        assert_eq!(deserialize_command(99), None);
    }
 }
data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs
CHANGED
@@ -1,4 +1,4 @@
-use super::file_watcher::{self};
+use super::file_watcher::{self, WatcherCommand};
 use crate::{
     ruby_types::ITSI_SERVER_CONFIG,
     server::{
@@ -9,7 +9,7 @@ use crate::{
 use derive_more::Debug;
 use itsi_error::ItsiError;
 use itsi_rb_helpers::{call_with_gvl, print_rb_backtrace, HeapValue};
-use itsi_tracing::{set_format, set_level, set_target, set_target_filters};
+use itsi_tracing::{error, set_format, set_level, set_target, set_target_filters};
 use magnus::{
     block::Proc,
     error::Result,
@@ -18,12 +18,12 @@ use magnus::{
 };
 use nix::{
     fcntl::{fcntl, FcntlArg, FdFlag},
-    unistd::
+    unistd::dup,
 };
 use parking_lot::{Mutex, RwLock};
 use std::{
     collections::HashMap,
-    os::fd::
+    os::fd::RawFd,
     path::PathBuf,
     str::FromStr,
     sync::{
@@ -32,7 +32,7 @@ use std::{
     },
     time::Duration,
 };
-use tracing::
+use tracing::debug;
 static DEFAULT_BIND: &str = "http://localhost:3000";
 static ID_BUILD_CONFIG: LazyId = LazyId::new("build_config");
 static ID_RELOAD_EXEC: LazyId = LazyId::new("reload_exec");
@@ -44,7 +44,7 @@ pub struct ItsiServerConfig {
     pub itsi_config_proc: Arc<Option<HeapValue<Proc>>>,
     #[debug(skip)]
     pub server_params: Arc<RwLock<Arc<ServerParams>>>,
-    pub watcher_fd: Arc<Option<
+    pub watcher_fd: Arc<Option<file_watcher::WatcherPipes>>,
 }
 
 #[derive(Debug)]
@@ -84,7 +84,7 @@ pub struct ServerParams {
     listener_info: Mutex<HashMap<String, i32>>,
     pub itsi_server_token_preference: ItsiServerTokenPreference,
     pub preloaded: AtomicBool,
-    socket_opts: SocketOpts,
+    pub socket_opts: SocketOpts,
     preexisting_listeners: Option<String>,
 }
 
@@ -442,6 +442,10 @@ impl ItsiServerConfig {
        }
    }
 
+    pub fn use_reuse_port_load_balancing(&self) -> bool {
+        cfg!(target_os = "linux") && self.server_params.read().socket_opts.reuse_port
+    }
+
    /// Reload
    pub fn reload(self: Arc<Self>, cluster_worker: bool) -> Result<bool> {
        let server_params = call_with_gvl(|ruby| {
@@ -553,6 +557,9 @@ impl ItsiServerConfig {
    }
 
    pub fn dup_fds(self: &Arc<Self>) -> Result<()> {
+        // Ensure the watcher is already stopped before duplicating file descriptors
+        // to prevent race conditions between closing the watcher FD and duplicating socket FDs
+
        let binding = self.server_params.read();
        let mut listener_info_guard = binding.listener_info.lock();
        let dupped_fd_map = listener_info_guard
@@ -578,8 +585,10 @@ impl ItsiServerConfig {
    }
 
    pub fn stop_watcher(self: &Arc<Self>) -> Result<()> {
-        if let Some(
-
+        if let Some(pipes) = self.watcher_fd.as_ref() {
+            // Send explicit stop command to the watcher process
+            file_watcher::send_watcher_command(&pipes.write_fd, WatcherCommand::Stop)?;
+            // We don't close the pipes here - they'll be closed when the WatcherPipes is dropped
        }
        Ok(())
    }
@@ -594,8 +603,24 @@ impl ItsiServerConfig {
    pub async fn check_config(&self) -> bool {
        if let Some(errors) = self.get_config_errors().await {
            Self::print_config_errors(errors);
+            // Notify watcher that config check failed
+            if let Some(pipes) = self.watcher_fd.as_ref() {
+                if let Err(e) =
+                    file_watcher::send_watcher_command(&pipes.write_fd, WatcherCommand::ConfigError)
+                {
+                    error!("Failed to notify watcher of config error: {}", e);
+                }
+            }
            return false;
        }
+        // If we reach here, the config is valid
+        if let Some(pipes) = self.watcher_fd.as_ref() {
+            if let Err(e) =
+                file_watcher::send_watcher_command(&pipes.write_fd, WatcherCommand::Continue)
+            {
+                error!("Failed to notify watcher to continue: {}", e);
+            }
+        }
        true
    }
 
@@ -609,7 +634,8 @@ impl ItsiServerConfig {
            )
        })?;
 
-
+        // Make sure we're not calling stop_watcher here to avoid double-stopping
+        // The watcher should be stopped earlier in the restart sequence
        call_with_gvl(|ruby| -> Result<()> {
            ruby.get_inner_ref(&ITSI_SERVER_CONFIG)
                .funcall::<_, _, Value>(*ID_RELOAD_EXEC, (listener_json,))?;
data/ext/itsi_server/src/server/binds/listener.rs
CHANGED
@@ -304,16 +304,16 @@ impl Listener {
        connect_tcp_socket(ip, port, &socket_opts).unwrap()
    }
 
-    pub fn into_tokio_listener(self,
+    pub fn into_tokio_listener(self, should_rebind: bool) -> TokioListener {
        match self {
            Listener::Tcp(mut listener) => {
-                if
+                if should_rebind {
                    listener = Listener::rebind_listener(listener);
                }
                TokioListener::Tcp(TokioTcpListener::from_std(listener).unwrap())
            }
            Listener::TcpTls((mut listener, acceptor)) => {
-                if
+                if should_rebind {
                    listener = Listener::rebind_listener(listener);
                }
                TokioListener::TcpTls(
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs
CHANGED
@@ -100,9 +100,11 @@ impl ClusterMode {
            LifecycleEvent::Restart => {
                if self.server_config.check_config().await {
                    self.invoke_hook("before_restart");
+                    self.server_config.stop_watcher()?;
                    self.server_config.dup_fds()?;
                    self.shutdown().await.ok();
                    info!("Shutdown complete. Calling reload exec");
+
                    self.server_config.reload_exec()?;
                }
                Ok(())
@@ -111,8 +113,11 @@ impl ClusterMode {
                if !self.server_config.check_config().await {
                    return Ok(());
                }
+
                let should_reexec = self.server_config.clone().reload(true)?;
+
                if should_reexec {
+                    self.server_config.stop_watcher()?;
                    self.server_config.dup_fds()?;
                    self.shutdown().await.ok();
                    self.server_config.reload_exec()?;
@@ -321,15 +326,6 @@ impl ClusterMode {
            .iter()
            .try_for_each(|worker| worker.boot(Arc::clone(&self)))?;
 
-        if cfg!(target_os = "linux") {
-            self.server_config
-                .server_params
-                .write()
-                .listeners
-                .lock()
-                .drain(..);
-        };
-
        let (sender, mut receiver) = watch::channel(());
        *CHILD_SIGNAL_SENDER.lock() = Some(sender);
 
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
CHANGED
@@ -262,7 +262,14 @@ impl SingleMode {
        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
        let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
        let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+
+        // If we're on Linux with reuse_port enabled, we can use
+        // kernel level load balancing across processes sharing a port.
+        // To take advantage of this, these forks will rebind to the same port upon boot.
+        // Worker 0 is special (this one just inherits the bind from the master process).
        let is_zero_worker = self.is_zero_worker();
+        let should_rebind = !is_zero_worker && self.server_config.use_reuse_port_load_balancing();
+
        if monitor_thread.is_none() {
            error!("Failed to start monitor thread");
            return Err(ItsiError::new("Failed to start monitor thread"));
@@ -283,7 +290,7 @@ impl SingleMode {
            .listeners
            .lock()
            .drain(..)
-            .map(|list| Arc::new(list.into_tokio_listener(
+            .map(|list| Arc::new(list.into_tokio_listener(should_rebind)))
            .collect::<Vec<_>>();
 
        tokio_listeners.iter().cloned().for_each(|listener| {
@@ -311,7 +318,7 @@ impl SingleMode {
        let mut after_accept_wait: Option<Duration> = None::<Duration>;
 
        if cfg!(target_os = "macos") {
-            after_accept_wait = if server_params.workers > 1 {
+            after_accept_wait = if server_params.workers > 1 && !(server_params.socket_opts.reuse_port && server_params.socket_opts.reuse_address) {
                Some(Duration::from_nanos(10 * server_params.workers as u64))
            } else {
                None
@@ -434,6 +441,7 @@ impl SingleMode {
        if self.is_single_mode() {
            self.invoke_hook("before_restart");
        }
+        self.server_config.stop_watcher()?;
        self.server_config.dup_fds()?;
        self.server_config.reload_exec()?;
        Ok(())
data/lib/itsi/server/config/dsl.rb
CHANGED
@@ -1,3 +1,6 @@
+# frozen_string_literal: true
+# typed: true
+
 module Itsi
   class Server
     module Config
@@ -55,9 +58,7 @@ module Itsi
          nested_locations: [],
          middleware_loader: lambda do
            @options[:nested_locations].each(&:call)
-
-            @middleware[:app] = { app_proc: DEFAULT_APP[]}
-            end
+            @middleware[:app] = { app_proc: DEFAULT_APP[] } unless @middleware[:app] || @middleware[:static_assets]
            [flatten_routes, Config.errors_to_error_lines(errors)]
          end
        }
@@ -75,7 +76,7 @@ module Itsi
        define_method(option_name) do |*args, **kwargs, &blk|
          option.new(self, *args, **kwargs, &blk).build!
        rescue Exception => e # rubocop:disable Lint/RescueException
-          @errors << [e, e.backtrace.find{|r| !(r =~ /
+          @errors << [e, e.backtrace.find { |r| !(r =~ %r{server/config}) }]
        end
      end
 
@@ -86,7 +87,7 @@ module Itsi
      rescue Config::Endpoint::InvalidHandlerException => e
        @errors << [e, "#{e.backtrace[0]}:in #{e.message}"]
      rescue Exception => e # rubocop:disable Lint/RescueException
-        @errors << [e, e.backtrace.find{|r| !(r =~ /
+        @errors << [e, e.backtrace.find { |r| !(r =~ %r{server/config}) }]
      end
    end
 
data/lib/itsi/server/config/middleware/compression.md
CHANGED
@@ -13,7 +13,7 @@ You can enable several different compression algorithms, and choose to selective
   min_size: 1024 # 1KiB,
   algorithms: %w[zstd gzip deflate br],
   compress_streams: true,
-  mime_types: %[all],
+  mime_types: %w[all],
   level: "fastest"
 ```
 
@@ -24,7 +24,7 @@ You can enable several different compression algorithms, and choose to selective
 compress \
   min_size: 1024 # 1KiB,
   algorithms: %w[zstd gzip deflate br],
-  mime_types: %[image],
+  mime_types: %w[image],
   level: "fastest"
 
 static_assets: \
@@ -47,4 +47,4 @@ You can enable several different compression algorithms, and choose to selective
 # Pre-compressed `static_assets`
 Itsi also supports serving pre-compressed static assets directly from the file-system.
 This is configured inside the `static_assets` middleware.
-Go to the [static_assets](/middleware/static_assets
+Go to the [static_assets](/middleware/static_assets) middleware for more information.
data/lib/itsi/server/config/middleware/proxy.md
CHANGED
@@ -12,7 +12,7 @@ proxy \
   to: "http://backend.example.com/api{path}{query}",
   backends: ["127.0.0.1:3001", "127.0.0.1:3002"],
   backend_priority: "round_robin",
-  headers: { "X-Forwarded-For" =>
+  headers: { "X-Forwarded-For" => "{addr}" },
   verify_ssl: false,
   timeout: 30,
   tls_sni: true,
@@ -54,7 +54,7 @@ proxy \
 3. **Header Overrides**
    The `headers` option lets you specify extra or overriding headers. Each header value may be a literal or a string rewrite. For example, overriding `"X-Forwarded-For"` to carry the client’s IP is done by:
    ```ruby
-   { "X-Forwarded-For" =>
+   { "X-Forwarded-For" => "{addr}" }
    ```
 
 4. **Request Forwarding and Error Handling**
data/lib/itsi/server/config/options/auto_reload_config.rb
CHANGED
@@ -2,9 +2,8 @@ module Itsi
   class Server
     module Config
       class AutoReloadConfig < Option
-
        insert_text <<~SNIPPET
-
+          auto_reload_config! # Auto-reload the server configuration each time it changes.
        SNIPPET
 
        detail "Auto-reload the server configuration each time it changes."
@@ -15,18 +14,20 @@ module Itsi
 
        def build!
          return if @auto_reloading
-
+
+          src = caller.find { |l| !(l =~ %r{lib/itsi/server/config}) }.split(":").first
 
          location.instance_eval do
            return if @auto_reloading
 
            if @included
              @included.each do |file|
-                next if
+                next if "#{file}" == src
+
                if ENV["BUNDLE_BIN_PATH"]
-                  watch "#{file}
+                  watch "#{file}", [%w[bundle exec itsi restart]]
                else
-                  watch "#{file}
+                  watch "#{file}", [%w[itsi restart]]
                end
              end
            end
data/lib/itsi/server/config/options/include.md
CHANGED
@@ -7,6 +7,7 @@ Use the `include` option to load additional files to be evaluated within the cur
 You can use this option to split a large configuration file into multiple smaller files.
 
 Files required using `include` are also subject to auto-reloading, when using the [auto_reload_config](/options/auto_reload_config) option.
+The path of the included file is evaluated relative to the current configuration file.
 
 ## Examples
 ```ruby {filename="Itsi.rb"}
data/lib/itsi/server/config/options/include.rb
CHANGED
@@ -2,37 +2,39 @@ module Itsi
   class Server
     module Config
       class Include < Option
-
        insert_text "include \"${1|other_file|}\" # Include another file to be loaded within the current configuration"
 
        detail "Include another file to be loaded within the current configuration"
 
        schema do
-          Type(String)
+          Type(String) & Required()
        end
 
        def build!
-
+          caller_location = caller_locations(2, 1).first.path
+          included_file = \
+            if caller_location =~ %r{lib/itsi/server}
+              File.expand_path("#{@params}.rb")
+            else
+              File.expand_path("#{@params}.rb", File.dirname(caller_location))
+            end
+
          location.instance_eval do
            @included ||= []
            @included << included_file
 
            if @auto_reloading
              if ENV["BUNDLE_BIN_PATH"]
-                watch "#{included_file}
+                watch "#{included_file}", [%w[bundle exec itsi restart]]
              else
-                watch "#{included_file}
+                watch "#{included_file}", [%w[itsi restart]]
              end
            end
          end
 
-
-
-          code = IO.read(filename)
-          location.instance_eval(code, filename, 1)
-
+          code = IO.read(included_file)
+          location.instance_eval(code, included_file, 1)
        end
-
      end
    end
  end
data/lib/itsi/server/config/options/reuse_port.rb
CHANGED
@@ -2,17 +2,15 @@ module Itsi
   class Server
     module Config
       class ReusePort < Option
-
        insert_text <<~SNIPPET
-
+          reuse_port ${1|true,false|}
        SNIPPET
 
        detail "Configures whether the server should set the reuse_port option on the underlying socket."
 
        schema do
-          (Bool() & Required()).default(
+          (Bool() & Required()).default(true)
        end
-
      end
    end
  end
data/lib/itsi/server/config.rb
CHANGED
@@ -97,7 +97,7 @@ module Itsi
          errors << [e, e.backtrace[0]]
        end
      # If we're just preloading a specific gem group, we'll do that here too
-      when Symbol
+      when Symbol, String
        Itsi.log_debug("Preloading gem group #{preload}")
        Bundler.require(preload)
      end
data/lib/itsi/server/default_config/Itsi.rb
CHANGED
@@ -10,7 +10,7 @@ env = ENV.fetch("APP_ENV") { ENV.fetch("RACK_ENV", "development") }
 
 # Number of worker processes to spawn
 # If more than 1, Itsi will be booted in Cluster mode
-workers ENV["ITSI_WORKERS"]&.to_i || env == "development" ? 1 : nil
+workers ENV["ITSI_WORKERS"]&.to_i || (env == "development" ? 1 : nil)
 
 # Number of threads to spawn per worker process
 # For pure CPU bound applicationss, you'll get the best results keeping this number low
@@ -27,11 +27,13 @@ threads ENV.fetch("ITSI_THREADS", 3)
 fiber_scheduler nil
 
 # If you bind to https, without specifying a certificate, Itsi will use a self-signed certificate.
-# The self-signed certificate will use a CA generated for your
+# The self-signed certificate will use a CA generated for your
+# host and stored inside `ITSI_LOCAL_CA_DIR` (Defaults to ~/.itsi)
 # bind "https://0.0.0.0:3000"
 # bind "https://0.0.0.0:3000?domains=dev.itsi.fyi"
 #
-# If you want to use let's encrypt to generate you a real certificate you
+# If you want to use let's encrypt to generate you a real certificate you
+# and pass cert=acme and an acme_email address to generate one.
 # bind "https://itsi.fyi?cert=acme&acme_email=admin@itsi.fyi"
 # You can generate certificates for multiple domains at once, by passing a comma-separated list of domains
 # bind "https://0.0.0.0?domains=foo.itsi.fyi,bar.itsi.fyi&cert=acme&acme_email=admin@itsi.fyi"
@@ -68,7 +70,8 @@ preload true
 # all of them at once, if they reach the threshold simultaneously.
 worker_memory_limit 1024 * 1024 * 1024
 
-# You can provide an optional block of code to run, when a worker hits its memory threshold
+# You can provide an optional block of code to run, when a worker hits its memory threshold
+# (Use this to send yourself an alert,
 # write metrics to disk etc. etc.)
 after_memory_limit_reached do |pid|
   puts "Worker #{pid} has reached its memory threshold and will restart"
@@ -85,7 +88,8 @@ after_fork {}
 shutdown_timeout 5
 
 # Set this to false for application environments that require rack.input to be a rewindable body
-# (like Rails). For rack applications that can stream inputs, you can set this to true for a more
+# (like Rails). For rack applications that can stream inputs, you can set this to true for a more
+# memory-efficient approach.
 stream_body false
 
 # OOB GC responses threshold
data/lib/itsi/server/version.rb
CHANGED