itsi-scheduler 0.1.5 → 0.2.2

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +120 -52
  3. data/README.md +57 -24
  4. data/Rakefile +0 -4
  5. data/ext/itsi_acme/Cargo.toml +86 -0
  6. data/ext/itsi_acme/examples/high_level.rs +63 -0
  7. data/ext/itsi_acme/examples/high_level_warp.rs +52 -0
  8. data/ext/itsi_acme/examples/low_level.rs +87 -0
  9. data/ext/itsi_acme/examples/low_level_axum.rs +66 -0
  10. data/ext/itsi_acme/src/acceptor.rs +81 -0
  11. data/ext/itsi_acme/src/acme.rs +354 -0
  12. data/ext/itsi_acme/src/axum.rs +86 -0
  13. data/ext/itsi_acme/src/cache.rs +39 -0
  14. data/ext/itsi_acme/src/caches/boxed.rs +80 -0
  15. data/ext/itsi_acme/src/caches/composite.rs +69 -0
  16. data/ext/itsi_acme/src/caches/dir.rs +106 -0
  17. data/ext/itsi_acme/src/caches/mod.rs +11 -0
  18. data/ext/itsi_acme/src/caches/no.rs +78 -0
  19. data/ext/itsi_acme/src/caches/test.rs +136 -0
  20. data/ext/itsi_acme/src/config.rs +172 -0
  21. data/ext/itsi_acme/src/https_helper.rs +69 -0
  22. data/ext/itsi_acme/src/incoming.rs +142 -0
  23. data/ext/itsi_acme/src/jose.rs +161 -0
  24. data/ext/itsi_acme/src/lib.rs +142 -0
  25. data/ext/itsi_acme/src/resolver.rs +59 -0
  26. data/ext/itsi_acme/src/state.rs +424 -0
  27. data/ext/itsi_error/Cargo.toml +1 -0
  28. data/ext/itsi_error/src/lib.rs +106 -7
  29. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  30. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  31. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  32. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  33. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  34. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  35. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  36. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  37. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  38. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  39. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  40. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  41. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  42. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  43. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  44. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  45. data/ext/itsi_rb_helpers/src/lib.rs +63 -12
  46. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  47. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  48. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  49. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  50. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  51. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  52. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  53. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  54. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  55. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  56. data/ext/itsi_scheduler/Cargo.toml +1 -1
  57. data/ext/itsi_scheduler/src/itsi_scheduler.rs +9 -3
  58. data/ext/itsi_scheduler/src/lib.rs +1 -0
  59. data/ext/itsi_server/Cargo.lock +2956 -0
  60. data/ext/itsi_server/Cargo.toml +73 -29
  61. data/ext/itsi_server/src/default_responses/mod.rs +11 -0
  62. data/ext/itsi_server/src/env.rs +43 -0
  63. data/ext/itsi_server/src/lib.rs +114 -75
  64. data/ext/itsi_server/src/prelude.rs +2 -0
  65. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  66. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +29 -8
  67. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
  68. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
  69. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +362 -0
  70. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +84 -40
  71. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +233 -0
  72. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +565 -0
  73. data/ext/itsi_server/src/ruby_types/itsi_server.rs +86 -0
  74. data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
  75. data/ext/itsi_server/src/server/{bind.rs → binds/bind.rs} +59 -24
  76. data/ext/itsi_server/src/server/binds/listener.rs +444 -0
  77. data/ext/itsi_server/src/server/binds/mod.rs +4 -0
  78. data/ext/itsi_server/src/server/{tls → binds/tls}/locked_dir_cache.rs +57 -19
  79. data/ext/itsi_server/src/server/{tls.rs → binds/tls.rs} +120 -31
  80. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  81. data/ext/itsi_server/src/server/http_message_types.rs +97 -0
  82. data/ext/itsi_server/src/server/io_stream.rs +2 -1
  83. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  84. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +170 -0
  85. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +63 -0
  86. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +94 -0
  87. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +94 -0
  88. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +343 -0
  89. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +151 -0
  90. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +316 -0
  91. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +301 -0
  92. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +193 -0
  93. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +64 -0
  94. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +192 -0
  95. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +171 -0
  96. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +198 -0
  97. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
  98. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +209 -0
  99. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  100. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
  101. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +116 -0
  102. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +411 -0
  103. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +142 -0
  104. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +55 -0
  105. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +54 -0
  106. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +51 -0
  107. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
  108. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +187 -0
  109. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
  110. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +173 -0
  111. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +31 -0
  112. data/ext/itsi_server/src/server/middleware_stack/mod.rs +381 -0
  113. data/ext/itsi_server/src/server/mod.rs +7 -5
  114. data/ext/itsi_server/src/server/process_worker.rs +65 -14
  115. data/ext/itsi_server/src/server/redirect_type.rs +26 -0
  116. data/ext/itsi_server/src/server/request_job.rs +11 -0
  117. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +150 -50
  118. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  119. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +399 -165
  120. data/ext/itsi_server/src/server/signal.rs +33 -26
  121. data/ext/itsi_server/src/server/size_limited_incoming.rs +107 -0
  122. data/ext/itsi_server/src/server/thread_worker.rs +218 -107
  123. data/ext/itsi_server/src/services/cache_store.rs +74 -0
  124. data/ext/itsi_server/src/services/itsi_http_service.rs +257 -0
  125. data/ext/itsi_server/src/services/mime_types.rs +1416 -0
  126. data/ext/itsi_server/src/services/mod.rs +6 -0
  127. data/ext/itsi_server/src/services/password_hasher.rs +83 -0
  128. data/ext/itsi_server/src/services/rate_limiter.rs +580 -0
  129. data/ext/itsi_server/src/services/static_file_server.rs +1340 -0
  130. data/ext/itsi_tracing/Cargo.toml +1 -0
  131. data/ext/itsi_tracing/src/lib.rs +362 -33
  132. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  133. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  134. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  135. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  136. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  137. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  138. data/itsi-scheduler-100.png +0 -0
  139. data/lib/itsi/scheduler/version.rb +1 -1
  140. data/lib/itsi/scheduler.rb +11 -6
  141. metadata +117 -24
  142. data/CHANGELOG.md +0 -5
  143. data/CODE_OF_CONDUCT.md +0 -132
  144. data/LICENSE.txt +0 -21
  145. data/ext/itsi_error/src/from.rs +0 -71
  146. data/ext/itsi_server/extconf.rb +0 -6
  147. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  148. data/ext/itsi_server/src/request/itsi_request.rs +0 -277
  149. data/ext/itsi_server/src/request/mod.rs +0 -1
  150. data/ext/itsi_server/src/response/mod.rs +0 -1
  151. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
  152. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
  153. data/ext/itsi_server/src/server/itsi_server.rs +0 -244
  154. data/ext/itsi_server/src/server/listener.rs +0 -327
  155. data/ext/itsi_server/src/server/{bind_protocol.rs → binds/bind_protocol.rs} +0 -0
@@ -1,42 +1,53 @@
  use crate::{
- request::itsi_request::ItsiRequest,
+ ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
  server::{
+ binds::listener::ListenerInfo,
  io_stream::IoStream,
- itsi_server::{RequestJob, Server},
  lifecycle_event::LifecycleEvent,
- listener::{Listener, TokioListener},
+ request_job::RequestJob,
+ signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
  thread_worker::{build_thread_workers, ThreadWorker},
  },
+ services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
  };
- use http::Request;
- use hyper::{body::Incoming, service::service_fn};
  use hyper_util::{
  rt::{TokioExecutor, TokioIo, TokioTimer},
  server::conn::auto::Builder,
  };
  use itsi_error::{ItsiError, Result};
+ use itsi_rb_helpers::{
+ call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
+ };
  use itsi_tracing::{debug, error, info};
+ use magnus::{value::ReprValue, Value};
  use nix::unistd::Pid;
+ use parking_lot::RwLock;
  use std::{
- num::NonZeroU8,
+ collections::HashMap,
  pin::Pin,
- sync::Arc,
- time::{Duration, Instant},
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ thread::sleep,
+ time::{Duration, Instant, SystemTime, UNIX_EPOCH},
  };
  use tokio::{
  runtime::{Builder as RuntimeBuilder, Runtime},
- sync::broadcast,
+ sync::{
+ broadcast,
+ watch::{self},
+ },
  task::JoinSet,
  };
  use tracing::instrument;

  pub struct SingleMode {
  pub executor: Builder<TokioExecutor>,
- pub server: Arc<Server>,
- pub sender: async_channel::Sender<RequestJob>,
- pub(crate) listeners: Arc<Vec<Arc<Listener>>>,
- pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
+ pub server_config: Arc<ItsiServerConfig>,
  pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+ pub restart_requested: AtomicBool,
+ pub status: RwLock<HashMap<u8, (u64, u64)>>,
  }

  pub enum RunningPhase {
@@ -46,202 +57,425 @@ pub enum RunningPhase {
  }

  impl SingleMode {
- #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
- pub(crate) fn new(
- server: Arc<Server>,
- listeners: Arc<Vec<Arc<Listener>>>,
- lifecycle_channel: broadcast::Sender<LifecycleEvent>,
- ) -> Result<Self> {
- let (thread_workers, sender) = build_thread_workers(
- Pid::this(),
- NonZeroU8::try_from(server.threads).unwrap(),
- server.app,
- server.scheduler_class.clone(),
- )?;
+ #[instrument(parent=None, skip_all)]
+ pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+ server_config.server_params.read().preload_ruby()?;
  Ok(Self {
  executor: Builder::new(TokioExecutor::new()),
- listeners,
- server,
- sender,
- thread_workers,
- lifecycle_channel,
+ server_config,
+ lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
+ restart_requested: AtomicBool::new(false),
+ status: RwLock::new(HashMap::new()),
  })
  }

  pub fn build_runtime(&self) -> Runtime {
- let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
+ let mut builder: RuntimeBuilder = if self
+ .server_config
+ .server_params
+ .read()
+ .multithreaded_reactor
+ {
+ RuntimeBuilder::new_multi_thread()
+ } else {
+ RuntimeBuilder::new_current_thread()
+ };
  builder
  .thread_name("itsi-server-accept-loop")
  .thread_stack_size(3 * 1024 * 1024)
- .enable_io()
- .enable_time()
+ .enable_all()
  .build()
  .expect("Failed to build Tokio runtime")
  }

  pub fn stop(&self) -> Result<()> {
+ SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
+ self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+ Ok(())
+ }
+
+ pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
+ println!(" └─ Worker");
+ println!(
+ " - binds: {:?}",
+ self.server_config.server_params.read().binds
+ );
+
+ println!(
+ " ─ streaming body: {:?}",
+ self.server_config.server_params.read().streamable_body
+ );
+ println!(
+ " ─ multithreaded runtime: {:?}",
+ self.server_config
+ .server_params
+ .read()
+ .multithreaded_reactor
+ );
+ println!(
+ " ─ scheduler: {:?}",
+ self.server_config.server_params.read().scheduler_class
+ );
+ println!(
+ " ─ OOB GC Response threadhold: {:?}",
+ self.server_config
+ .server_params
+ .read()
+ .oob_gc_responses_threshold
+ );
+ for worker in thread_workers.iter() {
+ println!(" └─ - Thread : {:?}", worker.id);
+ println!(" - # Requests Processed: {:?}", worker.request_id);
+ println!(
+ " - Last Request Started: {:?} ago",
+ if worker.current_request_start.load(Ordering::Relaxed) == 0 {
+ Duration::from_secs(0)
+ } else {
+ SystemTime::now()
+ .duration_since(
+ UNIX_EPOCH
+ + Duration::from_secs(
+ worker.current_request_start.load(Ordering::Relaxed),
+ ),
+ )
+ .unwrap_or(Duration::from_secs(0))
+ }
+ );
+ call_with_gvl(|_| {
+ if let Some(thread) = worker.thread.read().as_ref() {
+ if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
+ println!(" - Backtrace:");
+ for line in backtrace {
+ println!(" - {}", line);
+ }
+ }
+ }
+ })
+ }
+
  Ok(())
  }

- #[instrument(parent=None, skip(self))]
+ pub fn start_monitors(
+ self: Arc<Self>,
+ thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
+ ) -> Option<magnus::Thread> {
+ call_with_gvl(move |_| {
+ create_ruby_thread(move || {
+ call_without_gvl(move || {
+ let monitor_runtime = RuntimeBuilder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap();
+ let receiver = self.clone();
+ monitor_runtime.block_on({
+ let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+ let receiver = receiver.clone();
+ let thread_workers = thread_workers.clone();
+ async move {
+ loop {
+ tokio::select! {
+ _ = tokio::time::sleep(Duration::from_secs(1)) => {
+ let mut status_lock = receiver.status.write();
+ thread_workers.iter().for_each(|worker| {
+ let worker_entry = status_lock.entry(worker.id);
+ let data = (
+ worker.request_id.load(Ordering::Relaxed),
+ worker.current_request_start.load(Ordering::Relaxed),
+ );
+ worker_entry.or_insert(data);
+ });
+ }
+ lifecycle_event = lifecycle_rx.recv() => {
+ match lifecycle_event {
+ Ok(LifecycleEvent::Restart) => {
+ receiver.restart().await.ok();
+ }
+ Ok(LifecycleEvent::Reload) => {
+ receiver.reload().await.ok();
+ }
+ Ok(LifecycleEvent::Shutdown) => {
+ break;
+ }
+ Ok(LifecycleEvent::PrintInfo) => {
+ receiver.print_info(thread_workers.clone()).await.ok();
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+ })
+ })
+ })
+ })
+ }
+
+ #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
  pub fn run(self: Arc<Self>) -> Result<()> {
  let mut listener_task_set = JoinSet::new();
- let self_ref = Arc::new(self);
- self_ref.build_runtime().block_on(async {
-
- for listener in self_ref.listeners.clone().iter() {
- let listener = Arc::new(listener.to_tokio_listener());
- let mut lifecycle_rx = self_ref.lifecycle_channel.subscribe();
- let self_ref = self_ref.clone();
- let listener = listener.clone();
- let (shutdown_sender, mut shutdown_receiver) = tokio::sync::watch::channel::<RunningPhase>(RunningPhase::Running);
- let listener_clone = listener.clone();
-
- tokio::spawn(async move {
- listener_clone.spawn_state_task().await;
- });
-
- listener_task_set.spawn(async move {
- let strategy = self_ref.clone();
- loop {
- tokio::select! {
- accept_result = listener.accept() => match accept_result {
- Ok(accept_result) => {
- if let Err(e) = strategy.serve_connection(accept_result, listener.clone(), shutdown_receiver.clone()).await {
- error!("Error in serve_connection {:?}", e)
+ let runtime = self.build_runtime();
+
+ let (thread_workers, job_sender, nonblocking_sender) =
+ build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
+ .inspect_err(|e| {
+ if let Some(err_val) = e.value() {
+ print_rb_backtrace(err_val);
+ }
+ })?;
+
+ info!(
+ threads = thread_workers.len(),
+ binds = format!("{:?}", self.server_config.server_params.read().binds)
+ );
+
+ let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
+ let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+ if monitor_thread.is_none() {
+ error!("Failed to start monitor thread");
+ return Err(ItsiError::new("Failed to start monitor thread"));
+ }
+ let monitor_thread = monitor_thread.unwrap();
+ if SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
+ return Ok(());
+ }
+ let result = runtime.block_on(
+ async {
+ let server_params = self.server_config.server_params.read().clone();
+ if let Err(err) = server_params.initialize_middleware().await {
+ error!("Failed to initialize middleware: {}", err);
+ return Err(ItsiError::new("Failed to initialize middleware"))
+ }
+ let tokio_listeners = server_params.listeners.lock()
+ .drain(..)
+ .map(|list| {
+ Arc::new(list.into_tokio_listener())
+ })
+ .collect::<Vec<_>>();
+
+ for listener in tokio_listeners.iter() {
+ let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+
+ let listener_info = Arc::new(listener.listener_info());
+ let self_ref = self.clone();
+ let listener = listener.clone();
+ let shutdown_sender = shutdown_sender.clone();
+ let job_sender = job_sender.clone();
+ let nonblocking_sender = nonblocking_sender.clone();
+ let workers_clone = thread_workers.clone();
+ let listener_clone = listener.clone();
+ let mut shutdown_receiver = shutdown_sender.subscribe();
+ let shutdown_receiver_clone = shutdown_receiver.clone();
+ listener_task_set.spawn(async move {
+ listener_clone.spawn_state_task(shutdown_receiver_clone).await;
+ });
+
+ listener_task_set.spawn(async move {
+ let strategy_clone = self_ref.clone();
+ let mut acceptor_task_set = JoinSet::new();
+ loop {
+ tokio::select! {
+ accept_result = listener.accept() => match accept_result {
+ Ok(accept_result) => {
+ let strategy = strategy_clone.clone();
+ let listener_info = listener_info.clone();
+ let shutdown_receiver = shutdown_receiver.clone();
+ let job_sender = job_sender.clone();
+ let nonblocking_sender = nonblocking_sender.clone();
+ acceptor_task_set.spawn(async move {
+ strategy.serve_connection(accept_result, job_sender, nonblocking_sender, listener_info, shutdown_receiver).await;
+ });
+ },
+ Err(e) => debug!("Listener.accept failed {:?}", e),
+ },
+ _ = shutdown_receiver.changed() => {
+ break;
  }
- },
- Err(e) => debug!("Listener.accept failed {:?}", e),
- },
- _ = shutdown_receiver.changed() => {
- break;
+ lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
+ Ok(LifecycleEvent::Shutdown) => {
+ debug!("Received lifecycle event: {:?}", lifecycle_event);
+ shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
+ tokio::time::sleep(Duration::from_millis(25)).await;
+ for _i in 0..workers_clone.len() {
+ job_sender.send(RequestJob::Shutdown).await.unwrap();
+ nonblocking_sender.send(RequestJob::Shutdown).await.unwrap();
+ }
+ break;
+ },
+ Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+ _ => {}
+ }
  }
- lifecycle_event = lifecycle_rx.recv() => match lifecycle_event{
- Ok(lifecycle_event) => {
- if let Err(e) = strategy.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await{
- match e {
- ItsiError::Break() => break,
- _ => error!("Error in handle_lifecycle_event {:?}", e)
- }
- }
+ }

- },
- Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
- }
+ let deadline = Instant::now()
+ + Duration::from_secs_f64(self_ref.server_config.server_params.read().shutdown_timeout);
+ tokio::select! {
+ _ = async {
+ while let Some(_res) = acceptor_task_set.join_next().await {}
+ } => {},
+ _ = tokio::time::sleep_until(tokio::time::Instant::from_std(deadline)) => {},
  }
- }
- if let Ok(listener) = Arc::try_unwrap(listener){
- listener.unbind();
- }
- });
+ });

- }
+ }

- while let Some(_res) = listener_task_set.join_next().await {}
- });
+ if self.is_single_mode() {
+ self.invoke_hook("after_start");
+ }

- Ok(())
+ while let Some(_res) = listener_task_set.join_next().await {}
+
+ // Explicitly drop all listeners to ensure file descriptors are released
+ drop(tokio_listeners);
+
+ Ok::<(), ItsiError>(())
+ });
+
+ debug!("Single mode runtime exited.");
+
+ if result.is_err() {
+ for _i in 0..thread_workers.len() {
+ job_sender.send_blocking(RequestJob::Shutdown).unwrap();
+ nonblocking_sender
+ .send_blocking(RequestJob::Shutdown)
+ .unwrap();
+ }
+ self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+ }
+
+ shutdown_sender.send(RunningPhase::Shutdown).ok();
+ let deadline = Instant::now()
+ + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);
+
+ runtime.shutdown_timeout(Duration::from_millis(100));
+
+ debug!("Shutdown timeout finished.");
+ loop {
+ if thread_workers
+ .iter()
+ .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
+ {
+ funcall_no_ret(monitor_thread, "join", ()).ok();
+ break;
+ }
+ sleep(Duration::from_millis(50));
+ }
+
+ if self.is_single_mode() {
+ self.invoke_hook("before_shutdown");
+ }
+
+ if self.restart_requested.load(Ordering::SeqCst) {
+ self.restart_requested.store(false, Ordering::SeqCst);
+ info!("Worker restarting");
+ self.run()?;
+ }
+
+ debug!("Runtime has shut down");
+ result
+ }
+
+ pub fn is_single_mode(&self) -> bool {
+ self.server_config.server_params.read().workers == 1
  }

  pub(crate) async fn serve_connection(
  &self,
  stream: IoStream,
- listener: Arc<TokioListener>,
- shutdown_channel: tokio::sync::watch::Receiver<RunningPhase>,
- ) -> Result<()> {
- let sender_clone = self.sender.clone();
+ job_sender: async_channel::Sender<RequestJob>,
+ nonblocking_sender: async_channel::Sender<RequestJob>,
+ listener: Arc<ListenerInfo>,
+ shutdown_channel: watch::Receiver<RunningPhase>,
+ ) {
  let addr = stream.addr();
  let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
- let server = self.server.clone();
  let executor = self.executor.clone();
  let mut shutdown_channel_clone = shutdown_channel.clone();
- tokio::spawn(async move {
- let server = server.clone();
- let mut executor = executor.clone();
- let mut binding = executor.http1();
- let shutdown_channel = shutdown_channel_clone.clone();
- let mut serve = Box::pin(
- binding
- .timer(TokioTimer::new())
- .header_read_timeout(Duration::from_secs(1))
- .serve_connection_with_upgrades(
- io,
- service_fn(move |hyper_request: Request<Incoming>| {
- ItsiRequest::process_request(
- hyper_request,
- sender_clone.clone(),
- server.clone(),
- listener.clone(),
- addr.clone(),
- shutdown_channel.clone(),
- )
- }),
- ),
- );
+ let mut executor = executor.clone();
+ let mut binding = executor.http1();
+ let shutdown_channel = shutdown_channel_clone.clone();

- tokio::select! {
- // Await the connection finishing naturally.
- res = &mut serve => {
- match res{
- Ok(()) => {
- debug!("Connection closed normally")
- },
- Err(res) => {
- debug!("Connection finished with error: {:?}", res)
- }
- }
- serve.as_mut().graceful_shutdown();
- },
- // A lifecycle event triggers shutdown.
- _ = shutdown_channel_clone.changed() => {
- // Initiate graceful shutdown.
- serve.as_mut().graceful_shutdown();
- // Now await the connection to finish shutting down.
- if let Err(e) = serve.await {
- debug!("Connection shutdown error: {:?}", e);
+ let service = ItsiHttpService {
+ inner: Arc::new(ItsiHttpServiceInner {
+ sender: job_sender.clone(),
+ nonblocking_sender: nonblocking_sender.clone(),
+ server_params: self.server_config.server_params.read().clone(),
+ listener,
+ addr: addr.to_string(),
+ shutdown_channel: shutdown_channel.clone(),
+ }),
+ };
+ let mut serve = Box::pin(
+ binding
+ .timer(TokioTimer::new())
+ .header_read_timeout(self.server_config.server_params.read().header_read_timeout)
+ .serve_connection_with_upgrades(io, service),
+ );
+
+ tokio::select! {
+ // Await the connection finishing naturally.
+ res = &mut serve => {
+ match res{
+ Ok(()) => {
+ debug!("Connection closed normally")
+ },
+ Err(res) => {
+ debug!("Connection closed abruptly: {:?}", res)
  }
  }
- }
- });
- Ok(())
- }
+ serve.as_mut().graceful_shutdown();
+ },
+ // A lifecycle event triggers shutdown.
+ _ = shutdown_channel_clone.changed() => {
+ // Initiate graceful shutdown.
+ serve.as_mut().graceful_shutdown();

- pub async fn handle_lifecycle_event(
- &self,
- lifecycle_event: LifecycleEvent,
- shutdown_sender: tokio::sync::watch::Sender<RunningPhase>,
- ) -> Result<()> {
- if let LifecycleEvent::Shutdown = lifecycle_event {
- shutdown_sender
- .send(RunningPhase::ShutdownPending)
- .expect("Failed to send shutdown pending signal");
- let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
- for worker in &*self.thread_workers {
- worker.request_shutdown().await;
- }
- while Instant::now() < deadline {
- tokio::time::sleep(Duration::from_millis(50)).await;
- let alive_threads = self
- .thread_workers
- .iter()
- .filter(|worker| worker.poll_shutdown(deadline))
- .count();
- if alive_threads == 0 {
- break;
+ // Now await the connection to finish shutting down.
+ if let Err(e) = serve.await {
+ debug!("Connection shutdown error: {:?}", e);
  }
- tokio::time::sleep(Duration::from_millis(200)).await;
  }
+ }
+ }

- info!("Sending shutdown signal");
- shutdown_sender
- .send(RunningPhase::Shutdown)
- .expect("Failed to send shutdown signal");
- self.thread_workers.iter().for_each(|worker| {
- worker.poll_shutdown(deadline);
- });
+ /// Attempts to reload the config "live"
+ /// Not that when running in single mode this will not unload
+ /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
+ pub async fn reload(&self) -> Result<()> {
+ if !self.server_config.check_config().await {
+ return Ok(());
+ }
+ let should_reexec = self.server_config.clone().reload(false)?;
+ if should_reexec {
+ if self.is_single_mode() {
+ self.invoke_hook("before_restart");
+ }
+ self.server_config.dup_fds()?;
+ self.server_config.reload_exec()?;
+ }
+ self.restart_requested.store(true, Ordering::SeqCst);
+ self.stop()?;
+ self.server_config.server_params.read().preload_ruby()?;
+ Ok(())
+ }

- return Err(ItsiError::Break());
+ pub fn invoke_hook(&self, hook_name: &str) {
+ if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
+ call_with_gvl(|_| hook.call::<_, Value>(()).ok());
+ }
+ }
+ /// Restart the server while keeping connections open.
+ pub async fn restart(&self) -> Result<()> {
+ if !self.server_config.check_config().await {
+ return Ok(());
+ }
+ if self.is_single_mode() {
+ self.invoke_hook("before_restart");
  }
+ self.server_config.dup_fds()?;
+ self.server_config.reload_exec()?;
  Ok(())
  }
  }
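
The rewritten serve_connection above drives each connection with a tokio::select! over the in-flight connection future and a watch channel carrying the RunningPhase, calling hyper's graceful_shutdown when the phase flips. The following standalone sketch illustrates only that watch-channel shutdown pattern; RunningPhase mirrors the enum in the diff, while serve_one, the timings, and the task layout are hypothetical stand-ins and not part of the itsi_server API.

// Cargo.toml (assumed): tokio = { version = "1", features = ["full"] }
use std::time::Duration;
use tokio::sync::watch;

enum RunningPhase {
    Running,
    ShutdownPending,
}

// Hypothetical stand-in for serving one connection; in the real code this is
// hyper's serve_connection_with_upgrades future.
async fn serve_one(id: usize) {
    tokio::time::sleep(Duration::from_millis(200 * id as u64)).await;
    println!("connection {id} finished naturally");
}

#[tokio::main]
async fn main() {
    // One sender owned by the accept loop, one receiver cloned per connection.
    let (shutdown_tx, _) = watch::channel(RunningPhase::Running);

    let mut tasks = tokio::task::JoinSet::new();
    for id in 1..=3 {
        let mut shutdown_rx = shutdown_tx.subscribe();
        tasks.spawn(async move {
            tokio::select! {
                // The connection completes on its own.
                _ = serve_one(id) => {}
                // The shared phase changed; begin a graceful shutdown instead.
                _ = shutdown_rx.changed() => {
                    println!("connection {id} asked to shut down gracefully");
                }
            }
        });
    }

    // Simulate the lifecycle handler broadcasting a pending shutdown.
    tokio::time::sleep(Duration::from_millis(300)).await;
    shutdown_tx.send(RunningPhase::ShutdownPending).ok();

    // Wait for every connection task to wind down, as the accept loop does
    // with its acceptor_task_set.
    while tasks.join_next().await.is_some() {}
}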