itsi-server 0.1.1 → 0.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of itsi-server might be problematic.

Files changed (143)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/CODE_OF_CONDUCT.md +7 -0
  4. data/Cargo.lock +4417 -0
  5. data/Cargo.toml +7 -0
  6. data/README.md +4 -0
  7. data/Rakefile +8 -1
  8. data/_index.md +6 -0
  9. data/exe/itsi +94 -45
  10. data/ext/itsi_error/Cargo.toml +2 -0
  11. data/ext/itsi_error/src/from.rs +68 -0
  12. data/ext/itsi_error/src/lib.rs +18 -34
  13. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  14. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  15. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  16. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  22. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  23. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  24. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  25. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  26. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  27. data/ext/itsi_instrument_entry/Cargo.toml +15 -0
  28. data/ext/itsi_instrument_entry/src/lib.rs +31 -0
  29. data/ext/itsi_rb_helpers/Cargo.toml +3 -0
  30. data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
  31. data/ext/itsi_rb_helpers/src/lib.rs +140 -10
  32. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  33. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  34. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  35. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  36. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  37. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  38. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  39. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  40. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  41. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  42. data/ext/itsi_scheduler/Cargo.toml +24 -0
  43. data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
  44. data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
  45. data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
  46. data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
  47. data/ext/itsi_scheduler/src/lib.rs +38 -0
  48. data/ext/itsi_server/Cargo.lock +2956 -0
  49. data/ext/itsi_server/Cargo.toml +73 -13
  50. data/ext/itsi_server/extconf.rb +1 -1
  51. data/ext/itsi_server/src/env.rs +43 -0
  52. data/ext/itsi_server/src/lib.rs +100 -40
  53. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +109 -0
  54. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +141 -0
  55. data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
  56. data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
  57. data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
  58. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +282 -0
  59. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +388 -0
  60. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  61. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
  62. data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
  63. data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
  64. data/ext/itsi_server/src/server/bind.rs +75 -31
  65. data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
  66. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  67. data/ext/itsi_server/src/server/cache_store.rs +74 -0
  68. data/ext/itsi_server/src/server/io_stream.rs +104 -0
  69. data/ext/itsi_server/src/server/itsi_service.rs +172 -0
  70. data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
  71. data/ext/itsi_server/src/server/listener.rs +332 -132
  72. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
  73. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
  74. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
  75. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
  76. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +321 -0
  77. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
  78. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
  79. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
  80. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
  81. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
  82. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
  83. data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
  84. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
  85. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
  86. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  87. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
  88. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
  89. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
  90. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  91. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
  92. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
  93. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
  94. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
  95. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
  96. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  97. data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
  98. data/ext/itsi_server/src/server/mod.rs +15 -2
  99. data/ext/itsi_server/src/server/process_worker.rs +229 -0
  100. data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
  101. data/ext/itsi_server/src/server/request_job.rs +11 -0
  102. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +337 -0
  103. data/ext/itsi_server/src/server/serve_strategy/mod.rs +30 -0
  104. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +421 -0
  105. data/ext/itsi_server/src/server/signal.rs +93 -0
  106. data/ext/itsi_server/src/server/static_file_server.rs +984 -0
  107. data/ext/itsi_server/src/server/thread_worker.rs +444 -0
  108. data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +132 -0
  109. data/ext/itsi_server/src/server/tls.rs +187 -60
  110. data/ext/itsi_server/src/server/types.rs +43 -0
  111. data/ext/itsi_tracing/Cargo.toml +5 -0
  112. data/ext/itsi_tracing/src/lib.rs +225 -7
  113. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  114. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  115. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  116. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  117. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  118. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  119. data/lib/itsi/http_request.rb +87 -0
  120. data/lib/itsi/http_response.rb +39 -0
  121. data/lib/itsi/server/Itsi.rb +119 -0
  122. data/lib/itsi/server/config/dsl.rb +506 -0
  123. data/lib/itsi/server/config.rb +131 -0
  124. data/lib/itsi/server/default_app/default_app.rb +38 -0
  125. data/lib/itsi/server/default_app/index.html +91 -0
  126. data/lib/itsi/server/grpc_interface.rb +213 -0
  127. data/lib/itsi/server/rack/handler/itsi.rb +27 -0
  128. data/lib/itsi/server/rack_interface.rb +94 -0
  129. data/lib/itsi/server/scheduler_interface.rb +21 -0
  130. data/lib/itsi/server/scheduler_mode.rb +10 -0
  131. data/lib/itsi/server/signal_trap.rb +29 -0
  132. data/lib/itsi/server/version.rb +1 -1
  133. data/lib/itsi/server.rb +90 -9
  134. data/lib/itsi/standard_headers.rb +86 -0
  135. metadata +122 -31
  136. data/ext/itsi_server/src/request/itsi_request.rs +0 -143
  137. data/ext/itsi_server/src/request/mod.rs +0 -1
  138. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -32
  139. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -52
  140. data/ext/itsi_server/src/server/itsi_server.rs +0 -182
  141. data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
  142. data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
  143. data/lib/itsi/request.rb +0 -39
data/ext/itsi_server/src/server/thread_worker.rs
@@ -0,0 +1,444 @@
+ use async_channel::Sender;
+ use itsi_rb_helpers::{
+     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
+ };
+ use itsi_tracing::{debug, error, warn};
+ use magnus::{
+     error::Result,
+     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
+     Module, RClass, Ruby, Thread, Value,
+ };
+ use nix::unistd::Pid;
+ use parking_lot::{Mutex, RwLock};
+ use std::{
+     num::NonZeroU8,
+     ops::Deref,
+     sync::{
+         atomic::{AtomicBool, AtomicU64, Ordering},
+         Arc,
+     },
+     thread,
+     time::{Duration, Instant, SystemTime, UNIX_EPOCH},
+ };
+ use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
+ use tracing::instrument;
+
+ use crate::ruby_types::{
+     itsi_grpc_request::ItsiGrpcRequest, itsi_http_request::ItsiHttpRequest,
+     itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+ };
+
+ use super::request_job::RequestJob;
+ pub struct ThreadWorker {
+     pub params: Arc<ServerParams>,
+     pub id: u8,
+     pub name: String,
+     pub request_id: AtomicU64,
+     pub current_request_start: AtomicU64,
+     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
+     pub sender: Sender<RequestJob>,
+     pub thread: RwLock<Option<HeapValue<Thread>>>,
+     pub terminated: Arc<AtomicBool>,
+     pub scheduler_class: Option<Opaque<Value>>,
+ }
+
+ static ID_ALIVE: LazyId = LazyId::new("alive?");
+ static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
+ static ID_SCHEDULE: LazyId = LazyId::new("schedule");
+ static ID_BLOCK: LazyId = LazyId::new("block");
+ static ID_YIELD: LazyId = LazyId::new("yield");
+ static ID_CONST_GET: LazyId = LazyId::new("const_get");
+ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
+     ruby.module_kernel()
+         .const_get::<_, RClass>("Fiber")
+         .unwrap()
+ });
+
+ pub struct TerminateWakerSignal(bool);
+ type ThreadWorkerBuildResult = Result<(Arc<Vec<Arc<ThreadWorker>>>, Sender<RequestJob>)>;
+
+ #[instrument(name = "boot", parent=None, skip(params, threads, pid))]
+ pub fn build_thread_workers(
+     params: Arc<ServerParams>,
+     pid: Pid,
+     threads: NonZeroU8,
+ ) -> ThreadWorkerBuildResult {
+     let (sender, receiver) = async_channel::bounded((threads.get() as u16 * 30) as usize);
+     let receiver_ref = Arc::new(receiver);
+     let sender_ref = sender;
+     let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
+     Ok((
+         Arc::new(
+             (1..=u8::from(threads))
+                 .map(|id| {
+                     ThreadWorker::new(
+                         params.clone(),
+                         id,
+                         format!("{:?}#{:?}", pid, id),
+                         receiver_ref.clone(),
+                         sender_ref.clone(),
+                         scheduler_class,
+                     )
+                 })
+                 .collect::<Result<Vec<_>>>()?,
+         ),
+         sender_ref,
+     ))
+ }
+
+ pub fn load_scheduler_class(scheduler_class: Option<String>) -> Result<Option<Opaque<Value>>> {
+     call_with_gvl(|ruby| {
+         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
+             Some(Opaque::from(
+                 ruby.module_kernel()
+                     .funcall::<_, _, Value>(*ID_CONST_GET, (scheduler_class,))?,
+             ))
+         } else {
+             None
+         };
+         Ok(scheduler_class)
+     })
+ }
+ impl ThreadWorker {
+     pub fn new(
+         params: Arc<ServerParams>,
+         id: u8,
+         name: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         sender: Sender<RequestJob>,
+         scheduler_class: Option<Opaque<Value>>,
+     ) -> Result<Arc<Self>> {
+         let worker = Arc::new(Self {
+             params,
+             id,
+             request_id: AtomicU64::new(0),
+             current_request_start: AtomicU64::new(0),
+             name,
+             receiver,
+             sender,
+             thread: RwLock::new(None),
+             terminated: Arc::new(AtomicBool::new(false)),
+             scheduler_class,
+         });
+         worker.clone().run()?;
+         Ok(worker)
+     }
+
+     #[instrument(skip(self, deadline), fields(id = self.id))]
+     pub fn poll_shutdown(&self, deadline: Instant) -> bool {
+         if let Some(thread) = self.thread.read().deref() {
+             if Instant::now() > deadline {
+                 warn!("Worker shutdown timed out. Killing thread");
+                 self.terminated.store(true, Ordering::SeqCst);
+                 kill_threads(vec![thread.as_value()]);
+             }
+             debug!("Checking thread status");
+             if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                 return true;
+             }
+             debug!("Thread has shut down");
+         }
+         self.thread.write().take();
+
+         false
+     }
+
+     pub fn run(self: Arc<Self>) -> Result<()> {
+         let name = self.name.clone();
+         let receiver = self.receiver.clone();
+         let terminated = self.terminated.clone();
+         let scheduler_class = self.scheduler_class;
+         let params = self.params.clone();
+         let self_ref = self.clone();
+         call_with_gvl(|_| {
+             *self.thread.write() = Some(
+                 create_ruby_thread(move || {
+                     if let Some(scheduler_class) = scheduler_class {
+                         if let Err(err) = self_ref.fiber_accept_loop(
+                             params,
+                             name,
+                             receiver,
+                             scheduler_class,
+                             terminated,
+                         ) {
+                             error!("Error in fiber_accept_loop: {:?}", err);
+                         }
+                     } else {
+                         self_ref.accept_loop(params, name, receiver, terminated);
+                     }
+                 })
+                 .into(),
+             );
+             Ok::<(), magnus::Error>(())
+         })?;
+         Ok(())
+     }
+
+     pub fn build_scheduler_proc(
+         self: Arc<Self>,
+         leader: &Arc<Mutex<Option<RequestJob>>>,
+         receiver: &Arc<async_channel::Receiver<RequestJob>>,
+         terminated: &Arc<AtomicBool>,
+         waker_sender: &watch::Sender<TerminateWakerSignal>,
+         oob_gc_responses_threshold: Option<u64>,
+     ) -> magnus::block::Proc {
+         let leader = leader.clone();
+         let receiver = receiver.clone();
+         let terminated = terminated.clone();
+         let waker_sender = waker_sender.clone();
+         Ruby::get().unwrap().proc_from_fn(move |ruby, _args, _blk| {
+             let scheduler = ruby
+                 .get_inner(&CLASS_FIBER)
+                 .funcall::<_, _, Value>(*ID_SCHEDULER, ())
+                 .unwrap();
+             let server = ruby.get_inner(&ITSI_SERVER);
+             let thread_current = ruby.thread_current();
+             let leader_clone = leader.clone();
+             let receiver = receiver.clone();
+             let terminated = terminated.clone();
+             let waker_sender = waker_sender.clone();
+             let self_ref = self.clone();
+             let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);
+
+             static MAX_BATCH_SIZE: i32 = 25;
+             call_without_gvl(move || loop {
+                 let mut idle_counter = 0;
+                 if let Some(v) = leader_clone.lock().take() {
+                     match v {
+                         RequestJob::ProcessHttpRequest(itsi_request, app_proc) => {
+                             batch.push(RequestJob::ProcessHttpRequest(itsi_request, app_proc))
+                         }
+                         RequestJob::ProcessGrpcRequest(itsi_request, app_proc) => {
+                             batch.push(RequestJob::ProcessGrpcRequest(itsi_request, app_proc))
+                         }
+                         RequestJob::Shutdown => {
+                             waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                             break;
+                         }
+                     }
+                 }
+                 for _ in 0..MAX_BATCH_SIZE {
+                     if let Ok(req) = receiver.try_recv() {
+                         batch.push(req);
+                     } else {
+                         break;
+                     }
+                 }
+
+                 let shutdown_requested = call_with_gvl(|_| {
+                     for req in batch.drain(..) {
+                         match req {
+                             RequestJob::ProcessHttpRequest(request, app_proc) => {
+                                 self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                                 self_ref.current_request_start.store(
+                                     SystemTime::now()
+                                         .duration_since(UNIX_EPOCH)
+                                         .unwrap()
+                                         .as_secs(),
+                                     Ordering::Relaxed,
+                                 );
+                                 let response = request.response.clone();
+                                 if let Err(err) = server.funcall::<_, _, Value>(
+                                     *ID_SCHEDULE,
+                                     (app_proc.as_value(), request),
+                                 ) {
+                                     ItsiHttpRequest::internal_error(ruby, response, err)
+                                 }
+                             }
+                             RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                                 self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                                 self_ref.current_request_start.store(
+                                     SystemTime::now()
+                                         .duration_since(UNIX_EPOCH)
+                                         .unwrap()
+                                         .as_secs(),
+                                     Ordering::Relaxed,
+                                 );
+                                 let response = request.stream.clone();
+                                 if let Err(err) = server.funcall::<_, _, Value>(
+                                     *ID_SCHEDULE,
+                                     (app_proc.as_value(), request),
+                                 ) {
+                                     ItsiGrpcRequest::internal_error(ruby, response, err)
+                                 }
+                             }
+                             RequestJob::Shutdown => return true,
+                         }
+                     }
+                     false
+                 });
+
+                 if shutdown_requested || terminated.load(Ordering::Relaxed) {
+                     waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                     break;
+                 }
+
+                 let yield_result = if receiver.is_empty() {
+                     let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                         idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                         idle_counter == 0
+                     } else {
+                         false
+                     };
+                     waker_sender.send(TerminateWakerSignal(false)).unwrap();
+                     call_with_gvl(|ruby| {
+                         if should_gc {
+                             ruby.gc_start();
+                         }
+                         scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
+                     })
+                 } else {
+                     call_with_gvl(|_| scheduler.funcall::<_, _, Value>(*ID_YIELD, ()))
+                 };
+
+                 if yield_result.is_err() {
+                     break;
+                 }
+             })
+         })
+     }
+
+     #[instrument(skip_all, fields(thread_worker=name))]
+     pub fn fiber_accept_loop(
+         self: Arc<Self>,
+         params: Arc<ServerParams>,
+         name: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         scheduler_class: Opaque<Value>,
+         terminated: Arc<AtomicBool>,
+     ) -> Result<()> {
+         let ruby = Ruby::get().unwrap();
+         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
+         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
+         let server_class = ruby.get_inner(&ITSI_SERVER);
+         let scheduler_proc = self.build_scheduler_proc(
+             &leader,
+             &receiver,
+             &terminated,
+             &waker_sender,
+             params.oob_gc_responses_threshold,
+         );
+         let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
+             "start_scheduler_loop",
+             (scheduler_class, scheduler_proc),
+         )?;
+         Self::start_waker_thread(
+             scheduler.into(),
+             scheduler_fiber.into(),
+             leader,
+             receiver,
+             waker_receiver,
+         );
+         Ok(())
+     }
+
+     #[allow(clippy::await_holding_lock)]
+     pub fn start_waker_thread(
+         scheduler: Opaque<Value>,
+         scheduler_fiber: Opaque<Value>,
+         leader: Arc<Mutex<Option<RequestJob>>>,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         mut waker_receiver: watch::Receiver<TerminateWakerSignal>,
+     ) {
+         create_ruby_thread(move || {
+             let scheduler = scheduler.get_inner_with(&Ruby::get().unwrap());
+             let leader = leader.clone();
+             call_without_gvl(|| {
+                 RuntimeBuilder::new_current_thread()
+                     .build()
+                     .expect("Failed to build Tokio runtime")
+                     .block_on(async {
+                         loop {
+                             waker_receiver.changed().await.ok();
+                             if waker_receiver.borrow().0 {
+                                 break;
+                             }
+                             tokio::select! {
+                                 _ = waker_receiver.changed() => {
+                                     if waker_receiver.borrow().0 {
+                                         break;
+                                     }
+                                 },
+                                 next_msg = receiver.recv() => {
+                                     *leader.lock() = next_msg.ok();
+                                     call_with_gvl(|_| {
+                                         scheduler
+                                             .funcall::<_, _, Value>(
+                                                 "unblock",
+                                                 (None::<u8>, scheduler_fiber),
+                                             )
+                                             .ok();
+                                     });
+                                 }
+                             }
+                         }
+                     })
+             });
+         });
+     }
+
+     #[instrument(skip_all, fields(thread_worker=id))]
+     pub fn accept_loop(
+         self: Arc<Self>,
+         params: Arc<ServerParams>,
+         id: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         terminated: Arc<AtomicBool>,
+     ) {
+         let ruby = Ruby::get().unwrap();
+         let mut idle_counter = 0;
+         let self_ref = self.clone();
+         call_without_gvl(|| loop {
+             if receiver.is_empty() {
+                 if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
+                     idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                     if idle_counter == 0 {
+                         call_with_gvl(|_ruby| {
+                             ruby.gc_start();
+                         });
+                     }
+                 };
+             }
+             match receiver.recv_blocking() {
+                 Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
+                     self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                     self_ref.current_request_start.store(
+                         SystemTime::now()
+                             .duration_since(UNIX_EPOCH)
+                             .unwrap()
+                             .as_secs(),
+                         Ordering::Relaxed,
+                     );
+                     call_with_gvl(|_ruby| {
+                         request.process(&ruby, app_proc).ok();
+                     });
+                     if terminated.load(Ordering::Relaxed) {
+                         break;
+                     }
+                 }
+                 Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
+                     self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                     self_ref.current_request_start.store(
+                         SystemTime::now()
+                             .duration_since(UNIX_EPOCH)
+                             .unwrap()
+                             .as_secs(),
+                         Ordering::Relaxed,
+                     );
+                     call_with_gvl(|_ruby| {
+                         request.process(&ruby, app_proc).ok();
+                     });
+                     if terminated.load(Ordering::Relaxed) {
+                         break;
+                     }
+                 }
+                 Ok(RequestJob::Shutdown) => {
+                     break;
+                 }
+                 Err(_) => {
+                     thread::sleep(Duration::from_micros(1));
+                 }
+             }
+         });
+     }
+ }
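
For orientation: the file above is a bounded multi-producer/multi-consumer work queue. build_thread_workers creates one async_channel shared by every ThreadWorker, and each worker thread loops in accept_loop (or fiber_accept_loop when a Fiber scheduler class is configured) until a RequestJob::Shutdown arrives. Below is a minimal sketch of that shape only, using the standard library and hypothetical names (Job, payload) rather than the gem's Ruby-backed types, so it omits the GVL handling and fiber scheduler entirely.

// Sketch of the shared-queue worker-pool pattern, not code from itsi-server.
use std::sync::{mpsc, Arc, Mutex};
use std::thread;

enum Job {
    Process(String), // stands in for ProcessHttpRequest / ProcessGrpcRequest
    Shutdown,
}

fn main() {
    let (sender, receiver) = mpsc::channel::<Job>();
    // std's receiver is single-consumer, so wrap it in a Mutex to share it,
    // where the real code uses an MPMC async_channel receiver directly.
    let receiver = Arc::new(Mutex::new(receiver));

    let workers: Vec<_> = (1..=4u8)
        .map(|id| {
            let receiver = Arc::clone(&receiver);
            thread::spawn(move || loop {
                // Take the mutex to pull the next job from the shared queue.
                let job = receiver.lock().unwrap().recv();
                match job {
                    Ok(Job::Process(payload)) => println!("worker {id} handled {payload}"),
                    Ok(Job::Shutdown) | Err(_) => break,
                }
            })
        })
        .collect();

    for n in 0..8 {
        sender.send(Job::Process(format!("request {n}"))).unwrap();
    }
    // One Shutdown per worker, mirroring RequestJob::Shutdown.
    for _ in &workers {
        sender.send(Job::Shutdown).unwrap();
    }
    for w in workers {
        w.join().unwrap();
    }
}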
data/ext/itsi_server/src/server/tls/locked_dir_cache.rs
@@ -0,0 +1,132 @@
+ use async_trait::async_trait;
+ use fs2::FileExt;
+ use parking_lot::Mutex;
+ use std::fs::{self, OpenOptions};
+ use std::io::Error as IoError;
+ use std::path::{Path, PathBuf};
+ use tokio_rustls_acme::caches::DirCache;
+ use tokio_rustls_acme::{AccountCache, CertCache};
+
+ use crate::env::ITSI_ACME_LOCK_FILE_NAME;
+
+ /// A wrapper around DirCache that locks a file before writing cert/account data.
+ pub struct LockedDirCache<P: AsRef<Path> + Send + Sync> {
+     inner: DirCache<P>,
+     lock_path: PathBuf,
+     current_lock: Mutex<Option<std::fs::File>>,
+ }
+
+ impl<P: AsRef<Path> + Send + Sync> LockedDirCache<P> {
+     pub fn new(dir: P) -> Self {
+         let dir_path = dir.as_ref().to_path_buf();
+         std::fs::create_dir_all(&dir_path).unwrap();
+         let lock_path = dir_path.join(&*ITSI_ACME_LOCK_FILE_NAME);
+         Self::touch_file(&lock_path).expect("Failed to create lock file");
+
+         Self {
+             inner: DirCache::new(dir),
+             lock_path,
+             current_lock: Mutex::new(None),
+         }
+     }
+
+     fn touch_file(path: &PathBuf) -> std::io::Result<()> {
+         if let Some(parent) = path.parent() {
+             fs::create_dir_all(parent)?;
+         }
+         fs::OpenOptions::new()
+             .create(true)
+             .write(true)
+             .truncate(true)
+             .open(path)?;
+         Ok(())
+     }
+
+     fn lock_exclusive(&self) -> Result<(), IoError> {
+         if self.current_lock.lock().is_some() {
+             return Ok(());
+         }
+
+         if let Some(parent) = self.lock_path.parent() {
+             std::fs::create_dir_all(parent)?;
+         }
+         let lockfile = OpenOptions::new()
+             .create(true)
+             .write(true)
+             .truncate(true)
+             .open(&self.lock_path)?;
+         lockfile.lock_exclusive()?;
+         *self.current_lock.lock() = Some(lockfile);
+         Ok(())
+     }
+
+     fn unlock(&self) -> Result<(), IoError> {
+         self.current_lock.lock().take();
+         Ok(())
+     }
+ }
+
+ #[async_trait]
+ impl<P: AsRef<Path> + Send + Sync> CertCache for LockedDirCache<P> {
+     type EC = IoError;
+
+     async fn load_cert(
+         &self,
+         domains: &[String],
+         directory_url: &str,
+     ) -> Result<Option<Vec<u8>>, Self::EC> {
+         self.lock_exclusive()?;
+         let result = self.inner.load_cert(domains, directory_url).await;
+
+         if let Ok(Some(_)) = result {
+             self.unlock()?;
+         }
+
+         result
+     }
+
+     async fn store_cert(
+         &self,
+         domains: &[String],
+         directory_url: &str,
+         cert: &[u8],
+     ) -> Result<(), Self::EC> {
+         // Acquire the lock before storing
+         self.lock_exclusive()?;
+
+         // Perform the store operation
+         let result = self.inner.store_cert(domains, directory_url, cert).await;
+
+         if let Ok(()) = result {
+             self.unlock()?;
+         }
+         result
+     }
+ }
+
+ #[async_trait]
+ impl<P: AsRef<Path> + Send + Sync> AccountCache for LockedDirCache<P> {
+     type EA = IoError;
+
+     async fn load_account(
+         &self,
+         contact: &[String],
+         directory_url: &str,
+     ) -> Result<Option<Vec<u8>>, Self::EA> {
+         self.lock_exclusive()?;
+         self.inner.load_account(contact, directory_url).await
+     }
+
+     async fn store_account(
+         &self,
+         contact: &[String],
+         directory_url: &str,
+         account: &[u8],
+     ) -> Result<(), Self::EA> {
+         self.lock_exclusive()?;
+
+         self.inner
+             .store_account(contact, directory_url, account)
+             .await
+     }
+ }
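
LockedDirCache wraps tokio_rustls_acme's DirCache so that multiple processes sharing one ACME cache directory do not write certificate or account data concurrently: every load/store first takes an exclusive fs2 advisory lock on a dedicated lock file, and the open lock file is held in current_lock until the data is stored. The sketch below shows only that locking mechanism, with a hypothetical lock path and helper name (with_exclusive_lock); it assumes the fs2 crate and is not part of the gem.

// Sketch of the advisory file-lock pattern LockedDirCache relies on.
use fs2::FileExt;
use std::fs::OpenOptions;
use std::io;
use std::path::Path;

fn with_exclusive_lock<T>(
    lock_path: &Path,
    critical_section: impl FnOnce() -> T,
) -> io::Result<T> {
    let file = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(lock_path)?;
    file.lock_exclusive()?; // blocks until no other process holds the lock
    let result = critical_section();
    file.unlock()?; // explicit; dropping `file` would also release the lock
    Ok(result)
}

fn main() -> io::Result<()> {
    // Hypothetical path; the gem derives its path from ITSI_ACME_LOCK_FILE_NAME.
    let updated = with_exclusive_lock(Path::new("/tmp/acme.lock"), || {
        // e.g. write certificate bytes into the shared cache directory here
        true
    })?;
    println!("cache updated: {updated}");
    Ok(())
}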