itsi-scheduler 0.1.5 → 0.1.14

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of itsi-scheduler might be problematic.

Files changed (112)
  1. checksums.yaml +4 -4
  2. data/CODE_OF_CONDUCT.md +7 -0
  3. data/Cargo.lock +83 -22
  4. data/README.md +5 -0
  5. data/_index.md +7 -0
  6. data/ext/itsi_error/src/from.rs +26 -29
  7. data/ext/itsi_error/src/lib.rs +10 -1
  8. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  9. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  10. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  11. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  12. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  13. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  14. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  15. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  16. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  22. data/ext/itsi_rb_helpers/Cargo.toml +1 -0
  23. data/ext/itsi_rb_helpers/src/heap_value.rs +18 -0
  24. data/ext/itsi_rb_helpers/src/lib.rs +59 -9
  25. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  26. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  27. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  28. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  29. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  30. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  31. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  32. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  33. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  34. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  35. data/ext/itsi_server/Cargo.lock +2956 -0
  36. data/ext/itsi_server/Cargo.toml +69 -26
  37. data/ext/itsi_server/src/env.rs +43 -0
  38. data/ext/itsi_server/src/lib.rs +81 -75
  39. data/ext/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
  40. data/ext/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +22 -3
  41. data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
  42. data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
  43. data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
  44. data/ext/itsi_server/src/{request/itsi_request.rs → ruby_types/itsi_http_request.rs} +108 -103
  45. data/ext/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +79 -38
  46. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  47. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
  48. data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
  49. data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
  50. data/ext/itsi_server/src/server/bind.rs +33 -20
  51. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  52. data/ext/itsi_server/src/server/cache_store.rs +74 -0
  53. data/ext/itsi_server/src/server/itsi_service.rs +172 -0
  54. data/ext/itsi_server/src/server/lifecycle_event.rs +3 -0
  55. data/ext/itsi_server/src/server/listener.rs +197 -106
  56. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
  57. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
  58. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
  59. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
  60. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +264 -0
  61. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
  62. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
  63. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
  64. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
  65. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
  66. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
  67. data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
  68. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
  69. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
  70. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  71. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
  72. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
  73. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
  74. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  75. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
  76. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
  77. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
  78. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
  79. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
  80. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  81. data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
  82. data/ext/itsi_server/src/server/mod.rs +8 -1
  83. data/ext/itsi_server/src/server/process_worker.rs +44 -11
  84. data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
  85. data/ext/itsi_server/src/server/request_job.rs +11 -0
  86. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +129 -46
  87. data/ext/itsi_server/src/server/serve_strategy/mod.rs +9 -6
  88. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +337 -163
  89. data/ext/itsi_server/src/server/signal.rs +25 -2
  90. data/ext/itsi_server/src/server/static_file_server.rs +984 -0
  91. data/ext/itsi_server/src/server/thread_worker.rs +164 -88
  92. data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +55 -17
  93. data/ext/itsi_server/src/server/tls.rs +104 -28
  94. data/ext/itsi_server/src/server/types.rs +43 -0
  95. data/ext/itsi_tracing/Cargo.toml +1 -0
  96. data/ext/itsi_tracing/src/lib.rs +222 -34
  97. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  98. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  99. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  100. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  101. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  102. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  103. data/lib/itsi/scheduler/version.rb +1 -1
  104. data/lib/itsi/scheduler.rb +2 -2
  105. metadata +79 -14
  106. data/ext/itsi_server/extconf.rb +0 -6
  107. data/ext/itsi_server/src/body_proxy/mod.rs +0 -2
  108. data/ext/itsi_server/src/request/mod.rs +0 -1
  109. data/ext/itsi_server/src/response/mod.rs +0 -1
  110. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -13
  111. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -5
  112. data/ext/itsi_server/src/server/itsi_server.rs +0 -244
data/ext/itsi_server/src/server/thread_worker.rs

@@ -1,9 +1,8 @@
-use super::itsi_server::RequestJob;
-use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
+use async_channel::Sender;
 use itsi_rb_helpers::{
     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
 };
-use itsi_tracing::{debug, error, info, warn};
+use itsi_tracing::{debug, error, warn};
 use magnus::{
     error::Result,
     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
@@ -15,25 +14,34 @@ use std::{
     num::NonZeroU8,
     ops::Deref,
     sync::{
-        atomic::{AtomicBool, Ordering},
+        atomic::{AtomicBool, AtomicU64, Ordering},
         Arc,
     },
     thread,
-    time::{Duration, Instant},
+    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
 };
 use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
 use tracing::instrument;
+
+use crate::ruby_types::{
+    itsi_grpc_request::ItsiGrpcRequest, itsi_http_request::ItsiHttpRequest,
+    itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+};
+
+use super::request_job::RequestJob;
 pub struct ThreadWorker {
-    pub id: String,
-    pub app: Opaque<Value>,
+    pub params: Arc<ServerParams>,
+    pub id: u8,
+    pub name: String,
+    pub request_id: AtomicU64,
+    pub current_request_start: AtomicU64,
     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
-    pub sender: async_channel::Sender<RequestJob>,
+    pub sender: Sender<RequestJob>,
     pub thread: RwLock<Option<HeapValue<Thread>>>,
     pub terminated: Arc<AtomicBool>,
    pub scheduler_class: Option<Opaque<Value>>,
 }

-static ID_CALL: LazyId = LazyId::new("call");
 static ID_ALIVE: LazyId = LazyId::new("alive?");
 static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
 static ID_SCHEDULE: LazyId = LazyId::new("schedule");
@@ -47,26 +55,26 @@ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
 });

 pub struct TerminateWakerSignal(bool);
+type ThreadWorkerBuildResult = Result<(Arc<Vec<Arc<ThreadWorker>>>, Sender<RequestJob>)>;

-#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
+#[instrument(name = "boot", parent=None, skip(params, threads, pid))]
 pub fn build_thread_workers(
+    params: Arc<ServerParams>,
     pid: Pid,
     threads: NonZeroU8,
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
-    let (sender, receiver) = async_channel::bounded(20);
+) -> ThreadWorkerBuildResult {
+    let (sender, receiver) = async_channel::bounded((threads.get() as u16 * 30) as usize);
     let receiver_ref = Arc::new(receiver);
     let sender_ref = sender;
-    let (app, scheduler_class) = load_app(app, scheduler_class)?;
+    let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
     Ok((
         Arc::new(
             (1..=u8::from(threads))
                 .map(|id| {
-                    info!(pid = pid.as_raw(), id, "Thread");
                     ThreadWorker::new(
+                        params.clone(),
+                        id,
                         format!("{:?}#{:?}", pid, id),
-                        app,
                         receiver_ref.clone(),
                         sender_ref.clone(),
                         scheduler_class,
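The worker pool's job queue is now sized relative to the configured thread count (30 slots per thread) rather than a fixed capacity of 20, so backpressure scales with parallelism. A minimal sketch of that sizing idea with async_channel, outside the Itsi codebase (the per-thread factor and the string job type here are illustrative assumptions):

```rust
use async_channel::{bounded, Receiver, Sender};

/// Build a bounded job queue whose capacity scales with the worker count,
/// mirroring the `threads * 30` sizing in the hunk above (the factor is illustrative).
fn build_job_queue(worker_count: u8) -> (Sender<String>, Receiver<String>) {
    bounded(worker_count as usize * 30)
}

fn main() {
    let (tx, rx) = build_job_queue(4);
    // With 4 workers the queue holds 120 jobs before senders start to wait.
    for i in 0..5 {
        tx.try_send(format!("job-{i}")).expect("queue has spare capacity");
    }
    while let Ok(job) = rx.try_recv() {
        println!("dequeued {job}");
    }
}
```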
@@ -78,16 +86,8 @@ pub fn build_thread_workers(
     ))
 }

-pub fn load_app(
-    app: Opaque<Value>,
-    scheduler_class: Option<String>,
-) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
+pub fn load_scheduler_class(scheduler_class: Option<String>) -> Result<Option<Opaque<Value>>> {
     call_with_gvl(|ruby| {
-        let app = app.get_inner_with(&ruby);
-        let app = Opaque::from(
-            app.funcall::<_, _, Value>(*ID_CALL, ())
-                .expect("Couldn't load app"),
-        );
         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
             Some(Opaque::from(
                 ruby.module_kernel()
@@ -96,76 +96,75 @@ pub fn load_app(
         } else {
             None
         };
-        Ok((app, scheduler_class))
+        Ok(scheduler_class)
     })
 }
 impl ThreadWorker {
     pub fn new(
-        id: String,
-        app: Opaque<Value>,
+        params: Arc<ServerParams>,
+        id: u8,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
-        sender: async_channel::Sender<RequestJob>,
+        sender: Sender<RequestJob>,
         scheduler_class: Option<Opaque<Value>>,
-    ) -> Result<Self> {
-        let mut worker = Self {
+    ) -> Result<Arc<Self>> {
+        let worker = Arc::new(Self {
+            params,
             id,
-            app,
+            request_id: AtomicU64::new(0),
+            current_request_start: AtomicU64::new(0),
+            name,
             receiver,
             sender,
             thread: RwLock::new(None),
             terminated: Arc::new(AtomicBool::new(false)),
             scheduler_class,
-        };
-        worker.run()?;
+        });
+        worker.clone().run()?;
         Ok(worker)
     }

-    #[instrument(skip(self), fields(id = self.id))]
-    pub async fn request_shutdown(&self) {
-        match self.sender.send(RequestJob::Shutdown).await {
-            Ok(_) => {}
-            Err(err) => error!("Failed to send shutdown request: {}", err),
-        };
-        info!("Requesting shutdown");
-    }
-
     #[instrument(skip(self, deadline), fields(id = self.id))]
     pub fn poll_shutdown(&self, deadline: Instant) -> bool {
-        call_with_gvl(|_ruby| {
-            if let Some(thread) = self.thread.read().deref() {
-                if Instant::now() > deadline {
-                    warn!("Worker shutdown timed out. Killing thread");
-                    self.terminated.store(true, Ordering::SeqCst);
-                    kill_threads(vec![thread.as_value()]);
-                }
-                if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
-                    return true;
-                }
-                info!("Thread has shut down");
+        if let Some(thread) = self.thread.read().deref() {
+            if Instant::now() > deadline {
+                warn!("Worker shutdown timed out. Killing thread");
+                self.terminated.store(true, Ordering::SeqCst);
+                kill_threads(vec![thread.as_value()]);
             }
-            self.thread.write().take();
+            debug!("Checking thread status");
+            if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                return true;
+            }
+            debug!("Thread has shut down");
+        }
+        self.thread.write().take();

-            false
-        })
+        false
     }

-    pub fn run(&mut self) -> Result<()> {
-        let id = self.id.clone();
-        let app = self.app;
+    pub fn run(self: Arc<Self>) -> Result<()> {
+        let name = self.name.clone();
         let receiver = self.receiver.clone();
         let terminated = self.terminated.clone();
         let scheduler_class = self.scheduler_class;
+        let params = self.params.clone();
+        let self_ref = self.clone();
         call_with_gvl(|_| {
             *self.thread.write() = Some(
                 create_ruby_thread(move || {
                     if let Some(scheduler_class) = scheduler_class {
-                        if let Err(err) =
-                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
-                        {
+                        if let Err(err) = self_ref.fiber_accept_loop(
+                            params,
+                            name,
+                            receiver,
+                            scheduler_class,
+                            terminated,
+                        ) {
                             error!("Error in fiber_accept_loop: {:?}", err);
                         }
                     } else {
-                        Self::accept_loop(id, app, receiver, terminated);
+                        self_ref.accept_loop(params, name, receiver, terminated);
                     }
                 })
                 .into(),
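poll_shutdown no longer re-enters the GVL and now force-terminates a worker whose thread is still alive once the shutdown deadline has passed. A rough sketch of that deadline-polling pattern in plain Rust (the alive callback and terminated flag stand in for the Ruby thread checks and the kill_threads call; they are not the gem's API):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// Returns true while the worker is still alive; once the deadline passes,
/// the terminated flag is set so the worker exits on its next check.
fn poll_shutdown(alive: impl Fn() -> bool, terminated: &AtomicBool, deadline: Instant) -> bool {
    if Instant::now() > deadline {
        terminated.store(true, Ordering::SeqCst);
    }
    alive()
}

fn main() {
    let terminated = Arc::new(AtomicBool::new(false));
    let deadline = Instant::now() + Duration::from_millis(50);
    // Pretend the worker stays alive until it observes the terminated flag.
    let flag = terminated.clone();
    let alive = move || !flag.load(Ordering::SeqCst);
    while poll_shutdown(&alive, &terminated, deadline) {
        std::thread::sleep(Duration::from_millis(10));
    }
    println!("worker shut down");
}
```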
@@ -176,11 +175,12 @@ impl ThreadWorker {
     }

     pub fn build_scheduler_proc(
-        app: Opaque<Value>,
+        self: Arc<Self>,
         leader: &Arc<Mutex<Option<RequestJob>>>,
         receiver: &Arc<async_channel::Receiver<RequestJob>>,
         terminated: &Arc<AtomicBool>,
         waker_sender: &watch::Sender<TerminateWakerSignal>,
+        oob_gc_responses_threshold: Option<u64>,
     ) -> magnus::block::Proc {
         let leader = leader.clone();
         let receiver = receiver.clone();
@@ -197,6 +197,7 @@ impl ThreadWorker {
         let receiver = receiver.clone();
         let terminated = terminated.clone();
         let waker_sender = waker_sender.clone();
+        let self_ref = self.clone();
         let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);

         static MAX_BATCH_SIZE: i32 = 25;
@@ -204,8 +205,11 @@ impl ThreadWorker {
             let mut idle_counter = 0;
             if let Some(v) = leader_clone.lock().take() {
                 match v {
-                    RequestJob::ProcessRequest(itsi_request) => {
-                        batch.push(RequestJob::ProcessRequest(itsi_request))
+                    RequestJob::ProcessHttpRequest(itsi_request, app_proc) => {
+                        batch.push(RequestJob::ProcessHttpRequest(itsi_request, app_proc))
+                    }
+                    RequestJob::ProcessGrpcRequest(itsi_request, app_proc) => {
+                        batch.push(RequestJob::ProcessGrpcRequest(itsi_request, app_proc))
                     }
                     RequestJob::Shutdown => {
                         waker_sender.send(TerminateWakerSignal(true)).unwrap();
@@ -224,12 +228,38 @@ impl ThreadWorker {
             let shutdown_requested = call_with_gvl(|_| {
                 for req in batch.drain(..) {
                     match req {
-                        RequestJob::ProcessRequest(request) => {
+                        RequestJob::ProcessHttpRequest(request, app_proc) => {
+                            self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                            self_ref.current_request_start.store(
+                                SystemTime::now()
+                                    .duration_since(UNIX_EPOCH)
+                                    .unwrap()
+                                    .as_secs(),
+                                Ordering::Relaxed,
+                            );
                             let response = request.response.clone();
-                            if let Err(err) =
-                                server.funcall::<_, _, Value>(*ID_SCHEDULE, (app, request))
-                            {
-                                ItsiRequest::internal_error(ruby, response, err)
+                            if let Err(err) = server.funcall::<_, _, Value>(
+                                *ID_SCHEDULE,
+                                (app_proc.as_value(), request),
+                            ) {
+                                ItsiHttpRequest::internal_error(ruby, response, err)
+                            }
+                        }
+                        RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                            self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                            self_ref.current_request_start.store(
+                                SystemTime::now()
+                                    .duration_since(UNIX_EPOCH)
+                                    .unwrap()
+                                    .as_secs(),
+                                Ordering::Relaxed,
+                            );
+                            let response = request.stream.clone();
+                            if let Err(err) = server.funcall::<_, _, Value>(
+                                *ID_SCHEDULE,
+                                (app_proc.as_value(), request),
+                            ) {
+                                ItsiGrpcRequest::internal_error(ruby, response, err)
                             }
                         }
                         RequestJob::Shutdown => return true,
@@ -244,10 +274,15 @@ impl ThreadWorker {
             }

             let yield_result = if receiver.is_empty() {
+                let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    idle_counter == 0
+                } else {
+                    false
+                };
                 waker_sender.send(TerminateWakerSignal(false)).unwrap();
-                idle_counter = (idle_counter + 1) % 100;
                 call_with_gvl(|ruby| {
-                    if idle_counter == 0 {
+                    if should_gc {
                         ruby.gc_start();
                     }
                     scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
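Out-of-band GC is now opt-in: the idle counter only advances when an oob_gc_responses_threshold is configured, instead of unconditionally triggering GC every 100 idle polls. A small sketch of just the counter logic (the threshold value and the gc_start call site are placeholders):

```rust
/// Advance the idle counter and report whether an out-of-band GC should run,
/// mirroring the opt-in threshold logic above.
fn advance_idle_counter(idle_counter: u64, threshold: Option<u64>) -> (u64, bool) {
    match threshold {
        Some(t) => {
            let next = (idle_counter + 1) % t;
            (next, next == 0)
        }
        // No threshold configured: never trigger GC from the idle path.
        None => (idle_counter, false),
    }
}

fn main() {
    let mut idle = 0;
    for poll in 1..=10 {
        let (next, should_gc) = advance_idle_counter(idle, Some(5));
        idle = next;
        if should_gc {
            println!("poll {poll}: would call ruby.gc_start() here");
        }
    }
}
```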
@@ -263,10 +298,11 @@ impl ThreadWorker {
         })
     }

-    #[instrument(skip_all, fields(thread_worker=id))]
+    #[instrument(skip_all, fields(thread_worker=name))]
     pub fn fiber_accept_loop(
-        id: String,
-        app: Opaque<Value>,
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
+        name: String,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         scheduler_class: Opaque<Value>,
         terminated: Arc<AtomicBool>,
@@ -274,10 +310,15 @@ impl ThreadWorker {
         let ruby = Ruby::get().unwrap();
         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
-        let server = ruby.get_inner(&ITSI_SERVER);
-        let scheduler_proc =
-            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
-        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
+        let server_class = ruby.get_inner(&ITSI_SERVER);
+        let scheduler_proc = self.build_scheduler_proc(
+            &leader,
+            &receiver,
+            &terminated,
+            &waker_sender,
+            params.oob_gc_responses_threshold,
+        );
+        let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
             "start_scheduler_loop",
             (scheduler_class, scheduler_proc),
         )?;
@@ -338,25 +379,60 @@ impl ThreadWorker {

     #[instrument(skip_all, fields(thread_worker=id))]
     pub fn accept_loop(
+        self: Arc<Self>,
+        params: Arc<ServerParams>,
         id: String,
-        app: Opaque<Value>,
         receiver: Arc<async_channel::Receiver<RequestJob>>,
         terminated: Arc<AtomicBool>,
     ) {
         let ruby = Ruby::get().unwrap();
-        let server = ruby.get_inner(&ITSI_SERVER);
+        let mut idle_counter = 0;
+        let self_ref = self.clone();
         call_without_gvl(|| loop {
+            if receiver.is_empty() {
+                if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
+                    idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                    if idle_counter == 0 {
+                        call_with_gvl(|_ruby| {
+                            ruby.gc_start();
+                        });
+                    }
+                };
+            }
             match receiver.recv_blocking() {
-                Ok(RequestJob::ProcessRequest(request)) => {
+                Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
+                    call_with_gvl(|_ruby| {
+                        request.process(&ruby, app_proc).ok();
+                    });
                     if terminated.load(Ordering::Relaxed) {
                         break;
                     }
+                }
+                Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
+                    self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                    self_ref.current_request_start.store(
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .unwrap()
+                            .as_secs(),
+                        Ordering::Relaxed,
+                    );
                     call_with_gvl(|_ruby| {
-                        request.process(&ruby, server, app).ok();
-                    })
+                        request.process(&ruby, app_proc).ok();
+                    });
+                    if terminated.load(Ordering::Relaxed) {
+                        break;
+                    }
                 }
                 Ok(RequestJob::Shutdown) => {
-                    debug!("Shutting down thread worker");
                     break;
                 }
                 Err(_) => {
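Both accept loops now bump a per-worker request counter and record the wall-clock start of the request being processed, so worker activity can be observed from other threads without locks. A condensed sketch of that bookkeeping (the WorkerStats struct is illustrative, not the gem's type):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

/// Minimal stand-in for the request_id / current_request_start fields added above.
struct WorkerStats {
    request_id: AtomicU64,
    current_request_start: AtomicU64,
}

impl WorkerStats {
    fn record_request_start(&self) {
        self.request_id.fetch_add(1, Ordering::Relaxed);
        let now_secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is before the UNIX epoch")
            .as_secs();
        // Relaxed ordering suffices: these are independent gauges, not a lock.
        self.current_request_start.store(now_secs, Ordering::Relaxed);
    }
}

fn main() {
    let stats = WorkerStats {
        request_id: AtomicU64::new(0),
        current_request_start: AtomicU64::new(0),
    };
    stats.record_request_start();
    println!(
        "handled {} request(s), last started {}s after the epoch",
        stats.request_id.load(Ordering::Relaxed),
        stats.current_request_start.load(Ordering::Relaxed)
    );
}
```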
data/ext/itsi_server/src/server/tls/locked_dir_cache.rs

@@ -1,34 +1,68 @@
 use async_trait::async_trait;
-use fs2::FileExt; // for lock_exclusive, unlock
-use std::fs::OpenOptions;
+use fs2::FileExt;
+use parking_lot::Mutex;
+use std::fs::{self, OpenOptions};
 use std::io::Error as IoError;
 use std::path::{Path, PathBuf};
 use tokio_rustls_acme::caches::DirCache;
 use tokio_rustls_acme::{AccountCache, CertCache};

+use crate::env::ITSI_ACME_LOCK_FILE_NAME;
+
 /// A wrapper around DirCache that locks a file before writing cert/account data.
 pub struct LockedDirCache<P: AsRef<Path> + Send + Sync> {
     inner: DirCache<P>,
     lock_path: PathBuf,
+    current_lock: Mutex<Option<std::fs::File>>,
 }

 impl<P: AsRef<Path> + Send + Sync> LockedDirCache<P> {
     pub fn new(dir: P) -> Self {
         let dir_path = dir.as_ref().to_path_buf();
-        let lock_path = dir_path.join(".acme.lock");
+        std::fs::create_dir_all(&dir_path).unwrap();
+        let lock_path = dir_path.join(&*ITSI_ACME_LOCK_FILE_NAME);
+        Self::touch_file(&lock_path).expect("Failed to create lock file");
+
         Self {
             inner: DirCache::new(dir),
             lock_path,
+            current_lock: Mutex::new(None),
+        }
+    }
+
+    fn touch_file(path: &PathBuf) -> std::io::Result<()> {
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
         }
+        fs::OpenOptions::new()
+            .create(true)
+            .write(true)
+            .truncate(true)
+            .open(path)?;
+        Ok(())
     }

-    fn lock_exclusive(&self) -> Result<std::fs::File, IoError> {
+    fn lock_exclusive(&self) -> Result<(), IoError> {
+        if self.current_lock.lock().is_some() {
+            return Ok(());
+        }
+
+        if let Some(parent) = self.lock_path.parent() {
+            std::fs::create_dir_all(parent)?;
+        }
         let lockfile = OpenOptions::new()
+            .create(true)
             .write(true)
             .truncate(true)
             .open(&self.lock_path)?;
         lockfile.lock_exclusive()?;
-        Ok(lockfile)
+        *self.current_lock.lock() = Some(lockfile);
+        Ok(())
+    }
+
+    fn unlock(&self) -> Result<(), IoError> {
+        self.current_lock.lock().take();
+        Ok(())
     }
 }

@@ -41,8 +75,14 @@ impl<P: AsRef<Path> + Send + Sync> CertCache for LockedDirCache<P> {
         domains: &[String],
         directory_url: &str,
     ) -> Result<Option<Vec<u8>>, Self::EC> {
-        // Just delegate to the inner DirCache
-        self.inner.load_cert(domains, directory_url).await
+        self.lock_exclusive()?;
+        let result = self.inner.load_cert(domains, directory_url).await;
+
+        if let Ok(Some(_)) = result {
+            self.unlock()?;
+        }
+
+        result
     }

     async fn store_cert(
@@ -52,13 +92,14 @@ impl<P: AsRef<Path> + Send + Sync> CertCache for LockedDirCache<P> {
         cert: &[u8],
     ) -> Result<(), Self::EC> {
         // Acquire the lock before storing
-        let lockfile = self.lock_exclusive()?;
+        self.lock_exclusive()?;

         // Perform the store operation
         let result = self.inner.store_cert(domains, directory_url, cert).await;

-        // Unlock and return
-        let _ = fs2::FileExt::unlock(&lockfile);
+        if let Ok(()) = result {
+            self.unlock()?;
+        }
         result
     }
 }
@@ -72,6 +113,7 @@ impl<P: AsRef<Path> + Send + Sync> AccountCache for LockedDirCache<P> {
         contact: &[String],
         directory_url: &str,
     ) -> Result<Option<Vec<u8>>, Self::EA> {
+        self.lock_exclusive()?;
         self.inner.load_account(contact, directory_url).await
     }

@@ -81,14 +123,10 @@ impl<P: AsRef<Path> + Send + Sync> AccountCache for LockedDirCache<P> {
         directory_url: &str,
         account: &[u8],
     ) -> Result<(), Self::EA> {
-        let lockfile = self.lock_exclusive()?;
+        self.lock_exclusive()?;

-        let result = self
-            .inner
+        self.inner
             .store_account(contact, directory_url, account)
-            .await;
-
-        let _ = fs2::FileExt::unlock(&lockfile);
-        result
+            .await
     }
 }
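The cache now keeps its advisory lock in a parking_lot::Mutex<Option<File>>: lock_exclusive is idempotent while the lock is held, and unlock releases it by dropping the file handle. A stripped-down sketch of the same fs2 locking pattern outside the ACME cache (the struct name and lock path are illustrative):

```rust
use fs2::FileExt;
use parking_lot::Mutex;
use std::fs::{self, File, OpenOptions};
use std::io::Error as IoError;
use std::path::PathBuf;

/// Holds an exclusive advisory lock on a lock file. Repeated `lock_exclusive`
/// calls are no-ops while the lock is held; `unlock` drops the handle to release it.
struct FileLock {
    lock_path: PathBuf,
    current_lock: Mutex<Option<File>>,
}

impl FileLock {
    fn lock_exclusive(&self) -> Result<(), IoError> {
        if self.current_lock.lock().is_some() {
            return Ok(()); // already held by this instance
        }
        if let Some(parent) = self.lock_path.parent() {
            fs::create_dir_all(parent)?;
        }
        let lockfile = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&self.lock_path)?;
        lockfile.lock_exclusive()?; // blocks until the OS grants the lock
        *self.current_lock.lock() = Some(lockfile);
        Ok(())
    }

    fn unlock(&self) {
        // Dropping the File releases the fs2 advisory lock.
        self.current_lock.lock().take();
    }
}

fn main() -> Result<(), IoError> {
    let lock = FileLock {
        lock_path: PathBuf::from("/tmp/example-acme.lock"), // illustrative path
        current_lock: Mutex::new(None),
    };
    lock.lock_exclusive()?;
    println!("exclusive section: safe to write cert/account data");
    lock.unlock();
    Ok(())
}
```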