itsi-server 0.1.1 → 0.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (184)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/CODE_OF_CONDUCT.md +7 -0
  4. data/Cargo.lock +3937 -0
  5. data/Cargo.toml +7 -0
  6. data/README.md +4 -0
  7. data/Rakefile +8 -1
  8. data/_index.md +6 -0
  9. data/exe/itsi +141 -46
  10. data/ext/itsi_error/Cargo.toml +3 -0
  11. data/ext/itsi_error/src/lib.rs +98 -24
  12. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  13. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  14. data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  15. data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  16. data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
  17. data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
  18. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
  19. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
  20. data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
  21. data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
  22. data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
  23. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
  24. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
  25. data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
  26. data/ext/itsi_instrument_entry/Cargo.toml +15 -0
  27. data/ext/itsi_instrument_entry/src/lib.rs +31 -0
  28. data/ext/itsi_rb_helpers/Cargo.toml +3 -0
  29. data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
  30. data/ext/itsi_rb_helpers/src/lib.rs +140 -10
  31. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
  32. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
  33. data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
  34. data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
  35. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
  36. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
  37. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
  38. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
  39. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
  40. data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
  41. data/ext/itsi_scheduler/Cargo.toml +24 -0
  42. data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
  43. data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
  44. data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
  45. data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
  46. data/ext/itsi_scheduler/src/lib.rs +38 -0
  47. data/ext/itsi_server/Cargo.lock +2956 -0
  48. data/ext/itsi_server/Cargo.toml +72 -14
  49. data/ext/itsi_server/extconf.rb +1 -1
  50. data/ext/itsi_server/src/default_responses/html/401.html +68 -0
  51. data/ext/itsi_server/src/default_responses/html/403.html +68 -0
  52. data/ext/itsi_server/src/default_responses/html/404.html +68 -0
  53. data/ext/itsi_server/src/default_responses/html/413.html +71 -0
  54. data/ext/itsi_server/src/default_responses/html/429.html +68 -0
  55. data/ext/itsi_server/src/default_responses/html/500.html +71 -0
  56. data/ext/itsi_server/src/default_responses/html/502.html +71 -0
  57. data/ext/itsi_server/src/default_responses/html/503.html +68 -0
  58. data/ext/itsi_server/src/default_responses/html/504.html +69 -0
  59. data/ext/itsi_server/src/default_responses/html/index.html +238 -0
  60. data/ext/itsi_server/src/default_responses/json/401.json +6 -0
  61. data/ext/itsi_server/src/default_responses/json/403.json +6 -0
  62. data/ext/itsi_server/src/default_responses/json/404.json +6 -0
  63. data/ext/itsi_server/src/default_responses/json/413.json +6 -0
  64. data/ext/itsi_server/src/default_responses/json/429.json +6 -0
  65. data/ext/itsi_server/src/default_responses/json/500.json +6 -0
  66. data/ext/itsi_server/src/default_responses/json/502.json +6 -0
  67. data/ext/itsi_server/src/default_responses/json/503.json +6 -0
  68. data/ext/itsi_server/src/default_responses/json/504.json +6 -0
  69. data/ext/itsi_server/src/default_responses/mod.rs +11 -0
  70. data/ext/itsi_server/src/env.rs +43 -0
  71. data/ext/itsi_server/src/lib.rs +132 -40
  72. data/ext/itsi_server/src/prelude.rs +2 -0
  73. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +109 -0
  74. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +143 -0
  75. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +344 -0
  76. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +264 -0
  77. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +345 -0
  78. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +391 -0
  79. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
  80. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +375 -0
  81. data/ext/itsi_server/src/ruby_types/itsi_server.rs +83 -0
  82. data/ext/itsi_server/src/ruby_types/mod.rs +48 -0
  83. data/ext/itsi_server/src/server/binds/bind.rs +201 -0
  84. data/ext/itsi_server/src/server/binds/bind_protocol.rs +37 -0
  85. data/ext/itsi_server/src/server/binds/listener.rs +432 -0
  86. data/ext/itsi_server/src/server/binds/mod.rs +4 -0
  87. data/ext/itsi_server/src/server/binds/tls/locked_dir_cache.rs +132 -0
  88. data/ext/itsi_server/src/server/binds/tls.rs +270 -0
  89. data/ext/itsi_server/src/server/byte_frame.rs +32 -0
  90. data/ext/itsi_server/src/server/http_message_types.rs +97 -0
  91. data/ext/itsi_server/src/server/io_stream.rs +105 -0
  92. data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
  93. data/ext/itsi_server/src/server/middleware_stack/middleware.rs +165 -0
  94. data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +56 -0
  95. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +87 -0
  96. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +86 -0
  97. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +285 -0
  98. data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +142 -0
  99. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +289 -0
  100. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +292 -0
  101. data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +55 -0
  102. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +190 -0
  103. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +157 -0
  104. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +195 -0
  105. data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +82 -0
  106. data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +201 -0
  107. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
  108. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +47 -0
  109. data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +87 -0
  110. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +414 -0
  111. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +131 -0
  112. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
  113. data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +44 -0
  114. data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +36 -0
  115. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +126 -0
  116. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +180 -0
  117. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +55 -0
  118. data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +163 -0
  119. data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
  120. data/ext/itsi_server/src/server/middleware_stack/mod.rs +347 -0
  121. data/ext/itsi_server/src/server/mod.rs +12 -5
  122. data/ext/itsi_server/src/server/process_worker.rs +247 -0
  123. data/ext/itsi_server/src/server/request_job.rs +11 -0
  124. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +342 -0
  125. data/ext/itsi_server/src/server/serve_strategy/mod.rs +30 -0
  126. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +421 -0
  127. data/ext/itsi_server/src/server/signal.rs +76 -0
  128. data/ext/itsi_server/src/server/size_limited_incoming.rs +101 -0
  129. data/ext/itsi_server/src/server/thread_worker.rs +475 -0
  130. data/ext/itsi_server/src/services/cache_store.rs +74 -0
  131. data/ext/itsi_server/src/services/itsi_http_service.rs +239 -0
  132. data/ext/itsi_server/src/services/mime_types.rs +1416 -0
  133. data/ext/itsi_server/src/services/mod.rs +6 -0
  134. data/ext/itsi_server/src/services/password_hasher.rs +83 -0
  135. data/ext/itsi_server/src/services/rate_limiter.rs +569 -0
  136. data/ext/itsi_server/src/services/static_file_server.rs +1324 -0
  137. data/ext/itsi_tracing/Cargo.toml +5 -0
  138. data/ext/itsi_tracing/src/lib.rs +315 -7
  139. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
  140. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
  141. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
  142. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
  143. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
  144. data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
  145. data/lib/itsi/http_request/response_status_shortcodes.rb +74 -0
  146. data/lib/itsi/http_request.rb +186 -0
  147. data/lib/itsi/http_response.rb +41 -0
  148. data/lib/itsi/passfile.rb +109 -0
  149. data/lib/itsi/server/config/dsl.rb +565 -0
  150. data/lib/itsi/server/config.rb +166 -0
  151. data/lib/itsi/server/default_app/default_app.rb +34 -0
  152. data/lib/itsi/server/default_app/index.html +115 -0
  153. data/lib/itsi/server/default_config/Itsi-rackup.rb +119 -0
  154. data/lib/itsi/server/default_config/Itsi.rb +107 -0
  155. data/lib/itsi/server/grpc/grpc_call.rb +246 -0
  156. data/lib/itsi/server/grpc/grpc_interface.rb +100 -0
  157. data/lib/itsi/server/grpc/reflection/v1/reflection_pb.rb +26 -0
  158. data/lib/itsi/server/grpc/reflection/v1/reflection_services_pb.rb +122 -0
  159. data/lib/itsi/server/rack/handler/itsi.rb +27 -0
  160. data/lib/itsi/server/rack_interface.rb +94 -0
  161. data/lib/itsi/server/route_tester.rb +107 -0
  162. data/lib/itsi/server/scheduler_interface.rb +21 -0
  163. data/lib/itsi/server/scheduler_mode.rb +10 -0
  164. data/lib/itsi/server/signal_trap.rb +29 -0
  165. data/lib/itsi/server/typed_handlers/param_parser.rb +200 -0
  166. data/lib/itsi/server/typed_handlers/source_parser.rb +55 -0
  167. data/lib/itsi/server/typed_handlers.rb +17 -0
  168. data/lib/itsi/server/version.rb +1 -1
  169. data/lib/itsi/server.rb +160 -9
  170. data/lib/itsi/standard_headers.rb +86 -0
  171. data/lib/ruby_lsp/itsi/addon.rb +111 -0
  172. data/lib/shell_completions/completions.rb +26 -0
  173. metadata +182 -25
  174. data/ext/itsi_server/src/request/itsi_request.rs +0 -143
  175. data/ext/itsi_server/src/request/mod.rs +0 -1
  176. data/ext/itsi_server/src/server/bind.rs +0 -138
  177. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -32
  178. data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -52
  179. data/ext/itsi_server/src/server/itsi_server.rs +0 -182
  180. data/ext/itsi_server/src/server/listener.rs +0 -218
  181. data/ext/itsi_server/src/server/tls.rs +0 -138
  182. data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
  183. data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
  184. data/lib/itsi/request.rb +0 -39
data/ext/itsi_server/src/server/thread_worker.rs
@@ -0,0 +1,475 @@
+ use async_channel::Sender;
+ use itsi_rb_helpers::{
+     call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
+ };
+ use itsi_tracing::{debug, error, warn};
+ use magnus::{
+     error::Result,
+     value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
+     Module, RClass, Ruby, Thread, Value,
+ };
+ use nix::unistd::Pid;
+ use parking_lot::{Mutex, RwLock};
+ use std::{
+     ops::Deref,
+     sync::{
+         atomic::{AtomicBool, AtomicU64, Ordering},
+         Arc,
+     },
+     thread,
+     time::{Duration, Instant, SystemTime, UNIX_EPOCH},
+ };
+ use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
+ use tracing::instrument;
+
+ use crate::ruby_types::{
+     itsi_grpc_call::ItsiGrpcCall, itsi_http_request::ItsiHttpRequest,
+     itsi_server::itsi_server_config::ServerParams, ITSI_SERVER,
+ };
+
+ use super::request_job::RequestJob;
+ pub struct ThreadWorker {
+     pub params: Arc<ServerParams>,
+     pub id: u8,
+     pub name: String,
+     pub request_id: AtomicU64,
+     pub current_request_start: AtomicU64,
+     pub receiver: Arc<async_channel::Receiver<RequestJob>>,
+     pub sender: Sender<RequestJob>,
+     pub thread: RwLock<Option<HeapValue<Thread>>>,
+     pub terminated: Arc<AtomicBool>,
+     pub scheduler_class: Option<Opaque<Value>>,
+ }
+
+ static ID_ALIVE: LazyId = LazyId::new("alive?");
+ static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
+ static ID_SCHEDULE: LazyId = LazyId::new("schedule");
+ static ID_BLOCK: LazyId = LazyId::new("block");
+ static ID_YIELD: LazyId = LazyId::new("yield");
+ static ID_CONST_GET: LazyId = LazyId::new("const_get");
+ static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
+     ruby.module_kernel()
+         .const_get::<_, RClass>("Fiber")
+         .unwrap()
+ });
+
+ pub struct TerminateWakerSignal(bool);
+ type ThreadWorkerBuildResult = Result<(
+     Arc<Vec<Arc<ThreadWorker>>>,
+     Sender<RequestJob>,
+     Sender<RequestJob>,
+ )>;
+
+ #[instrument(name = "boot", parent=None, skip(params, pid))]
+ pub fn build_thread_workers(params: Arc<ServerParams>, pid: Pid) -> ThreadWorkerBuildResult {
+     let blocking_thread_count = params.threads;
+     let nonblocking_thread_count = params.scheduler_threads;
+
+     let (blocking_sender, blocking_receiver) =
+         async_channel::bounded((blocking_thread_count as u16 * 30) as usize);
+     let blocking_receiver_ref = Arc::new(blocking_receiver);
+     let blocking_sender_ref = blocking_sender;
+     let scheduler_class = load_scheduler_class(params.scheduler_class.clone())?;
+
+     let mut workers = (1..=blocking_thread_count)
+         .map(|id| {
+             ThreadWorker::new(
+                 params.clone(),
+                 id,
+                 format!("{:?}#{:?}", pid, id),
+                 blocking_receiver_ref.clone(),
+                 blocking_sender_ref.clone(),
+                 if nonblocking_thread_count.is_some() {
+                     None
+                 } else {
+                     scheduler_class
+                 },
+             )
+         })
+         .collect::<Result<Vec<_>>>()?;
+
+     let nonblocking_sender_ref = if let (Some(nonblocking_thread_count), Some(scheduler_class)) =
+         (nonblocking_thread_count, scheduler_class)
+     {
+         let (nonblocking_sender, nonblocking_receiver) =
+             async_channel::bounded((nonblocking_thread_count as u16 * 30) as usize);
+         let nonblocking_receiver_ref = Arc::new(nonblocking_receiver);
+         let nonblocking_sender_ref = nonblocking_sender.clone();
+         for id in 0..nonblocking_thread_count {
+             workers.push(ThreadWorker::new(
+                 params.clone(),
+                 id,
+                 format!("{:?}#{:?}", pid, id),
+                 nonblocking_receiver_ref.clone(),
+                 nonblocking_sender_ref.clone(),
+                 Some(scheduler_class),
+             )?)
+         }
+         nonblocking_sender
+     } else {
+         blocking_sender_ref.clone()
+     };
+
+     Ok((
+         Arc::new(workers),
+         blocking_sender_ref,
+         nonblocking_sender_ref,
+     ))
+ }
+
+ pub fn load_scheduler_class(scheduler_class: Option<String>) -> Result<Option<Opaque<Value>>> {
+     call_with_gvl(|ruby| {
+         let scheduler_class = if let Some(scheduler_class) = scheduler_class {
+             Some(Opaque::from(
+                 ruby.module_kernel()
+                     .funcall::<_, _, Value>(*ID_CONST_GET, (scheduler_class,))?,
+             ))
+         } else {
+             None
+         };
+         Ok(scheduler_class)
+     })
+ }
+ impl ThreadWorker {
+     pub fn new(
+         params: Arc<ServerParams>,
+         id: u8,
+         name: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         sender: Sender<RequestJob>,
+         scheduler_class: Option<Opaque<Value>>,
+     ) -> Result<Arc<Self>> {
+         let worker = Arc::new(Self {
+             params,
+             id,
+             request_id: AtomicU64::new(0),
+             current_request_start: AtomicU64::new(0),
+             name,
+             receiver,
+             sender,
+             thread: RwLock::new(None),
+             terminated: Arc::new(AtomicBool::new(false)),
+             scheduler_class,
+         });
+         worker.clone().run()?;
+         Ok(worker)
+     }
+
+     #[instrument(skip(self, deadline), fields(id = self.id))]
+     pub fn poll_shutdown(&self, deadline: Instant) -> bool {
+         if let Some(thread) = self.thread.read().deref() {
+             if Instant::now() > deadline {
+                 warn!("Worker shutdown timed out. Killing thread");
+                 self.terminated.store(true, Ordering::SeqCst);
+                 kill_threads(vec![thread.as_value()]);
+             }
+             debug!("Checking thread status");
+             if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
+                 return true;
+             }
+             debug!("Thread has shut down");
+         }
+         self.thread.write().take();
+
+         false
+     }
+
+     pub fn run(self: Arc<Self>) -> Result<()> {
+         let name = self.name.clone();
+         let receiver = self.receiver.clone();
+         let terminated = self.terminated.clone();
+         let scheduler_class = self.scheduler_class;
+         let params = self.params.clone();
+         let self_ref = self.clone();
+         call_with_gvl(|_| {
+             *self.thread.write() = Some(
+                 create_ruby_thread(move || {
+                     if let Some(scheduler_class) = scheduler_class {
+                         if let Err(err) = self_ref.fiber_accept_loop(
+                             params,
+                             name,
+                             receiver,
+                             scheduler_class,
+                             terminated,
+                         ) {
+                             error!("Error in fiber_accept_loop: {:?}", err);
+                         }
+                     } else {
+                         self_ref.accept_loop(params, name, receiver, terminated);
+                     }
+                 })
+                 .into(),
+             );
+             Ok::<(), magnus::Error>(())
+         })?;
+         Ok(())
+     }
+
+     pub fn build_scheduler_proc(
+         self: Arc<Self>,
+         leader: &Arc<Mutex<Option<RequestJob>>>,
+         receiver: &Arc<async_channel::Receiver<RequestJob>>,
+         terminated: &Arc<AtomicBool>,
+         waker_sender: &watch::Sender<TerminateWakerSignal>,
+         oob_gc_responses_threshold: Option<u64>,
+     ) -> magnus::block::Proc {
+         let leader = leader.clone();
+         let receiver = receiver.clone();
+         let terminated = terminated.clone();
+         let waker_sender = waker_sender.clone();
+         Ruby::get().unwrap().proc_from_fn(move |ruby, _args, _blk| {
+             let scheduler = ruby
+                 .get_inner(&CLASS_FIBER)
+                 .funcall::<_, _, Value>(*ID_SCHEDULER, ())
+                 .unwrap();
+             let server = ruby.get_inner(&ITSI_SERVER);
+             let thread_current = ruby.thread_current();
+             let leader_clone = leader.clone();
+             let receiver = receiver.clone();
+             let terminated = terminated.clone();
+             let waker_sender = waker_sender.clone();
+             let self_ref = self.clone();
+             static MAX_BATCH_SIZE: i32 = 25;
+             let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);
+
+             let mut idle_counter = 0;
+             call_without_gvl(move || loop {
+                 if let Some(v) = leader_clone.lock().take() {
+                     match v {
+                         RequestJob::ProcessHttpRequest(itsi_request, app_proc) => {
+                             batch.push(RequestJob::ProcessHttpRequest(itsi_request, app_proc))
+                         }
+                         RequestJob::ProcessGrpcRequest(itsi_request, app_proc) => {
+                             batch.push(RequestJob::ProcessGrpcRequest(itsi_request, app_proc))
+                         }
+                         RequestJob::Shutdown => {
+                             waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                             break;
+                         }
+                     }
+                 }
+                 for _ in 0..MAX_BATCH_SIZE {
+                     if let Ok(req) = receiver.try_recv() {
+                         batch.push(req);
+                     } else {
+                         break;
+                     }
+                 }
+
+                 let shutdown_requested = call_with_gvl(|_| {
+                     for req in batch.drain(..) {
+                         match req {
+                             RequestJob::ProcessHttpRequest(request, app_proc) => {
+                                 self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                                 self_ref.current_request_start.store(
+                                     SystemTime::now()
+                                         .duration_since(UNIX_EPOCH)
+                                         .unwrap()
+                                         .as_secs(),
+                                     Ordering::Relaxed,
+                                 );
+                                 let response = request.response.clone();
+                                 if let Err(err) = server.funcall::<_, _, Value>(
+                                     *ID_SCHEDULE,
+                                     (app_proc.as_value(), request),
+                                 ) {
+                                     ItsiHttpRequest::internal_error(ruby, response, err)
+                                 }
+                             }
+                             RequestJob::ProcessGrpcRequest(request, app_proc) => {
+                                 self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                                 self_ref.current_request_start.store(
+                                     SystemTime::now()
+                                         .duration_since(UNIX_EPOCH)
+                                         .unwrap()
+                                         .as_secs(),
+                                     Ordering::Relaxed,
+                                 );
+                                 let response = request.stream.clone();
+                                 if let Err(err) = server.funcall::<_, _, Value>(
+                                     *ID_SCHEDULE,
+                                     (app_proc.as_value(), request),
+                                 ) {
+                                     ItsiGrpcCall::internal_error(ruby, response, err)
+                                 }
+                             }
+                             RequestJob::Shutdown => return true,
+                         }
+                     }
+                     false
+                 });
+
+                 if shutdown_requested || terminated.load(Ordering::Relaxed) {
+                     waker_sender.send(TerminateWakerSignal(true)).unwrap();
+                     break;
+                 }
+
+                 let yield_result = if receiver.is_empty() {
+                     let should_gc = if let Some(oob_gc_threshold) = oob_gc_responses_threshold {
+                         idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                         idle_counter == 0
+                     } else {
+                         false
+                     };
+                     waker_sender.send(TerminateWakerSignal(false)).unwrap();
+                     call_with_gvl(|ruby| {
+                         if should_gc {
+                             ruby.gc_start();
+                         }
+                         scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
+                     })
+                 } else {
+                     call_with_gvl(|_| scheduler.funcall::<_, _, Value>(*ID_YIELD, ()))
+                 };
+
+                 if yield_result.is_err() {
+                     break;
+                 }
+             })
+         })
+     }
+
+     #[instrument(skip_all, fields(thread_worker=name))]
+     pub fn fiber_accept_loop(
+         self: Arc<Self>,
+         params: Arc<ServerParams>,
+         name: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         scheduler_class: Opaque<Value>,
+         terminated: Arc<AtomicBool>,
+     ) -> Result<()> {
+         let ruby = Ruby::get().unwrap();
+         let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
+         let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
+         let server_class = ruby.get_inner(&ITSI_SERVER);
+         let scheduler_proc = self.build_scheduler_proc(
+             &leader,
+             &receiver,
+             &terminated,
+             &waker_sender,
+             params.oob_gc_responses_threshold,
+         );
+         let (scheduler, scheduler_fiber) = server_class.funcall::<_, _, (Value, Value)>(
+             "start_scheduler_loop",
+             (scheduler_class, scheduler_proc),
+         )?;
+         Self::start_waker_thread(
+             scheduler.into(),
+             scheduler_fiber.into(),
+             leader,
+             receiver,
+             waker_receiver,
+         );
+         Ok(())
+     }
+
+     #[allow(clippy::await_holding_lock)]
+     pub fn start_waker_thread(
+         scheduler: Opaque<Value>,
+         scheduler_fiber: Opaque<Value>,
+         leader: Arc<Mutex<Option<RequestJob>>>,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         mut waker_receiver: watch::Receiver<TerminateWakerSignal>,
+     ) {
+         create_ruby_thread(move || {
+             let scheduler = scheduler.get_inner_with(&Ruby::get().unwrap());
+             let leader = leader.clone();
+             call_without_gvl(|| {
+                 RuntimeBuilder::new_current_thread()
+                     .build()
+                     .expect("Failed to build Tokio runtime")
+                     .block_on(async {
+                         loop {
+                             waker_receiver.changed().await.ok();
+                             if waker_receiver.borrow().0 {
+                                 break;
+                             }
+                             tokio::select! {
+                                 _ = waker_receiver.changed() => {
+                                     if waker_receiver.borrow().0 {
+                                         break;
+                                     }
+                                 },
+                                 next_msg = receiver.recv() => {
+                                     *leader.lock() = next_msg.ok();
+                                     call_with_gvl(|_| {
+                                         scheduler
+                                             .funcall::<_, _, Value>(
+                                                 "unblock",
+                                                 (None::<u8>, scheduler_fiber),
+                                             )
+                                             .ok();
+                                     });
+                                 }
+                             }
+                         }
+                     })
+             });
+         });
+     }
+
+     #[instrument(skip_all, fields(thread_worker=id))]
+     pub fn accept_loop(
+         self: Arc<Self>,
+         params: Arc<ServerParams>,
+         id: String,
+         receiver: Arc<async_channel::Receiver<RequestJob>>,
+         terminated: Arc<AtomicBool>,
+     ) {
+         let ruby = Ruby::get().unwrap();
+         let mut idle_counter = 0;
+         let self_ref = self.clone();
+         call_without_gvl(|| loop {
+             if receiver.is_empty() {
+                 if let Some(oob_gc_threshold) = params.oob_gc_responses_threshold {
+                     idle_counter = (idle_counter + 1) % oob_gc_threshold;
+                     if idle_counter == 0 {
+                         call_with_gvl(|_ruby| {
+                             ruby.gc_start();
+                         });
+                     }
+                 }
+             }
+             match receiver.recv_blocking() {
+                 Ok(RequestJob::ProcessHttpRequest(request, app_proc)) => {
+                     self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                     self_ref.current_request_start.store(
+                         SystemTime::now()
+                             .duration_since(UNIX_EPOCH)
+                             .unwrap()
+                             .as_secs(),
+                         Ordering::Relaxed,
+                     );
+                     call_with_gvl(|_ruby| {
+                         request.process(&ruby, app_proc).ok();
+                     });
+                     if terminated.load(Ordering::Relaxed) {
+                         break;
+                     }
+                 }
+                 Ok(RequestJob::ProcessGrpcRequest(request, app_proc)) => {
+                     self_ref.request_id.fetch_add(1, Ordering::Relaxed);
+                     self_ref.current_request_start.store(
+                         SystemTime::now()
+                             .duration_since(UNIX_EPOCH)
+                             .unwrap()
+                             .as_secs(),
+                         Ordering::Relaxed,
+                     );
+                     call_with_gvl(|_ruby| {
+                         request.process(&ruby, app_proc).ok();
+                     });
+                     if terminated.load(Ordering::Relaxed) {
+                         break;
+                     }
+                 }
+                 Ok(RequestJob::Shutdown) => {
+                     break;
+                 }
+                 Err(_) => {
+                     thread::sleep(Duration::from_micros(1));
+                 }
+             }
+         });
+     }
+ }
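The pair of senders returned by `build_thread_workers` is how the rest of the server enqueues work and, eventually, drains the pool: each worker's accept loop exits when it dequeues a `RequestJob::Shutdown`, and `poll_shutdown` kills any Ruby thread still alive past the deadline. A minimal caller-side sketch, assuming it lives alongside `thread_worker.rs` so its items are in scope, and that `params` and `pid` come from the surrounding server code this diff does not show:

```rust
use std::sync::Arc;
use std::{thread, time::{Duration, Instant}};
use nix::unistd::Pid;

// Sketch only, not code from the gem: boot the pool, then drain it.
fn drain_pool(params: Arc<ServerParams>, pid: Pid) -> magnus::error::Result<()> {
    let (workers, blocking_sender, nonblocking_sender) = build_thread_workers(params, pid)?;

    // ... serve traffic ...

    // Enqueue one Shutdown per worker on each channel; when no scheduler
    // threads are configured the two senders share a channel, so the extra
    // Shutdown jobs are simply left behind once every worker has exited.
    for _ in 0..workers.len() {
        blocking_sender.send_blocking(RequestJob::Shutdown).ok();
        nonblocking_sender.send_blocking(RequestJob::Shutdown).ok();
    }

    // Poll until every Ruby thread reports dead; poll_shutdown kills
    // stragglers once the deadline passes.
    let deadline = Instant::now() + Duration::from_secs(5);
    while workers.iter().any(|w| w.poll_shutdown(deadline)) {
        thread::sleep(Duration::from_millis(10));
    }
    Ok(())
}
```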
data/ext/itsi_server/src/services/cache_store.rs
@@ -0,0 +1,74 @@
+ use async_trait::async_trait;
+ use redis::aio::ConnectionManager;
+ use redis::{Client, RedisError, Script};
+ use std::sync::Arc;
+ use std::time::Duration;
+
+ #[derive(Debug)]
+ pub enum CacheError {
+     RedisError(RedisError),
+     // Other error variants as needed.
+ }
+ /// A general-purpose cache trait with an atomic “increment with timeout” operation.
+ #[async_trait]
+ pub trait CacheStore: Send + Sync + std::fmt::Debug {
+     /// Increments the counter associated with `key` and sets (or extends) its expiration.
+     /// Returns the new counter value.
+     async fn increment(&self, key: &str, timeout: Duration) -> Result<u64, CacheError>;
+ }
+
+ /// A Redis-backed cache store using an async connection manager.
+ /// This uses a TLS-enabled connection when the URL is prefixed with "rediss://".
+ #[derive(Clone)]
+ pub struct RedisCacheStore {
+     connection: Arc<ConnectionManager>,
+ }
+
+ impl std::fmt::Debug for RedisCacheStore {
+     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+         f.debug_struct("RedisCacheStore").finish()
+     }
+ }
+
+ impl RedisCacheStore {
+     /// Constructs a new RedisCacheStore.
+     ///
+     /// Use a connection URL like "rediss://host:port" to enable TLS (with rustls under the hood).
+     /// This constructor is async because it sets up the connection manager.
+     pub async fn new(connection_url: &str) -> Result<Self, CacheError> {
+         let client = Client::open(connection_url).map_err(CacheError::RedisError)?;
+         let connection_manager = ConnectionManager::new(client)
+             .await
+             .map_err(CacheError::RedisError)?;
+         Ok(Self {
+             connection: Arc::new(connection_manager),
+         })
+     }
+ }
+
+ #[async_trait]
+ impl CacheStore for RedisCacheStore {
+     async fn increment(&self, key: &str, timeout: Duration) -> Result<u64, CacheError> {
+         let timeout_secs = timeout.as_secs();
+         // Lua script to:
+         // 1. INCR the key.
+         // 2. If the key doesn't have a TTL, set it.
+         let script = r#"
+             local current = redis.call('INCR', KEYS[1])
+             if redis.call('TTL', KEYS[1]) < 0 then
+                 redis.call('EXPIRE', KEYS[1], ARGV[1])
+             end
+             return current
+         "#;
+         let script = Script::new(script);
+         // The ConnectionManager is cloneable and can be used concurrently.
+         let mut connection = (*self.connection).clone();
+         let value: i64 = script
+             .key(key)
+             .arg(timeout_secs)
+             .invoke_async(&mut connection)
+             .await
+             .map_err(CacheError::RedisError)?;
+         Ok(value as u64)
+     }
+ }
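The Lua script keeps the INCR and EXPIRE steps atomic on the Redis side, so concurrent callers can never observe a counter without a TTL. A hypothetical fixed-window rate-limit check layered on this store might look like the following; the key prefix and the 100-request limit are illustrative assumptions, not values taken from the gem:

```rust
use std::time::Duration;

// Sketch only: counts hits per client in a 60-second window using the
// RedisCacheStore from the cache_store.rs hunk above.
async fn over_limit(store: &RedisCacheStore, client_id: &str) -> Result<bool, CacheError> {
    // The first increment in a window creates the key with a 60-second TTL;
    // the script leaves an existing TTL alone, so the window expires as a unit
    // and the counter resets for the next window.
    let hits = store
        .increment(&format!("rate:{client_id}"), Duration::from_secs(60))
        .await?;
    Ok(hits > 100)
}
```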