itsi-server 0.2.15 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +75 -73
  3. data/exe/itsi +6 -1
  4. data/ext/itsi_acme/Cargo.toml +1 -1
  5. data/ext/itsi_scheduler/Cargo.toml +1 -1
  6. data/ext/itsi_server/Cargo.lock +1 -1
  7. data/ext/itsi_server/Cargo.toml +3 -1
  8. data/ext/itsi_server/extconf.rb +3 -1
  9. data/ext/itsi_server/src/lib.rs +7 -1
  10. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  11. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +6 -6
  12. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  13. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +71 -42
  14. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  15. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
  16. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +32 -6
  17. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  18. data/ext/itsi_server/src/server/binds/listener.rs +49 -8
  19. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  20. data/ext/itsi_server/src/server/http_message_types.rs +143 -10
  21. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  22. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -58
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +6 -9
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +27 -42
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +65 -14
  31. data/ext/itsi_server/src/server/middleware_stack/middlewares/max_body.rs +1 -1
  32. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +8 -11
  33. data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +21 -8
  34. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  35. data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +1 -5
  36. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  37. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +13 -6
  38. data/ext/itsi_server/src/server/mod.rs +1 -0
  39. data/ext/itsi_server/src/server/process_worker.rs +5 -5
  40. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +100 -0
  41. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
  42. data/ext/itsi_server/src/server/serve_strategy/mod.rs +1 -0
  43. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +166 -206
  44. data/ext/itsi_server/src/server/signal.rs +37 -9
  45. data/ext/itsi_server/src/server/thread_worker.rs +92 -70
  46. data/ext/itsi_server/src/services/itsi_http_service.rs +67 -62
  47. data/ext/itsi_server/src/services/mime_types.rs +185 -183
  48. data/ext/itsi_server/src/services/rate_limiter.rs +16 -34
  49. data/ext/itsi_server/src/services/static_file_server.rs +35 -60
  50. data/lib/itsi/http_request.rb +31 -39
  51. data/lib/itsi/http_response.rb +5 -0
  52. data/lib/itsi/rack_env_pool.rb +59 -0
  53. data/lib/itsi/server/config/config_helpers.rb +1 -2
  54. data/lib/itsi/server/config/dsl.rb +5 -4
  55. data/lib/itsi/server/config/middleware/etag.md +3 -7
  56. data/lib/itsi/server/config/middleware/etag.rb +2 -4
  57. data/lib/itsi/server/config/middleware/proxy.rb +1 -1
  58. data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
  59. data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
  60. data/lib/itsi/server/config/options/include.rb +5 -2
  61. data/lib/itsi/server/config/options/listen_backlog.rb +1 -1
  62. data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
  63. data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
  64. data/lib/itsi/server/config/options/send_buffer_size.md +15 -0
  65. data/lib/itsi/server/config/options/send_buffer_size.rb +19 -0
  66. data/lib/itsi/server/config/options/writev.md +25 -0
  67. data/lib/itsi/server/config/options/writev.rb +19 -0
  68. data/lib/itsi/server/config.rb +43 -31
  69. data/lib/itsi/server/default_config/Itsi.rb +1 -4
  70. data/lib/itsi/server/grpc/grpc_call.rb +2 -0
  71. data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
  72. data/lib/itsi/server/rack/handler/itsi.rb +3 -1
  73. data/lib/itsi/server/rack_interface.rb +17 -12
  74. data/lib/itsi/server/route_tester.rb +1 -1
  75. data/lib/itsi/server/scheduler_interface.rb +2 -0
  76. data/lib/itsi/server/version.rb +1 -1
  77. data/lib/itsi/server.rb +1 -0
  78. data/lib/ruby_lsp/itsi/addon.rb +12 -13
  79. metadata +10 -1
data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs

@@ -1,15 +1,13 @@
 use std::sync::OnceLock;
 
 use super::{FromValue, MiddlewareLayer};
-use crate::server::http_message_types::{HttpRequest, HttpResponse};
+use crate::server::http_message_types::{HttpBody, HttpRequest, HttpResponse};
 use crate::services::itsi_http_service::HttpRequestContext;
 use async_trait::async_trait;
 use bytes::Bytes;
 use derive_more::Debug;
 use either::Either;
 use http::{HeaderMap, HeaderName, HeaderValue, Response, StatusCode};
-use http_body_util::combinators::BoxBody;
-use http_body_util::Full;
 use itsi_error::ItsiError;
 use magnus::error::Result;
 use serde::Deserialize;
@@ -21,6 +19,10 @@ pub struct StaticResponse {
     body: Vec<u8>,
     #[serde(skip)]
     header_map: OnceLock<HeaderMap>,
+    #[serde(skip)]
+    body_bytes: OnceLock<Bytes>,
+    #[serde(skip)]
+    status_code: OnceLock<StatusCode>,
 }
 
 #[async_trait]
@@ -35,6 +37,12 @@ impl MiddlewareLayer for StaticResponse {
         self.header_map
             .set(header_map)
             .map_err(|_| ItsiError::new("Failed to set headers"))?;
+        self.body_bytes
+            .set(Bytes::from(self.body.clone()))
+            .map_err(|_| ItsiError::new("Failed to set body bytes"))?;
+        self.status_code
+            .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK))
+            .map_err(|_| ItsiError::new("Failed to set status code"))?;
         Ok(())
     }
 
@@ -43,9 +51,8 @@ impl MiddlewareLayer for StaticResponse {
         _req: HttpRequest,
         _context: &mut HttpRequestContext,
     ) -> Result<Either<HttpRequest, HttpResponse>> {
-        let mut resp = Response::new(BoxBody::new(Full::new(Bytes::from(self.body.clone()))));
-        let status = StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK);
-        *resp.status_mut() = status;
+        let mut resp = Response::new(HttpBody::full(self.body_bytes.get().unwrap().clone()));
+        *resp.status_mut() = *self.status_code.get().unwrap();
         *resp.headers_mut() = self.header_map.get().unwrap().clone();
 
         Ok(Either::Right(resp))
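
Note: the rewrite moves per-request work into initialization. The body and
status code are converted once into `OnceLock` slots, so the hot path clones
a refcounted `Bytes` handle instead of copying the `Vec<u8>` and re-parsing
the status on every request. A minimal standalone sketch of the same pattern
(only the `OnceLock` fields mirror the diff; the type and method names here
are illustrative):

    use std::sync::OnceLock;

    use bytes::Bytes;
    use http::StatusCode;

    struct Cached {
        body: Vec<u8>,
        code: u16,
        body_bytes: OnceLock<Bytes>,
        status_code: OnceLock<StatusCode>,
    }

    impl Cached {
        // One-time setup: the Vec -> Bytes copy and status parse happen here.
        fn initialize(&self) {
            self.body_bytes.set(Bytes::from(self.body.clone())).ok();
            self.status_code
                .set(StatusCode::from_u16(self.code).unwrap_or(StatusCode::OK))
                .ok();
        }

        // Hot path: a u16 copy and a refcount bump, no allocation.
        fn respond(&self) -> (StatusCode, Bytes) {
            (
                *self.status_code.get().unwrap(),
                self.body_bytes.get().unwrap().clone(),
            )
        }
    }

    fn main() {
        let c = Cached {
            body: b"hello".to_vec(),
            code: 200,
            body_bytes: OnceLock::new(),
            status_code: OnceLock::new(),
        };
        c.initialize();
        assert_eq!(c.respond().0, StatusCode::OK);
    }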
data/ext/itsi_server/src/server/mod.rs

@@ -1,5 +1,6 @@
 pub mod binds;
 pub mod byte_frame;
+pub mod frame_stream;
 pub mod http_message_types;
 pub mod io_stream;
 pub mod lifecycle_event;
data/ext/itsi_server/src/server/process_worker.rs

@@ -42,7 +42,8 @@ impl Default for ProcessWorker {
     }
 }
 
-static CORE_IDS: LazyLock<Vec<CoreId>> = LazyLock::new(|| core_affinity::get_core_ids().unwrap());
+pub static CORE_IDS: LazyLock<Vec<CoreId>> =
+    LazyLock::new(|| core_affinity::get_core_ids().unwrap());
 
 impl ProcessWorker {
     #[instrument(skip(self, cluster_template), fields(self.worker_id = %self.worker_id))]
@@ -78,7 +79,7 @@ impl ProcessWorker {
         ) {
             error!("Failed to set process group ID: {}", e);
         }
-        match SingleMode::new(cluster_template.server_config.clone()) {
+        match SingleMode::new(cluster_template.server_config.clone(), self.worker_id) {
             Ok(single_mode) => {
                 if cluster_template
                     .server_config
@@ -87,7 +88,7 @@ impl ProcessWorker {
                     .pin_worker_cores
                 {
                     core_affinity::set_for_current(
-                        CORE_IDS[self.worker_id % CORE_IDS.len()],
+                        CORE_IDS[(2 * self.worker_id) % CORE_IDS.len()],
                     );
                 }
                 Arc::new(single_mode).run().ok();
@@ -165,7 +166,7 @@ impl ProcessWorker {
     }
 
     pub(crate) fn boot_if_dead(&self, cluster_template: Arc<ClusterMode>) -> bool {
-        if !self.is_alive() {
+        if !self.is_alive() && self.child_pid.lock().is_some() {
            if self.just_started() {
                error!(
                    "Worker in crash loop {:?}. Refusing to restart",
@@ -201,7 +202,6 @@ impl ProcessWorker {
         let child_pid = *self.child_pid.lock();
         if let Some(pid) = child_pid {
             if self.is_alive() {
-                info!("Worker still alive, sending SIGKILL {}", pid);
                 if let Err(e) = kill(pid, SIGKILL) {
                     error!("Failed to force kill process {}: {}", pid, e);
                 }
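
Note: `CORE_IDS` is now `pub` so other modules can reuse the detected core
list, and pinning strides by two (worker 0 -> core 0, worker 1 -> core 2,
...), which plausibly spreads workers across physical cores when SMT siblings
have adjacent logical ids; that motivation is our inference, not stated in
the diff. A sketch of the mapping using the same `core_affinity` calls the
file already uses:

    use std::sync::LazyLock;

    use core_affinity::CoreId;

    // Detected logical cores, resolved once on first use.
    static CORE_IDS: LazyLock<Vec<CoreId>> =
        LazyLock::new(|| core_affinity::get_core_ids().unwrap());

    // Stride-2 mapping, wrapping once 2 * worker_id exceeds the core count.
    fn pin_worker(worker_id: usize) {
        core_affinity::set_for_current(CORE_IDS[(2 * worker_id) % CORE_IDS.len()]);
    }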
data/ext/itsi_server/src/server/serve_strategy/acceptor.rs (new file)

@@ -0,0 +1,100 @@
+use hyper_util::rt::TokioIo;
+use std::{ops::Deref, pin::Pin, sync::Arc, time::Duration};
+use tokio::task::JoinSet;
+use tracing::debug;
+
+use crate::{
+    ruby_types::itsi_server::itsi_server_config::ServerParams,
+    server::{binds::listener::ListenerInfo, io_stream::IoStream, request_job::RequestJob},
+    services::itsi_http_service::{ItsiHttpService, ItsiHttpServiceInner},
+};
+
+use super::single_mode::{RunningPhase, SingleMode};
+
+pub struct Acceptor {
+    pub acceptor_args: Arc<AcceptorArgs>,
+    pub join_set: JoinSet<()>,
+}
+
+impl Deref for Acceptor {
+    type Target = Arc<AcceptorArgs>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.acceptor_args
+    }
+}
+
+pub struct AcceptorArgs {
+    pub strategy: Arc<SingleMode>,
+    pub listener_info: ListenerInfo,
+    pub shutdown_receiver: tokio::sync::watch::Receiver<RunningPhase>,
+    pub job_sender: async_channel::Sender<RequestJob>,
+    pub nonblocking_sender: async_channel::Sender<RequestJob>,
+    pub server_params: Arc<ServerParams>,
+}
+
+impl Acceptor {
+    pub(crate) async fn serve_connection(&mut self, stream: IoStream) {
+        let addr = stream.addr();
+        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
+        let mut shutdown_channel = self.shutdown_receiver.clone();
+        let acceptor_args = self.acceptor_args.clone();
+        let service = ItsiHttpService {
+            inner: Arc::new(ItsiHttpServiceInner {
+                acceptor_args: acceptor_args.clone(),
+                addr,
+            }),
+        };
+
+        self.join_set.spawn(async move {
+            let executor = &acceptor_args.strategy.executor;
+            let svc = hyper::service::service_fn(move |req| {
+                let service = service.clone();
+                async move { service.handle_request(req).await }
+            });
+
+            let mut serve = Box::pin(executor.serve_connection_with_upgrades(io, svc));
+
+            tokio::select! {
+                // Await the connection finishing naturally.
+                res = &mut serve => {
+                    match res {
+                        Ok(()) => {
+                            debug!("Connection closed normally");
+                        },
+                        Err(res) => {
+                            debug!("Connection closed abruptly: {:?}", res);
+                        }
+                    }
+                },
+                // A lifecycle event triggers shutdown.
+                _ = shutdown_channel.changed() => {
+                    // Initiate graceful shutdown.
+                    serve.as_mut().graceful_shutdown();
+
+                    // Now await the connection to finish shutting down.
+                    if let Err(e) = serve.await {
+                        debug!("Connection shutdown error: {:?}", e);
+                    }
+                }
+            }
+        });
+    }
+
+    pub async fn join(&mut self) {
+        // Join all acceptor tasks with timeout
+
+        let deadline = tokio::time::Instant::now()
+            + Duration::from_secs_f64(self.server_params.shutdown_timeout);
+        let sleep_until = tokio::time::sleep_until(deadline);
+        tokio::select! {
+            _ = async {
+                while (self.join_set.join_next().await).is_some() {}
+            } => {},
+            _ = sleep_until => {
+                self.join_set.abort_all();
+                debug!("Shutdown timeout reached; abandoning remaining acceptor tasks.");
+            }
+        }
+    }
+}
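
Note: each accepted connection becomes a task in the `Acceptor`'s `JoinSet`,
racing normal completion against the `watch`-channel shutdown signal; on
shutdown it calls hyper's `graceful_shutdown` and then awaits the connection.
`join` then drains the whole set under the configured `shutdown_timeout`. A
self-contained sketch of that drain-or-abort step (task bodies and the
timeout value are ours):

    use std::time::Duration;

    use tokio::task::JoinSet;

    async fn drain_with_deadline(mut tasks: JoinSet<()>, timeout: Duration) {
        let deadline = tokio::time::Instant::now() + timeout;
        tokio::select! {
            // Drain: join_next() yields None once every task has finished.
            _ = async { while tasks.join_next().await.is_some() {} } => {}
            // Deadline reached: abort whatever is still in flight.
            _ = tokio::time::sleep_until(deadline) => tasks.abort_all(),
        }
    }

    #[tokio::main]
    async fn main() {
        let mut tasks = JoinSet::new();
        for i in 0..4u64 {
            tasks.spawn(async move {
                tokio::time::sleep(Duration::from_millis(100 * i)).await;
            });
        }
        drain_with_deadline(tasks, Duration::from_secs(1)).await;
    }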
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs

@@ -1,5 +1,5 @@
 use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig;
-use crate::server::signal::SIGNAL_HANDLER_CHANNEL;
+use crate::server::signal::{subscribe_runtime_to_signals, unsubscribe_runtime};
 use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker};
 use itsi_error::{ItsiError, Result};
 use itsi_rb_helpers::{call_with_gvl, call_without_gvl, create_ruby_thread};
@@ -7,31 +7,32 @@ use itsi_tracing::{error, info, warn};
 use magnus::Value;
 use nix::{libc::exit, unistd::Pid};
 
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::{
-    sync::{atomic::AtomicUsize, Arc},
+    sync::Arc,
     time::{Duration, Instant},
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{broadcast, watch, Mutex},
+    sync::{watch, Mutex},
     time::{self, sleep},
 };
 use tracing::{debug, instrument};
 pub(crate) struct ClusterMode {
     pub server_config: Arc<ItsiServerConfig>,
     pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
-    pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
 }
 
-static WORKER_ID: AtomicUsize = AtomicUsize::new(0);
 static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
     parking_lot::Mutex::new(None);
 
+static RELOAD_IN_PROGRESS: AtomicBool = AtomicBool::new(false);
+
 impl ClusterMode {
     pub fn new(server_config: Arc<ItsiServerConfig>) -> Self {
         let process_workers = (0..server_config.server_params.read().workers)
-            .map(|_| ProcessWorker {
-                worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+            .map(|id| ProcessWorker {
+                worker_id: id as usize,
                 ..Default::default()
             })
            .collect();
@@ -39,7 +40,6 @@ impl ClusterMode {
         Self {
             server_config,
             process_workers: parking_lot::Mutex::new(process_workers),
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
         }
     }
 
@@ -60,6 +60,26 @@ impl ClusterMode {
         }
     }
 
+    fn next_worker_id(&self) -> usize {
+        let mut ids: Vec<usize> = self
+            .process_workers
+            .lock()
+            .iter()
+            .map(|w| w.worker_id)
+            .collect();
+        self.next_available_id_in(&mut ids)
+    }
+
+    fn next_available_id_in(&self, list: &mut [usize]) -> usize {
+        list.sort_unstable();
+        for (expected, &id) in list.iter().enumerate() {
+            if id != expected {
+                return expected;
+            }
+        }
+        list.len()
+    }
+
     #[allow(clippy::await_holding_lock)]
     pub async fn handle_lifecycle_event(
         self: Arc<Self>,
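
Note: worker ids previously came from a global `AtomicUsize` that only ever
grew; these helpers instead reuse the lowest free id, keeping ids dense now
that `worker_id` also selects the pinned core and is passed to
`SingleMode::new`. A standalone version of the scan with worked cases:

    // Lowest non-negative integer not present in `ids` (ids assumed unique).
    fn next_available_id(ids: &mut [usize]) -> usize {
        ids.sort_unstable();
        for (expected, &id) in ids.iter().enumerate() {
            if id != expected {
                return expected; // first gap in 0..n
            }
        }
        ids.len() // no gap: allocate one past the end
    }

    fn main() {
        assert_eq!(next_available_id(&mut [0, 1, 3]), 2); // fills the gap
        assert_eq!(next_available_id(&mut [0, 1, 2]), 3); // extends the range
        assert_eq!(next_available_id(&mut [1, 2]), 0);    // id 0 was freed
    }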
@@ -97,40 +117,56 @@ impl ClusterMode {
                     self.shutdown().await.ok();
                     self.server_config.reload_exec()?;
                 }
-                let mut workers_to_load = self.server_config.server_params.read().workers;
-                let mut next_workers = Vec::new();
-                for worker in self.process_workers.lock().drain(..) {
-                    if workers_to_load == 0 {
-                        worker.graceful_shutdown(self.clone()).await
-                    } else {
-                        workers_to_load -= 1;
-                        worker.reboot(self.clone()).await?;
-                        next_workers.push(worker);
-                    }
+
+                if RELOAD_IN_PROGRESS
+                    .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+                    .is_err()
+                {
+                    warn!("Reload already in progress, ignoring request");
+                    return Ok(());
                 }
-                self.process_workers.lock().extend(next_workers);
-                while workers_to_load > 0 {
-                    let mut workers = self.process_workers.lock();
+                let workers_to_load = self.server_config.server_params.read().workers;
+                let mut next_workers = Vec::new();
+                let mut old_workers = self.process_workers.lock().drain(..).collect::<Vec<_>>();
+
+                // Spawn new workers
+                for i in 0..workers_to_load {
                     let worker = ProcessWorker {
-                        worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                        worker_id: i as usize,
                         ..Default::default()
                     };
                     let worker_clone = worker.clone();
                     let self_clone = self.clone();
-                    create_ruby_thread(move || {
-                        call_without_gvl(move || {
-                            worker_clone.boot(self_clone).ok();
-                        })
+
+                    call_with_gvl(|_| {
+                        create_ruby_thread(move || {
+                            call_without_gvl(move || match worker_clone.boot(self_clone) {
+                                Err(err) => error!("Worker boot failed {:?}", err),
+                                _ => {}
+                            })
+                        });
                     });
-                    workers.push(worker);
-                    workers_to_load -= 1
+
+                    next_workers.push(worker);
+
+                    if let Some(old) = old_workers.pop() {
+                        old.graceful_shutdown(self.clone()).await;
+                    }
                 }
+
+                for worker in old_workers {
+                    worker.graceful_shutdown(self.clone()).await;
+                }
+
+                self.process_workers.lock().extend(next_workers);
+                RELOAD_IN_PROGRESS.store(false, Ordering::SeqCst);
+
                 Ok(())
             }
             LifecycleEvent::IncreaseWorkers => {
                 let mut workers = self.process_workers.lock();
                 let worker = ProcessWorker {
-                    worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                    worker_id: self.next_worker_id(),
                     ..Default::default()
                 };
                 let worker_clone = worker.clone();
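
Note: the reload path changes in two ways. First, `RELOAD_IN_PROGRESS` makes
reloads non-reentrant: `compare_exchange` admits exactly one caller, and any
request arriving mid-reload is dropped with a warning. Second, the old
drain-and-reboot loop becomes a rolling swap: boot one replacement, then
gracefully stop one old worker, so serving capacity stays roughly constant
during the restart. A minimal sketch of the guard alone (thread bodies and
names are ours):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::{thread, time::Duration};

    static RELOAD_IN_PROGRESS: AtomicBool = AtomicBool::new(false);

    fn try_reload(name: &str) {
        // Exactly one caller flips false -> true; the rest see Err and bail.
        if RELOAD_IN_PROGRESS
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            println!("{name}: reload already in progress, ignoring");
            return;
        }
        println!("{name}: rolling reload running");
        thread::sleep(Duration::from_millis(50)); // stand-in for the swap
        RELOAD_IN_PROGRESS.store(false, Ordering::SeqCst);
    }

    fn main() {
        let a = thread::spawn(|| try_reload("signal-1"));
        let b = thread::spawn(|| try_reload("signal-2"));
        a.join().unwrap();
        b.join().unwrap();
    }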
@@ -171,6 +207,10 @@ impl ClusterMode {
                 unsafe { exit(0) };
             }
             LifecycleEvent::ChildTerminated => {
+                if RELOAD_IN_PROGRESS.load(Ordering::SeqCst) {
+                    warn!("Reload already in progress, ignoring child signal");
+                    return Ok(());
+                }
                 CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
                     i.send(()).ok();
                 });
@@ -275,18 +315,29 @@ impl ClusterMode {
     pub fn run(self: Arc<Self>) -> Result<()> {
         info!("Starting in Cluster mode");
         self.invoke_hook("before_fork");
+
         self.process_workers
             .lock()
             .iter()
             .try_for_each(|worker| worker.boot(Arc::clone(&self)))?;
 
+        if cfg!(target_os = "linux") {
+            self.server_config
+                .server_params
+                .write()
+                .listeners
+                .lock()
+                .drain(..);
+        };
+
         let (sender, mut receiver) = watch::channel(());
         *CHILD_SIGNAL_SENDER.lock() = Some(sender);
 
-        let mut lifecycle_rx = self.lifecycle_channel.subscribe();
         let self_ref = self.clone();
 
         self.build_runtime().block_on(async {
+            let mut lifecycle_rx = subscribe_runtime_to_signals();
+
             let self_ref = self_ref.clone();
             let memory_check_duration = if self_ref.server_config.server_params.read().worker_memory_limit.is_some(){
                 time::Duration::from_secs(15)
@@ -338,11 +389,16 @@ impl ClusterMode {
                     }
 
                     },
-                    Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+                    Err(e) => {
+                        debug!("Lifecycle channel closed: {:?}, exiting cluster monitor loop", e);
+                        break
+                    },
                 }
             }
         }
         });
+
+        unsubscribe_runtime();
         self.server_config
             .server_params
             .write()
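
Note: `run` now subscribes to lifecycle signals from inside the runtime and
unsubscribes once `block_on` returns, and a recv error on the lifecycle
channel exits the monitor loop instead of logging the same error repeatedly.
(The Linux-only `drain(..)` of listener handles after the workers boot reads
as the parent dropping its copies of the listening sockets once children hold
them; that reading is our inference.) A sketch of the recv-loop behavior on a
tokio broadcast channel:

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = broadcast::channel::<&'static str>(8);

        tokio::spawn(async move {
            tx.send("reload").ok();
            // `tx` drops here, closing the channel.
        });

        loop {
            match rx.recv().await {
                Ok(event) => println!("lifecycle event: {event}"),
                // Closed (or Lagged) ends the loop rather than spinning.
                Err(e) => {
                    println!("lifecycle channel closed: {e:?}");
                    break;
                }
            }
        }
    }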
data/ext/itsi_server/src/server/serve_strategy/mod.rs

@@ -4,6 +4,7 @@ use cluster_mode::ClusterMode;
 use itsi_error::Result;
 use single_mode::SingleMode;
 
+pub mod acceptor;
 pub mod cluster_mode;
 pub mod single_mode;