itsi-server 0.2.16 → 0.2.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. checksums.yaml +4 -4
  2. data/Cargo.lock +3 -1
  3. data/exe/itsi +6 -1
  4. data/ext/itsi_acme/Cargo.toml +1 -1
  5. data/ext/itsi_scheduler/Cargo.toml +1 -1
  6. data/ext/itsi_server/Cargo.toml +3 -1
  7. data/ext/itsi_server/src/lib.rs +6 -1
  8. data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
  9. data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +4 -4
  10. data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
  11. data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +64 -33
  12. data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
  13. data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
  14. data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +26 -5
  15. data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
  16. data/ext/itsi_server/src/server/binds/listener.rs +45 -7
  17. data/ext/itsi_server/src/server/frame_stream.rs +142 -0
  18. data/ext/itsi_server/src/server/http_message_types.rs +142 -9
  19. data/ext/itsi_server/src/server/io_stream.rs +28 -5
  20. data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
  21. data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
  22. data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
  23. data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
  24. data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
  25. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -56
  26. data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +5 -7
  27. data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +5 -5
  28. data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +7 -10
  29. data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
  30. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
  31. data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +4 -6
  32. data/ext/itsi_server/src/server/mod.rs +1 -0
  33. data/ext/itsi_server/src/server/process_worker.rs +3 -4
  34. data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +16 -12
  35. data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
  36. data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +158 -142
  37. data/ext/itsi_server/src/server/signal.rs +37 -9
  38. data/ext/itsi_server/src/server/thread_worker.rs +84 -69
  39. data/ext/itsi_server/src/services/itsi_http_service.rs +43 -43
  40. data/ext/itsi_server/src/services/static_file_server.rs +28 -47
  41. data/lib/itsi/http_request.rb +31 -39
  42. data/lib/itsi/http_response.rb +5 -0
  43. data/lib/itsi/rack_env_pool.rb +59 -0
  44. data/lib/itsi/server/config/dsl.rb +5 -4
  45. data/lib/itsi/server/config/middleware/proxy.rb +1 -1
  46. data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
  47. data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
  48. data/lib/itsi/server/config/options/include.rb +5 -2
  49. data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
  50. data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
  51. data/lib/itsi/server/config/options/writev.md +25 -0
  52. data/lib/itsi/server/config/options/writev.rb +19 -0
  53. data/lib/itsi/server/config.rb +21 -8
  54. data/lib/itsi/server/default_config/Itsi.rb +1 -4
  55. data/lib/itsi/server/grpc/grpc_call.rb +2 -0
  56. data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
  57. data/lib/itsi/server/rack/handler/itsi.rb +3 -1
  58. data/lib/itsi/server/rack_interface.rb +17 -12
  59. data/lib/itsi/server/scheduler_interface.rb +2 -0
  60. data/lib/itsi/server/version.rb +1 -1
  61. data/lib/itsi/server.rb +1 -0
  62. data/lib/ruby_lsp/itsi/addon.rb +12 -13
  63. metadata +7 -1
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs
@@ -4,7 +4,10 @@ use crate::{
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
         serve_strategy::acceptor::{Acceptor, AcceptorArgs},
-        signal::{SHUTDOWN_REQUESTED, SIGNAL_HANDLER_CHANNEL},
+        signal::{
+            send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime,
+            SHUTDOWN_REQUESTED,
+        },
         thread_worker::{build_thread_workers, ThreadWorker},
     },
 };
@@ -29,22 +32,20 @@ use std::{
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{
-        broadcast,
-        watch::{self},
-    },
+    sync::watch::{self},
     task::JoinSet,
 };
 use tracing::instrument;
 
 pub struct SingleMode {
+    pub worker_id: usize,
     pub executor: Builder<TokioExecutor>,
     pub server_config: Arc<ItsiServerConfig>,
-    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     pub restart_requested: AtomicBool,
     pub status: RwLock<HashMap<u8, (u64, u64)>>,
 }
 
+#[derive(PartialEq, Debug)]
 pub enum RunningPhase {
     Running,
     ShutdownPending,
@@ -53,31 +54,45 @@ pub enum RunningPhase {
 
 impl SingleMode {
     #[instrument(parent=None, skip_all)]
-    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
+    pub fn new(server_config: Arc<ItsiServerConfig>, worker_id: usize) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
-        let mut executor = Builder::new(TokioExecutor::new());
-        executor
-            .http1()
-            .header_read_timeout(server_config.server_params.read().header_read_timeout)
-            .writev(true)
-            .timer(TokioTimer::new());
-        executor
-            .http2()
-            .max_concurrent_streams(100)
-            .max_local_error_reset_streams(100)
-            .enable_connect_protocol()
-            .max_header_list_size(10 * 1024 * 1024)
-            .max_send_buf_size(16 * 1024 * 1024);
+        let executor = {
+            let mut executor = Builder::new(TokioExecutor::new());
+            let server_params = server_config.server_params.read();
+            let mut http1_executor = executor.http1();
+
+            http1_executor
+                .header_read_timeout(server_params.header_read_timeout)
+                .pipeline_flush(server_params.pipeline_flush)
+                .timer(TokioTimer::new());
+
+            if let Some(writev) = server_params.writev {
+                http1_executor.writev(writev);
+            }
+
+            executor
+                .http2()
+                .max_concurrent_streams(server_params.max_concurrent_streams)
+                .max_local_error_reset_streams(server_params.max_local_error_reset_streams)
+                .max_header_list_size(server_params.max_header_list_size)
+                .max_send_buf_size(server_params.max_send_buf_size)
+                .enable_connect_protocol();
+            executor
+        };
 
         Ok(Self {
+            worker_id,
             executor,
             server_config,
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
            restart_requested: AtomicBool::new(false),
            status: RwLock::new(HashMap::new()),
        })
    }
 
+    pub fn is_zero_worker(&self) -> bool {
+        self.worker_id == 0
+    }
+
     pub fn build_runtime(&self) -> Runtime {
         let mut builder: RuntimeBuilder = if self
             .server_config
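
The rewritten constructor threads every HTTP/1 and HTTP/2 knob through server_params instead of hard-coding them, and only calls writev when the option is explicitly configured, leaving hyper's per-connection auto-detection in place otherwise. A minimal standalone sketch of the same hyper-util builder pattern (the literal values below are placeholders, not Itsi's defaults):

    use std::time::Duration;

    use hyper_util::rt::{TokioExecutor, TokioTimer};
    use hyper_util::server::conn::auto::Builder;

    fn build_executor(writev: Option<bool>) -> Builder<TokioExecutor> {
        let mut builder = Builder::new(TokioExecutor::new());
        {
            // http1() mutably borrows the builder, so scope it before
            // touching http2().
            let mut http1 = builder.http1();
            http1
                .header_read_timeout(Duration::from_secs(1))
                .pipeline_flush(false)
                .timer(TokioTimer::new());
            // Only force vectored (or flattened) writes when configured;
            // otherwise hyper decides per connection.
            if let Some(writev) = writev {
                http1.writev(writev);
            }
        }
        builder
            .http2()
            .max_concurrent_streams(100)
            .enable_connect_protocol();
        builder
    }
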
@@ -103,7 +118,7 @@ impl SingleMode {
 
     pub fn stop(&self) -> Result<()> {
         SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
-        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+        send_lifecycle_event(LifecycleEvent::Shutdown);
         Ok(())
     }
 
@@ -182,7 +197,7 @@
             .unwrap();
         let receiver = self.clone();
         monitor_runtime.block_on({
-            let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
+            let mut lifecycle_rx = subscribe_runtime_to_signals();
             let receiver = receiver.clone();
             let thread_workers = thread_workers.clone();
             async move {
@@ -201,18 +216,19 @@
                         }
                         lifecycle_event = lifecycle_rx.recv() => {
                             match lifecycle_event {
-                                Ok(LifecycleEvent::Restart) => {
+                                Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => {
                                     receiver.restart().await.ok();
                                 }
-                                Ok(LifecycleEvent::Reload) => {
-                                    receiver.reload().await.ok();
-                                }
                                 Ok(LifecycleEvent::Shutdown) => {
                                     break;
                                 }
                                 Ok(LifecycleEvent::PrintInfo) => {
                                     receiver.print_info(thread_workers.clone()).await.ok();
                                 }
+                                Err(e) => {
+                                    debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e);
+                                    break;
+                                }
                                 _ => {}
                             }
                         }
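
With the signal channel now created and torn down per runtime, a receiver can see Err not only when it lags but whenever unsubscribe_runtime drops the sender, so both loops in this diff break on Err instead of merely logging it. A self-contained sketch of that recv pattern (the Event enum is a stand-in for LifecycleEvent):

    use tokio::sync::broadcast;

    #[derive(Clone, Debug)]
    enum Event {
        Shutdown,
        PrintInfo,
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = broadcast::channel::<Event>(5);
        tokio::spawn(async move {
            tx.send(Event::PrintInfo).ok();
            // Dropping the last sender closes the channel, so recv()
            // yields Err(RecvError::Closed) once buffered events drain.
        });
        loop {
            match rx.recv().await {
                Ok(Event::Shutdown) => break,
                Ok(event) => println!("event: {:?}", event),
                // Err covers both Closed and Lagged; exiting on either is
                // the conservative choice the monitor loop above makes.
                Err(e) => {
                    eprintln!("lifecycle channel closed: {:?}", e);
                    break;
                }
            }
        }
    }
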
@@ -227,13 +243,15 @@
 
     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let (thread_workers, job_sender, nonblocking_sender) =
-            build_thread_workers(self.server_config.server_params.read().clone(), Pid::this())
-                .inspect_err(|e| {
-                    if let Some(err_val) = e.value() {
-                        print_rb_backtrace(err_val);
-                    }
-                })?;
+        let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers(
+            self.server_config.server_params.read().clone(),
+            self.worker_id,
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;
 
         let worker_count = thread_workers.len();
         info!(
@@ -244,6 +262,7 @@
         let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+        let is_zero_worker = self.is_zero_worker();
         if monitor_thread.is_none() {
             error!("Failed to start monitor thread");
             return Err(ItsiError::new("Failed to start monitor thread"));
@@ -253,106 +272,123 @@
             return Ok(());
         }
         let runtime = self.build_runtime();
-        let result = runtime.block_on(
-            async {
-            let mut listener_task_set = JoinSet::new();
-            let server_params = self.server_config.server_params.read().clone();
-            if let Err(err) = server_params.initialize_middleware().await {
-                error!("Failed to initialize middleware: {}", err);
-                return Err(ItsiError::new("Failed to initialize middleware"))
-            }
-            let tokio_listeners = server_params.listeners.lock()
-                .drain(..)
-                .map(|list| {
-                    Arc::new(list.into_tokio_listener())
-                })
-                .collect::<Vec<_>>();
-
-            tokio_listeners.iter().cloned().for_each(|listener| {
-                let shutdown_sender = shutdown_sender.clone();
-                let job_sender = job_sender.clone();
-                let nonblocking_sender = nonblocking_sender.clone();
-
-                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-                let mut shutdown_receiver = shutdown_sender.subscribe();
-                let mut acceptor = Acceptor{
-                    acceptor_args: Arc::new(
-                        AcceptorArgs{
-                            strategy: self.clone(),
-                            listener_info: listener.listener_info(),
-                            shutdown_receiver: shutdown_sender.subscribe(),
-                            job_sender: job_sender.clone(),
-                            nonblocking_sender: nonblocking_sender.clone(),
-                            server_params: server_params.clone()
-                        }
-                    ),
-                    join_set: JoinSet::new()
-                };
-
-                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
-                let acme_task_listener_clone = listener.clone();
-                listener_task_set.spawn(async move {
-                    acme_task_listener_clone.spawn_acme_event_task(shutdown_rx_for_acme_task).await;
-                });
-
-                listener_task_set.spawn(async move {
-                    loop {
-                        tokio::select! {
-                            accept_result = listener.accept() => {
-                                match accept_result {
-                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
-                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
-                                }
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                debug!("Shutdown requested via receiver");
-                                break;
-                            },
-                            lifecycle_event = lifecycle_rx.recv() => {
-                                match lifecycle_event {
-                                    Ok(LifecycleEvent::Shutdown) => {
-                                        debug!("Received LifecycleEvent::Shutdown");
-                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
-                                        for _ in 0..worker_count {
-                                            let _ = job_sender.send(RequestJob::Shutdown).await;
-                                            let _ = nonblocking_sender.send(RequestJob::Shutdown).await;
-                                        }
-                                        break;
-                                    },
-                                    Err(e) => error!("Error receiving lifecycle event: {:?}", e),
-                                    _ => ()
+        let result = runtime.block_on(async {
+            let mut listener_task_set = JoinSet::new();
+            let server_params = self.server_config.server_params.read().clone();
+            if let Err(err) = server_params.initialize_middleware().await {
+                error!("Failed to initialize middleware: {}", err);
+                return Err(ItsiError::new("Failed to initialize middleware"));
+            }
+            let tokio_listeners = server_params
+                .listeners
+                .lock()
+                .drain(..)
+                .map(|list| Arc::new(list.into_tokio_listener(is_zero_worker)))
+                .collect::<Vec<_>>();
+
+            tokio_listeners.iter().cloned().for_each(|listener| {
+                let shutdown_sender = shutdown_sender.clone();
+                let job_sender = job_sender.clone();
+                let nonblocking_sender = nonblocking_sender.clone();
+
+                let mut lifecycle_rx = subscribe_runtime_to_signals();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let mut acceptor = Acceptor {
+                    acceptor_args: Arc::new(AcceptorArgs {
+                        strategy: self.clone(),
+                        listener_info: listener.listener_info(),
+                        shutdown_receiver: shutdown_sender.subscribe(),
+                        job_sender: job_sender.clone(),
+                        nonblocking_sender: nonblocking_sender.clone(),
+                        server_params: server_params.clone(),
+                    }),
+                    join_set: JoinSet::new(),
+                };
+
+                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                let acme_task_listener_clone = listener.clone();
+
+                let mut after_accept_wait: Option<Duration> = None::<Duration>;
+
+                if cfg!(target_os = "macos") {
+                    after_accept_wait = if server_params.workers > 1 {
+                        Some(Duration::from_nanos(10 * server_params.workers as u64))
+                    } else {
+                        None
+                    };
+                };
+
+                listener_task_set.spawn(async move {
+                    acme_task_listener_clone
+                        .spawn_acme_event_task(shutdown_rx_for_acme_task)
+                        .await;
+                });
+
+                listener_task_set.spawn(async move {
+                    loop {
+                        // Process any pending signals before select
+                        tokio::select! {
+                            accept_result = listener.accept() => {
+                                match accept_result {
+                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                }
+                                if cfg!(target_os = "macos") {
+                                    if let Some(after_accept_wait) = after_accept_wait{
+                                        tokio::time::sleep(after_accept_wait).await;
                                     }
                                 }
-                            }
-                        }
-                        acceptor.join().await;
-                    });
-                });
-
-                if self.is_single_mode() {
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                debug!("Shutdown requested via receiver");
+                                break;
+                            },
+                            lifecycle_event = lifecycle_rx.recv() => {
+                                match lifecycle_event {
+                                    Ok(LifecycleEvent::Shutdown) => {
+                                        debug!("Received LifecycleEvent::Shutdown");
+                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                        break;
+                                    },
+                                    Err(e) => {
+                                        debug!("Lifecycle channel closed: {:?}, exiting accept loop", e);
+                                        break
+                                    },
+                                    _ => ()
+                                }
+                            }
+                        }
+                    }
+                    acceptor.join().await;
+                });
+            });
+
+            if self.is_single_mode() {
                 self.invoke_hook("after_start");
-            }
+            }
 
-            while let Some(_res) = listener_task_set.join_next().await {}
-            drop(tokio_listeners);
+            while let Some(_res) = listener_task_set.join_next().await {}
+            drop(tokio_listeners);
 
-            Ok::<(), ItsiError>(())
-        });
+            Ok::<(), ItsiError>(())
+        });
 
         debug!("Single mode runtime exited.");
 
+        for _i in 0..thread_workers.len() {
+            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
+            nonblocking_sender
+                .send_blocking(RequestJob::Shutdown)
+                .unwrap();
+        }
         if result.is_err() {
-            for _i in 0..thread_workers.len() {
-                job_sender.send_blocking(RequestJob::Shutdown).unwrap();
-                nonblocking_sender
-                    .send_blocking(RequestJob::Shutdown)
-                    .unwrap();
-            }
-            self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+            send_lifecycle_event(LifecycleEvent::Shutdown);
         }
 
         shutdown_sender.send(RunningPhase::Shutdown).ok();
         runtime.shutdown_timeout(Duration::from_millis(100));
+        unsubscribe_runtime();
+
         debug!("Shutdown timeout finished.");
 
         let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
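
Note the shutdown ordering change: the worker threads are now always sent RequestJob::Shutdown after block_on returns, where previously this happened only on error, which could leave threads parked on the job queue during a clean exit. A reduced sketch of the pattern: an async accept loop gated by a watch channel, then blocking shutdown messages pushed to each worker from outside the runtime (the async_channel crate is an assumption inferred from the send_blocking calls; Itsi's real types differ):

    use tokio::sync::watch;

    enum RequestJob {
        Shutdown,
    }

    fn main() {
        let (job_tx, job_rx) = async_channel::unbounded::<RequestJob>();
        let worker = std::thread::spawn(move || {
            // Blocking worker: drain jobs until a Shutdown arrives.
            while let Ok(job) = job_rx.recv_blocking() {
                if matches!(job, RequestJob::Shutdown) {
                    break;
                }
            }
        });

        let (shutdown_tx, mut shutdown_rx) = watch::channel(false);
        let runtime = tokio::runtime::Runtime::new().unwrap();
        runtime.block_on(async {
            shutdown_tx.send(true).ok(); // normally sent from a signal arm
            // The accept loop selects between new connections and this:
            shutdown_rx.changed().await.ok();
        });

        // Unconditionally wake every worker, mirroring the diff's
        // post-block_on loop, so a clean exit can't strand a thread.
        job_tx.send_blocking(RequestJob::Shutdown).unwrap();
        worker.join().unwrap();
    }
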
@@ -384,26 +420,6 @@
     pub fn is_single_mode(&self) -> bool {
         self.server_config.server_params.read().workers == 1
     }
-    /// Attempts to reload the config "live"
-    /// Not that when running in single mode this will not unload
-    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
-    pub async fn reload(&self) -> Result<()> {
-        if !self.server_config.check_config().await {
-            return Ok(());
-        }
-        let should_reexec = self.server_config.clone().reload(false)?;
-        if should_reexec {
-            if self.is_single_mode() {
-                self.invoke_hook("before_restart");
-            }
-            self.server_config.dup_fds()?;
-            self.server_config.reload_exec()?;
-        }
-        self.restart_requested.store(true, Ordering::SeqCst);
-        self.stop()?;
-        self.server_config.server_params.read().preload_ruby()?;
-        Ok(())
-    }
 
     pub fn invoke_hook(&self, hook_name: &str) {
         if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
data/ext/itsi_server/src/server/signal.rs
@@ -1,22 +1,50 @@
-use std::sync::{
-    atomic::{AtomicBool, AtomicI8},
-    LazyLock,
+use std::{
+    collections::VecDeque,
+    sync::atomic::{AtomicBool, AtomicI8},
 };
 
 use nix::libc::{self, sighandler_t};
-use tokio::sync::{self, broadcast};
+use parking_lot::Mutex;
+use tokio::sync::broadcast;
 
 use super::lifecycle_event::LifecycleEvent;
 
 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
 pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
-pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
-    broadcast::Sender<LifecycleEvent>,
-    broadcast::Receiver<LifecycleEvent>,
-)> = LazyLock::new(|| sync::broadcast::channel(5));
+pub static SIGNAL_HANDLER_CHANNEL: Mutex<Option<broadcast::Sender<LifecycleEvent>>> =
+    Mutex::new(None);
+
+pub static PENDING_QUEUE: Mutex<VecDeque<LifecycleEvent>> = Mutex::new(VecDeque::new());
+
+pub fn subscribe_runtime_to_signals() -> broadcast::Receiver<LifecycleEvent> {
+    let mut guard = SIGNAL_HANDLER_CHANNEL.lock();
+    if let Some(sender) = guard.as_ref() {
+        return sender.subscribe();
+    }
+    let (sender, receiver) = broadcast::channel(5);
+    let sender_clone = sender.clone();
+    std::thread::spawn(move || {
+        std::thread::sleep(std::time::Duration::from_millis(50));
+        for event in PENDING_QUEUE.lock().drain(..) {
+            sender_clone.send(event).ok();
+        }
+    });
+
+    guard.replace(sender);
+
+    receiver
+}
+
+pub fn unsubscribe_runtime() {
+    SIGNAL_HANDLER_CHANNEL.lock().take();
+}
 
 pub fn send_lifecycle_event(event: LifecycleEvent) {
-    SIGNAL_HANDLER_CHANNEL.0.send(event).ok();
+    if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() {
+        sender.send(event).ok();
+    } else {
+        PENDING_QUEUE.lock().push_back(event);
+    }
 }
 
 fn receive_signal(signum: i32, _: sighandler_t) {
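
The replacement channel is created lazily on first subscription, and events raised while no runtime is listening (for example, a signal delivered between re-exec and runtime start) are parked in PENDING_QUEUE and replayed once a subscriber appears. A compact model of that buffer-and-replay behavior, using a String event type instead of LifecycleEvent and a std Mutex instead of parking_lot; the real code also defers the replay to a short-lived thread so additional receivers can subscribe first, while this sketch replays synchronously for brevity:

    use std::collections::VecDeque;
    use std::sync::Mutex;

    use tokio::sync::broadcast;

    static CHANNEL: Mutex<Option<broadcast::Sender<String>>> = Mutex::new(None);
    static PENDING: Mutex<VecDeque<String>> = Mutex::new(VecDeque::new());

    fn send_event(event: String) {
        // With no live subscriber, park the event instead of losing it.
        if let Some(sender) = CHANNEL.lock().unwrap().as_ref() {
            sender.send(event).ok();
        } else {
            PENDING.lock().unwrap().push_back(event);
        }
    }

    fn subscribe() -> broadcast::Receiver<String> {
        let mut guard = CHANNEL.lock().unwrap();
        if let Some(sender) = guard.as_ref() {
            return sender.subscribe();
        }
        let (sender, receiver) = broadcast::channel(5);
        // Replay anything that arrived before this subscriber existed.
        for event in PENDING.lock().unwrap().drain(..) {
            sender.send(event).ok();
        }
        guard.replace(sender);
        receiver
    }

    #[tokio::main]
    async fn main() {
        send_event("shutdown".into()); // no subscriber yet: queued
        let mut rx = subscribe();      // queue is replayed on subscribe
        assert_eq!(rx.recv().await.unwrap(), "shutdown");
    }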