itsi-server 0.2.16 → 0.2.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +3 -1
- data/exe/itsi +6 -1
- data/ext/itsi_acme/Cargo.toml +1 -1
- data/ext/itsi_scheduler/Cargo.toml +1 -1
- data/ext/itsi_server/Cargo.toml +3 -1
- data/ext/itsi_server/src/lib.rs +6 -1
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +2 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_call.rs +4 -4
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response_stream/mod.rs +14 -13
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +64 -33
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +151 -152
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +6 -15
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +26 -5
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +1 -1
- data/ext/itsi_server/src/server/binds/listener.rs +45 -7
- data/ext/itsi_server/src/server/frame_stream.rs +142 -0
- data/ext/itsi_server/src/server/http_message_types.rs +142 -9
- data/ext/itsi_server/src/server/io_stream.rs +28 -5
- data/ext/itsi_server/src/server/lifecycle_event.rs +1 -1
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +2 -3
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +8 -10
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +2 -3
- data/ext/itsi_server/src/server/middleware_stack/middlewares/csp.rs +3 -3
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response/default_responses.rs +54 -56
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +5 -7
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +5 -5
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +7 -10
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +2 -3
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +1 -2
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_response.rs +4 -6
- data/ext/itsi_server/src/server/mod.rs +1 -0
- data/ext/itsi_server/src/server/process_worker.rs +3 -4
- data/ext/itsi_server/src/server/serve_strategy/acceptor.rs +16 -12
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +87 -31
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +158 -142
- data/ext/itsi_server/src/server/signal.rs +37 -9
- data/ext/itsi_server/src/server/thread_worker.rs +84 -69
- data/ext/itsi_server/src/services/itsi_http_service.rs +43 -43
- data/ext/itsi_server/src/services/static_file_server.rs +28 -47
- data/lib/itsi/http_request.rb +31 -39
- data/lib/itsi/http_response.rb +5 -0
- data/lib/itsi/rack_env_pool.rb +59 -0
- data/lib/itsi/server/config/dsl.rb +5 -4
- data/lib/itsi/server/config/middleware/proxy.rb +1 -1
- data/lib/itsi/server/config/middleware/rackup_file.rb +2 -2
- data/lib/itsi/server/config/options/auto_reload_config.rb +6 -2
- data/lib/itsi/server/config/options/include.rb +5 -2
- data/lib/itsi/server/config/options/pipeline_flush.md +16 -0
- data/lib/itsi/server/config/options/pipeline_flush.rb +19 -0
- data/lib/itsi/server/config/options/writev.md +25 -0
- data/lib/itsi/server/config/options/writev.rb +19 -0
- data/lib/itsi/server/config.rb +21 -8
- data/lib/itsi/server/default_config/Itsi.rb +1 -4
- data/lib/itsi/server/grpc/grpc_call.rb +2 -0
- data/lib/itsi/server/grpc/grpc_interface.rb +2 -2
- data/lib/itsi/server/rack/handler/itsi.rb +3 -1
- data/lib/itsi/server/rack_interface.rb +17 -12
- data/lib/itsi/server/scheduler_interface.rb +2 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +1 -0
- data/lib/ruby_lsp/itsi/addon.rb +12 -13
- metadata +7 -1
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs

@@ -4,7 +4,10 @@ use crate::{
         lifecycle_event::LifecycleEvent,
         request_job::RequestJob,
         serve_strategy::acceptor::{Acceptor, AcceptorArgs},
-        signal::{
+        signal::{
+            send_lifecycle_event, subscribe_runtime_to_signals, unsubscribe_runtime,
+            SHUTDOWN_REQUESTED,
+        },
         thread_worker::{build_thread_workers, ThreadWorker},
     },
 };
@@ -29,22 +32,20 @@ use std::{
 };
 use tokio::{
     runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{
-        broadcast,
-        watch::{self},
-    },
+    sync::watch::{self},
     task::JoinSet,
 };
 use tracing::instrument;

 pub struct SingleMode {
+    pub worker_id: usize,
     pub executor: Builder<TokioExecutor>,
     pub server_config: Arc<ItsiServerConfig>,
-    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
     pub restart_requested: AtomicBool,
     pub status: RwLock<HashMap<u8, (u64, u64)>>,
 }

+#[derive(PartialEq, Debug)]
 pub enum RunningPhase {
     Running,
     ShutdownPending,
@@ -53,31 +54,45 @@ pub enum RunningPhase {

 impl SingleMode {
     #[instrument(parent=None, skip_all)]
-    pub fn new(server_config: Arc<ItsiServerConfig
+    pub fn new(server_config: Arc<ItsiServerConfig>, worker_id: usize) -> Result<Self> {
         server_config.server_params.read().preload_ruby()?;
-        let
-
-            .
-            .
-
-
-
-
-
-
-
-
-
+        let executor = {
+            let mut executor = Builder::new(TokioExecutor::new());
+            let server_params = server_config.server_params.read();
+            let mut http1_executor = executor.http1();
+
+            http1_executor
+                .header_read_timeout(server_params.header_read_timeout)
+                .pipeline_flush(server_params.pipeline_flush)
+                .timer(TokioTimer::new());
+
+            if let Some(writev) = server_params.writev {
+                http1_executor.writev(writev);
+            }
+
+            executor
+                .http2()
+                .max_concurrent_streams(server_params.max_concurrent_streams)
+                .max_local_error_reset_streams(server_params.max_local_error_reset_streams)
+                .max_header_list_size(server_params.max_header_list_size)
+                .max_send_buf_size(server_params.max_send_buf_size)
+                .enable_connect_protocol();
+            executor
+        };

         Ok(Self {
+            worker_id,
             executor,
             server_config,
-            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
             restart_requested: AtomicBool::new(false),
             status: RwLock::new(HashMap::new()),
         })
     }

+    pub fn is_zero_worker(&self) -> bool {
+        self.worker_id == 0
+    }
+
     pub fn build_runtime(&self) -> Runtime {
         let mut builder: RuntimeBuilder = if self
             .server_config
@@ -103,7 +118,7 @@ impl SingleMode {

     pub fn stop(&self) -> Result<()> {
         SHUTDOWN_REQUESTED.store(true, std::sync::atomic::Ordering::SeqCst);
-
+        send_lifecycle_event(LifecycleEvent::Shutdown);
         Ok(())
     }

@@ -182,7 +197,7 @@ impl SingleMode {
                 .unwrap();
             let receiver = self.clone();
             monitor_runtime.block_on({
-                let mut lifecycle_rx =
+                let mut lifecycle_rx = subscribe_runtime_to_signals();
                 let receiver = receiver.clone();
                 let thread_workers = thread_workers.clone();
                 async move {
@@ -201,18 +216,19 @@ impl SingleMode {
                     }
                     lifecycle_event = lifecycle_rx.recv() => {
                         match lifecycle_event {
-                            Ok(LifecycleEvent::Restart) => {
+                            Ok(LifecycleEvent::Restart) | Ok(LifecycleEvent::Reload) => {
                                 receiver.restart().await.ok();
                             }
-                            Ok(LifecycleEvent::Reload) => {
-                                receiver.reload().await.ok();
-                            }
                             Ok(LifecycleEvent::Shutdown) => {
                                 break;
                             }
                             Ok(LifecycleEvent::PrintInfo) => {
                                 receiver.print_info(thread_workers.clone()).await.ok();
                             }
+                            Err(e) => {
+                                debug!("Lifecycle channel closed: {:?}, exiting single mode monitor loop", e);
+                                break;
+                            }
                             _ => {}
                         }
                     }
@@ -227,13 +243,15 @@ impl SingleMode {

     #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
     pub fn run(self: Arc<Self>) -> Result<()> {
-        let (thread_workers, job_sender, nonblocking_sender) =
-
-
-
-
-
-
+        let (thread_workers, job_sender, nonblocking_sender) = build_thread_workers(
+            self.server_config.server_params.read().clone(),
+            self.worker_id,
+        )
+        .inspect_err(|e| {
+            if let Some(err_val) = e.value() {
+                print_rb_backtrace(err_val);
+            }
+        })?;

         let worker_count = thread_workers.len();
         info!(
@@ -244,6 +262,7 @@ impl SingleMode {
         let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
         let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
         let monitor_thread = self.clone().start_monitors(thread_workers.clone());
+        let is_zero_worker = self.is_zero_worker();
         if monitor_thread.is_none() {
             error!("Failed to start monitor thread");
             return Err(ItsiError::new("Failed to start monitor thread"));
@@ -253,106 +272,123 @@ impl SingleMode {
             return Ok(());
         }
         let runtime = self.build_runtime();
-        let result = runtime.block_on(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                                        break;
-                                    },
-                                    Err(e) => error!("Error receiving lifecycle event: {:?}", e),
-                                    _ => ()
+        let result = runtime.block_on(async {
+            let mut listener_task_set = JoinSet::new();
+            let server_params = self.server_config.server_params.read().clone();
+            if let Err(err) = server_params.initialize_middleware().await {
+                error!("Failed to initialize middleware: {}", err);
+                return Err(ItsiError::new("Failed to initialize middleware"));
+            }
+            let tokio_listeners = server_params
+                .listeners
+                .lock()
+                .drain(..)
+                .map(|list| Arc::new(list.into_tokio_listener(is_zero_worker)))
+                .collect::<Vec<_>>();
+
+            tokio_listeners.iter().cloned().for_each(|listener| {
+                let shutdown_sender = shutdown_sender.clone();
+                let job_sender = job_sender.clone();
+                let nonblocking_sender = nonblocking_sender.clone();
+
+                let mut lifecycle_rx = subscribe_runtime_to_signals();
+                let mut shutdown_receiver = shutdown_sender.subscribe();
+                let mut acceptor = Acceptor {
+                    acceptor_args: Arc::new(AcceptorArgs {
+                        strategy: self.clone(),
+                        listener_info: listener.listener_info(),
+                        shutdown_receiver: shutdown_sender.subscribe(),
+                        job_sender: job_sender.clone(),
+                        nonblocking_sender: nonblocking_sender.clone(),
+                        server_params: server_params.clone(),
+                    }),
+                    join_set: JoinSet::new(),
+                };
+
+                let shutdown_rx_for_acme_task = shutdown_receiver.clone();
+                let acme_task_listener_clone = listener.clone();
+
+                let mut after_accept_wait: Option<Duration> = None::<Duration>;
+
+                if cfg!(target_os = "macos") {
+                    after_accept_wait = if server_params.workers > 1 {
+                        Some(Duration::from_nanos(10 * server_params.workers as u64))
+                    } else {
+                        None
+                    };
+                };
+
+                listener_task_set.spawn(async move {
+                    acme_task_listener_clone
+                        .spawn_acme_event_task(shutdown_rx_for_acme_task)
+                        .await;
+                });
+
+                listener_task_set.spawn(async move {
+                    loop {
+                        // Process any pending signals before select
+                        tokio::select! {
+                            accept_result = listener.accept() => {
+                                match accept_result {
+                                    Ok(accepted) => acceptor.serve_connection(accepted).await,
+                                    Err(e) => debug!("Listener.accept failed: {:?}", e)
+                                }
+                                if cfg!(target_os = "macos") {
+                                    if let Some(after_accept_wait) = after_accept_wait{
+                                        tokio::time::sleep(after_accept_wait).await;
                                     }
-
-
-
-
-
-
-
-
+                                }
+                            },
+                            _ = shutdown_receiver.changed() => {
+                                debug!("Shutdown requested via receiver");
+                                break;
+                            },
+                            lifecycle_event = lifecycle_rx.recv() => {
+                                match lifecycle_event {
+                                    Ok(LifecycleEvent::Shutdown) => {
+                                        debug!("Received LifecycleEvent::Shutdown");
+                                        let _ = shutdown_sender.send(RunningPhase::ShutdownPending);
+                                        break;
+                                    },
+                                    Err(e) => {
+                                        debug!("Lifecycle channel closed: {:?}, exiting accept loop", e);
+                                        break
+                                    },
+                                    _ => ()
+                                }
+                            }
+                        }
+                    }
+                    acceptor.join().await;
+                });
+            });
+
+            if self.is_single_mode() {
                 self.invoke_hook("after_start");
-
+            }

-
-
+            while let Some(_res) = listener_task_set.join_next().await {}
+            drop(tokio_listeners);

-
-
+            Ok::<(), ItsiError>(())
+        });

         debug!("Single mode runtime exited.");

+        for _i in 0..thread_workers.len() {
+            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
+            nonblocking_sender
+                .send_blocking(RequestJob::Shutdown)
+                .unwrap();
+        }
         if result.is_err() {
-
-            job_sender.send_blocking(RequestJob::Shutdown).unwrap();
-            nonblocking_sender
-                .send_blocking(RequestJob::Shutdown)
-                .unwrap();
-            }
-            self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
+            send_lifecycle_event(LifecycleEvent::Shutdown);
         }

         shutdown_sender.send(RunningPhase::Shutdown).ok();
         runtime.shutdown_timeout(Duration::from_millis(100));
+        unsubscribe_runtime();
+
         debug!("Shutdown timeout finished.");

         let deadline = Instant::now() + Duration::from_secs_f64(shutdown_timeout);
@@ -384,26 +420,6 @@ impl SingleMode {
     pub fn is_single_mode(&self) -> bool {
         self.server_config.server_params.read().workers == 1
     }
-    /// Attempts to reload the config "live"
-    /// Not that when running in single mode this will not unload
-    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
-    pub async fn reload(&self) -> Result<()> {
-        if !self.server_config.check_config().await {
-            return Ok(());
-        }
-        let should_reexec = self.server_config.clone().reload(false)?;
-        if should_reexec {
-            if self.is_single_mode() {
-                self.invoke_hook("before_restart");
-            }
-            self.server_config.dup_fds()?;
-            self.server_config.reload_exec()?;
-        }
-        self.restart_requested.store(true, Ordering::SeqCst);
-        self.stop()?;
-        self.server_config.server_params.read().preload_ruby()?;
-        Ok(())
-    }

     pub fn invoke_hook(&self, hook_name: &str) {
         if let Some(hook) = self.server_config.server_params.read().hooks.get(hook_name) {
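
The rewritten run body above gives each listener its own accept task: every task selects over the next accepted connection, a watch channel that signals shutdown, and the broadcast lifecycle channel obtained from subscribe_runtime_to_signals(). A minimal, standalone sketch of that select shape follows; it is illustrative only (the Event enum, accept_loop, and the plain TcpListener are stand-ins, not itsi-server's API).

use tokio::net::TcpListener;
use tokio::sync::{broadcast, watch};

#[derive(Clone, Debug)]
enum Event {
    Shutdown,
}

// One accept task per listener: race the next connection against shutdown
// and lifecycle notifications, handing accepted sockets to their own tasks.
async fn accept_loop(
    listener: TcpListener,
    mut shutdown_rx: watch::Receiver<bool>,
    mut lifecycle_rx: broadcast::Receiver<Event>,
) {
    loop {
        tokio::select! {
            accepted = listener.accept() => {
                match accepted {
                    Ok((stream, peer)) => {
                        tokio::spawn(async move {
                            // Serve the connection here.
                            let _ = (stream, peer);
                        });
                    }
                    Err(e) => eprintln!("accept failed: {e:?}"),
                }
            }
            _ = shutdown_rx.changed() => break,
            event = lifecycle_rx.recv() => {
                match event {
                    Ok(Event::Shutdown) | Err(_) => break,
                }
            }
        }
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let (_shutdown_tx, shutdown_rx) = watch::channel(false);
    let (lifecycle_tx, lifecycle_rx) = broadcast::channel(8);
    // In the real server, signal handling would feed `lifecycle_tx`.
    lifecycle_tx.send(Event::Shutdown).ok();
    accept_loop(listener, shutdown_rx, lifecycle_rx).await;
    Ok(())
}
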
data/ext/itsi_server/src/server/signal.rs

@@ -1,22 +1,50 @@
-use std::
-
-
+use std::{
+    collections::VecDeque,
+    sync::atomic::{AtomicBool, AtomicI8},
 };

 use nix::libc::{self, sighandler_t};
-use
+use parking_lot::Mutex;
+use tokio::sync::broadcast;

 use super::lifecycle_event::LifecycleEvent;

 pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
 pub static SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);
-pub static SIGNAL_HANDLER_CHANNEL:
-
-
-
+pub static SIGNAL_HANDLER_CHANNEL: Mutex<Option<broadcast::Sender<LifecycleEvent>>> =
+    Mutex::new(None);
+
+pub static PENDING_QUEUE: Mutex<VecDeque<LifecycleEvent>> = Mutex::new(VecDeque::new());
+
+pub fn subscribe_runtime_to_signals() -> broadcast::Receiver<LifecycleEvent> {
+    let mut guard = SIGNAL_HANDLER_CHANNEL.lock();
+    if let Some(sender) = guard.as_ref() {
+        return sender.subscribe();
+    }
+    let (sender, receiver) = broadcast::channel(5);
+    let sender_clone = sender.clone();
+    std::thread::spawn(move || {
+        std::thread::sleep(std::time::Duration::from_millis(50));
+        for event in PENDING_QUEUE.lock().drain(..) {
+            sender_clone.send(event).ok();
+        }
+    });
+
+    guard.replace(sender);
+
+    receiver
+}
+
+pub fn unsubscribe_runtime() {
+    SIGNAL_HANDLER_CHANNEL.lock().take();
+}

 pub fn send_lifecycle_event(event: LifecycleEvent) {
-    SIGNAL_HANDLER_CHANNEL.
+    if let Some(sender) = SIGNAL_HANDLER_CHANNEL.lock().as_ref() {
+        sender.send(event).ok();
+    } else {
+        PENDING_QUEUE.lock().push_back(event);
+    }
 }

 fn receive_signal(signum: i32, _: sighandler_t) {