itsi-server 0.1.1 → 0.1.13
This diff shows the changes between publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of itsi-server might be problematic.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +4417 -0
- data/Cargo.toml +7 -0
- data/README.md +4 -0
- data/Rakefile +8 -1
- data/_index.md +6 -0
- data/exe/itsi +94 -45
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +68 -0
- data/ext/itsi_error/src/lib.rs +18 -34
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +3 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
- data/ext/itsi_rb_helpers/src/lib.rs +140 -10
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +38 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +73 -13
- data/ext/itsi_server/extconf.rb +1 -1
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +100 -40
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +109 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +141 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +282 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +388 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
- data/ext/itsi_server/src/server/bind.rs +75 -31
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/cache_store.rs +74 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_service.rs +172 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
- data/ext/itsi_server/src/server/listener.rs +332 -132
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +321 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
- data/ext/itsi_server/src/server/mod.rs +15 -2
- data/ext/itsi_server/src/server/process_worker.rs +229 -0
- data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +337 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +30 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +421 -0
- data/ext/itsi_server/src/server/signal.rs +93 -0
- data/ext/itsi_server/src/server/static_file_server.rs +984 -0
- data/ext/itsi_server/src/server/thread_worker.rs +444 -0
- data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/tls.rs +187 -60
- data/ext/itsi_server/src/server/types.rs +43 -0
- data/ext/itsi_tracing/Cargo.toml +5 -0
- data/ext/itsi_tracing/src/lib.rs +225 -7
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/http_request.rb +87 -0
- data/lib/itsi/http_response.rb +39 -0
- data/lib/itsi/server/Itsi.rb +119 -0
- data/lib/itsi/server/config/dsl.rb +506 -0
- data/lib/itsi/server/config.rb +131 -0
- data/lib/itsi/server/default_app/default_app.rb +38 -0
- data/lib/itsi/server/default_app/index.html +91 -0
- data/lib/itsi/server/grpc_interface.rb +213 -0
- data/lib/itsi/server/rack/handler/itsi.rb +27 -0
- data/lib/itsi/server/rack_interface.rb +94 -0
- data/lib/itsi/server/scheduler_interface.rb +21 -0
- data/lib/itsi/server/scheduler_mode.rb +10 -0
- data/lib/itsi/server/signal_trap.rb +29 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +90 -9
- data/lib/itsi/standard_headers.rb +86 -0
- metadata +122 -31
- data/ext/itsi_server/src/request/itsi_request.rs +0 -143
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -32
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -52
- data/ext/itsi_server/src/server/itsi_server.rs +0 -182
- data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
- data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
- data/lib/itsi/request.rb +0 -39
data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs

@@ -0,0 +1,337 @@
+use crate::ruby_types::itsi_server::itsi_server_config::ItsiServerConfig;
+use crate::server::signal::SIGNAL_HANDLER_CHANNEL;
+use crate::server::{lifecycle_event::LifecycleEvent, process_worker::ProcessWorker};
+use itsi_error::{ItsiError, Result};
+use itsi_rb_helpers::{
+    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
+};
+use itsi_tracing::{error, info, warn};
+use magnus::Value;
+use nix::{libc::exit, unistd::Pid};
+
+use std::{
+    sync::{atomic::AtomicUsize, Arc},
+    time::{Duration, Instant},
+};
+use tokio::{
+    runtime::{Builder as RuntimeBuilder, Runtime},
+    sync::{broadcast, watch, Mutex},
+    time::{self, sleep},
+};
+use tracing::{debug, instrument};
+pub(crate) struct ClusterMode {
+    pub server_config: Arc<ItsiServerConfig>,
+    pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
+    pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
+}
+
+static WORKER_ID: AtomicUsize = AtomicUsize::new(0);
+static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
+    parking_lot::Mutex::new(None);
+
+impl ClusterMode {
+    pub fn new(server_config: Arc<ItsiServerConfig>) -> Self {
+        let process_workers = (0..server_config.server_params.read().workers)
+            .map(|_| ProcessWorker {
+                worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                ..Default::default()
+            })
+            .collect();
+
+        Self {
+            server_config,
+            process_workers: parking_lot::Mutex::new(process_workers),
+            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
+        }
+    }
+
+    pub fn build_runtime(&self) -> Runtime {
+        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
+        builder
+            .thread_name("itsi-server-accept-loop")
+            .thread_stack_size(3 * 1024 * 1024)
+            .enable_io()
+            .enable_time()
+            .build()
+            .expect("Failed to build Tokio runtime")
+    }
+
+    #[allow(clippy::await_holding_lock)]
+    pub async fn handle_lifecycle_event(
+        self: Arc<Self>,
+        lifecycle_event: LifecycleEvent,
+    ) -> Result<()> {
+        match lifecycle_event {
+            LifecycleEvent::Start => Ok(()),
+            LifecycleEvent::PrintInfo => {
+                self.print_info().await?;
+                Ok(())
+            }
+            LifecycleEvent::Shutdown => {
+                self.server_config.stop_watcher()?;
+                self.shutdown().await?;
+                Ok(())
+            }
+            LifecycleEvent::Restart => {
+                self.server_config.dup_fds()?;
+                self.shutdown().await.ok();
+                info!("Shutdown complete. Calling reload exec");
+                self.server_config.reload_exec()?;
+                Ok(())
+            }
+            LifecycleEvent::Reload => {
+                let should_reexec = self.server_config.clone().reload(true)?;
+                if should_reexec {
+                    self.server_config.dup_fds()?;
+                    self.shutdown().await.ok();
+                    self.server_config.reload_exec()?;
+                }
+                let mut workers_to_load = self.server_config.server_params.read().workers;
+                let mut next_workers = Vec::new();
+                for worker in self.process_workers.lock().drain(..) {
+                    if workers_to_load == 0 {
+                        worker.graceful_shutdown(self.clone()).await
+                    } else {
+                        workers_to_load -= 1;
+                        worker.reboot(self.clone()).await?;
+                        next_workers.push(worker);
+                    }
+                }
+                self.process_workers.lock().extend(next_workers);
+                while workers_to_load > 0 {
+                    let mut workers = self.process_workers.lock();
+                    let worker = ProcessWorker {
+                        worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                        ..Default::default()
+                    };
+                    let worker_clone = worker.clone();
+                    let self_clone = self.clone();
+                    create_ruby_thread(move || {
+                        call_without_gvl(move || {
+                            worker_clone.boot(self_clone).ok();
+                        })
+                    });
+                    workers.push(worker);
+                    workers_to_load -= 1
+                }
+                Ok(())
+            }
+            LifecycleEvent::IncreaseWorkers => {
+                let mut workers = self.process_workers.lock();
+                let worker = ProcessWorker {
+                    worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+                    ..Default::default()
+                };
+                let worker_clone = worker.clone();
+                let self_clone = self.clone();
+                create_ruby_thread(move || {
+                    call_without_gvl(move || {
+                        worker_clone.boot(self_clone).ok();
+                    })
+                });
+                workers.push(worker);
+                Ok(())
+            }
+            LifecycleEvent::DecreaseWorkers => {
+                let worker = {
+                    let mut workers = self.process_workers.lock();
+                    workers.pop()
+                };
+                if let Some(dropped_worker) = worker {
+                    dropped_worker.request_shutdown();
+                    let force_kill_time = Instant::now()
+                        + Duration::from_secs_f64(
+                            self.server_config.server_params.read().shutdown_timeout,
+                        );
+                    while dropped_worker.is_alive() && force_kill_time > Instant::now() {
+                        tokio::time::sleep(Duration::from_millis(100)).await;
+                    }
+                    if dropped_worker.is_alive() {
+                        dropped_worker.force_kill();
+                    }
+                };
+                Ok(())
+            }
+            LifecycleEvent::ForceShutdown => {
+                for worker in self.process_workers.lock().iter() {
+                    worker.force_kill();
+                }
+                error!("Force shutdown!");
+                unsafe { exit(0) };
+            }
+            LifecycleEvent::ChildTerminated => {
+                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
+                    i.send(()).ok();
+                });
+                Ok(())
+            }
+        }
+    }
+
+    pub async fn shutdown(&self) -> Result<()> {
+        let shutdown_timeout = self.server_config.server_params.read().shutdown_timeout;
+        let workers = self.process_workers.lock().clone();
+
+        workers.iter().for_each(|worker| worker.request_shutdown());
+
+        let remaining_children = Arc::new(Mutex::new(workers.len()));
+        let monitor_handle = {
+            let remaining_children: Arc<Mutex<usize>> = Arc::clone(&remaining_children);
+            let mut workers = workers.clone();
+            tokio::spawn(async move {
+                loop {
+                    // Check if all workers have exited
+                    let mut remaining = remaining_children.lock().await;
+                    workers.retain(|worker| worker.is_alive());
+                    *remaining = workers.len();
+                    if *remaining == 0 {
+                        break;
+                    }
+                    sleep(Duration::from_millis(100)).await;
+                }
+            })
+        };
+
+        tokio::select! {
+            _ = monitor_handle => {
+              debug!("All children exited early, exit normally")
+            }
+            _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
+                warn!("Graceful shutdown timeout reached, force killing remaining children");
+                workers.iter().for_each(|worker| worker.force_kill());
+            }
+        }
+
+        Err(ItsiError::Break())
+    }
+
+    pub async fn print_info(self: Arc<Self>) -> Result<()> {
+        println!("Itsi Cluster Info:");
+        println!("Master PID: {:?}", Pid::this());
+        if let Some(memory_limit) = self.server_config.server_params.read().worker_memory_limit {
+            println!("Worker Memory Limit: {}", memory_limit);
+        }
+
+        if self.server_config.watcher_fd.is_some() {
+            println!("File Watcher Enabled: true",);
+            if let Some(watchers) = self
+                .server_config
+                .server_params
+                .read()
+                .notify_watchers
+                .as_ref()
+            {
+                for watcher in watchers {
+                    println!(
+                        "Watching path: {} => {}",
+                        watcher.0,
+                        watcher
+                            .1
+                            .iter()
+                            .map(|path| path.join(","))
+                            .collect::<Vec<String>>()
+                            .join(" ")
+                    );
+                }
+            }
+        }
+        println!(
+            "Silent Mode: {}",
+            self.server_config.server_params.read().silence
+        );
+        println!(
+            "Preload: {}",
+            self.server_config.server_params.read().preload
+        );
+        let workers = self.process_workers.lock().clone();
+        for worker in workers {
+            worker.print_info()?;
+            sleep(Duration::from_millis(50)).await;
+        }
+        Ok(())
+    }
+
+    pub fn stop(&self) -> Result<()> {
+        for worker in self.process_workers.lock().iter() {
+            if worker.is_alive() {
+                worker.force_kill();
+            }
+        }
+        Ok(())
+    }
+
+    #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
+    pub fn run(self: Arc<Self>) -> Result<()> {
+        info!("Starting in Cluster mode");
+        if let Some(proc) = self
+            .server_config
+            .server_params
+            .read()
+            .hooks
+            .get("before_fork")
+        {
+            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
+        }
+        self.process_workers
+            .lock()
+            .iter()
+            .try_for_each(|worker| worker.boot(Arc::clone(&self)))?;
+
+        let (sender, mut receiver) = watch::channel(());
+        *CHILD_SIGNAL_SENDER.lock() = Some(sender);
+
+        let mut lifecycle_rx = self.lifecycle_channel.subscribe();
+        let self_ref = self.clone();
+
+        self.build_runtime().block_on(async {
+          let self_ref = self_ref.clone();
+          let mut memory_check_interval = time::interval(time::Duration::from_secs(2));
+          loop {
+            tokio::select! {
+              _ = receiver.changed() => {
+                let mut workers = self_ref.process_workers.lock();
+                workers.retain(|worker| {
+                  worker.boot_if_dead(self_ref.clone())
+                });
+                if workers.is_empty() {
+                    warn!("No workers running. Send SIGTTIN to increase worker count");
+                }
+              }
+              _ = memory_check_interval.tick() => {
+                let worker_memory_limit = self_ref.server_config.server_params.read().worker_memory_limit;
+                if let Some(memory_limit) = worker_memory_limit {
+                  let largest_worker = {
+                    let workers = self_ref.process_workers.lock();
+                    workers.iter().max_by(|wa, wb| wa.memory_usage().cmp(&wb.memory_usage())).cloned()
+                  };
+                  if let Some(largest_worker) = largest_worker {
+                    if let Some(current_mem_usage) = largest_worker.memory_usage() {
+                      if current_mem_usage > memory_limit {
+                        largest_worker.reboot(self_ref.clone()).await.ok();
+                        if let Some(hook) = self_ref.server_config.server_params.read().hooks.get("after_memory_threshold_reached") {
+                          call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok());
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+              lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
+                Ok(lifecycle_event) => {
+                  if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await {
+                    match e {
+                      ItsiError::Break() => break,
+                      _ => error!("Error in handle_lifecycle_event {:?}", e)
+                    }
+                  }
+                },
+                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
+              }
+            }
+          }
+        });
+
+        Ok(())
+    }
+}
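The heart of the new `cluster_mode.rs` is a single `tokio::select!` supervision loop that multiplexes three inputs: a `watch` channel notified when a child process terminates, a periodic interval used for the per-worker memory check, and the broadcast `lifecycle_channel` carrying events such as `Shutdown` and `Reload`. Below is a minimal, self-contained sketch of that pattern; the names (`Event`, `check`, the simulated child task) are illustrative only, not the crate's API:

```rust
use std::time::Duration;
use tokio::sync::{broadcast, watch};
use tokio::time;

#[derive(Clone, Debug)]
enum Event {
    Shutdown,
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // Child-exit notifications (fed from a SIGCHLD handler in the real server).
    let (child_tx, mut child_rx) = watch::channel(());
    // Lifecycle events (Shutdown, Reload, etc. in the real server).
    let (event_tx, mut event_rx) = broadcast::channel::<Event>(8);
    // Periodic worker checks (memory limits in the real server).
    let mut check = time::interval(Duration::from_secs(2));

    // Simulate a child exiting, then a shutdown request.
    tokio::spawn(async move {
        child_tx.send(()).ok();
        time::sleep(Duration::from_millis(50)).await;
        event_tx.send(Event::Shutdown).ok();
    });

    loop {
        tokio::select! {
            res = child_rx.changed() => match res {
                Ok(()) => println!("child exited; reboot it if needed"),
                Err(_) => break, // sender gone, nothing left to watch
            },
            _ = check.tick() => println!("periodic worker check"),
            ev = event_rx.recv() => match ev {
                Ok(Event::Shutdown) | Err(_) => break,
            },
        }
    }
    println!("supervision loop exited");
}
```

This shape explains why `run` builds a dedicated current-thread runtime: the master process only supervises; the actual request handling happens in the forked worker processes.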
data/ext/itsi_server/src/server/serve_strategy/mod.rs

@@ -0,0 +1,30 @@
+use std::sync::Arc;
+
+use cluster_mode::ClusterMode;
+use itsi_error::Result;
+use single_mode::SingleMode;
+
+pub mod cluster_mode;
+pub mod single_mode;
+
+#[derive(Clone)]
+pub(crate) enum ServeStrategy {
+    Single(Arc<SingleMode>),
+    Cluster(Arc<ClusterMode>),
+}
+
+impl ServeStrategy {
+    pub fn run(self) -> Result<()> {
+        match self {
+            ServeStrategy::Single(single_router) => single_router.run(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.run(),
+        }
+    }
+
+    pub(crate) fn stop(&self) -> Result<()> {
+        match self {
+            ServeStrategy::Single(single_router) => single_router.stop(),
+            ServeStrategy::Cluster(cluster_router) => cluster_router.stop(),
+        }
+    }
+}
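The new `serve_strategy/mod.rs` is a small enum-based dispatcher: `ServeStrategy` wraps either an `Arc<SingleMode>` or an `Arc<ClusterMode>` and forwards `run`/`stop` to whichever variant is active. A compilable sketch of the same pattern, with hypothetical stand-ins for the two modes and the selection logic:

```rust
use std::sync::Arc;

type Result<T> = std::result::Result<T, String>;

// Stand-ins for the real SingleMode / ClusterMode types.
struct SingleMode;
struct ClusterMode;

impl SingleMode {
    fn run(self: Arc<Self>) -> Result<()> {
        println!("serving in-process");
        Ok(())
    }
}
impl ClusterMode {
    fn run(self: Arc<Self>) -> Result<()> {
        println!("supervising forked workers");
        Ok(())
    }
}

enum ServeStrategy {
    Single(Arc<SingleMode>),
    Cluster(Arc<ClusterMode>),
}

impl ServeStrategy {
    // One match site; callers never branch on the mode themselves.
    fn run(self) -> Result<()> {
        match self {
            ServeStrategy::Single(s) => s.run(),
            ServeStrategy::Cluster(c) => c.run(),
        }
    }
}

fn main() {
    // Hypothetical selection: cluster when more than one worker is requested.
    let workers = 4;
    let strategy = if workers > 1 {
        ServeStrategy::Cluster(Arc::new(ClusterMode))
    } else {
        ServeStrategy::Single(Arc::new(SingleMode))
    };
    strategy.run().unwrap();
}
```

Keeping both modes behind one enum means the caller (the server boot path) picks a strategy once and then treats `run` and `stop` uniformly.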