itsi-server 0.1.1 → 0.1.13
This diff shows the changes between package versions as they were released to their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +7 -0
- data/Cargo.lock +4417 -0
- data/Cargo.toml +7 -0
- data/README.md +4 -0
- data/Rakefile +8 -1
- data/_index.md +6 -0
- data/exe/itsi +94 -45
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +68 -0
- data/ext/itsi_error/src/lib.rs +18 -34
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_error/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_error/target/debug/build/rb-sys-49f554618693db24/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-1mmt5sux7jb0i/s-h510z7m8v9-0bxu7yd.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-2vn3jey74oiw0/s-h5113n0e7e-1v5qzs6.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510ykifhe-0tbnep2.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510yyocpj-0tz7ug7.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-37uv9dicz7awp/s-h510z0xc8g-14ol18k.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3g5qf4y7d54uj/s-h5113n0e7d-1trk8on.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3lpfftm45d3e2/s-h510z7m8r3-1pxp20o.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510ykifek-1uxasnk.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510yyocki-11u37qm.lock +0 -0
- data/ext/itsi_error/target/debug/incremental/itsi_error-3o4qownhl3d7n/s-h510z0xc93-0pmy0zm.lock +0 -0
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +3 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +139 -0
- data/ext/itsi_rb_helpers/src/lib.rs +140 -10
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/common.rs +355 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/dynamic.rs +276 -0
- data/ext/itsi_rb_helpers/target/debug/build/clang-sys-da71b0344e568175/out/macros.rs +49 -0
- data/ext/itsi_rb_helpers/target/debug/build/rb-sys-eb9ed4ff3a60f995/out/bindings-0.9.110-mri-arm64-darwin23-3.4.2.rs +8865 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-040pxg6yhb3g3/s-h5113n7a1b-03bwlt4.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h51113xnh3-1eik1ip.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-131g1u4dzkt1a/s-h5111704jj-0g4rj8x.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-1q2d3drtxrzs5/s-h5113n79yl-0bxcqc5.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h51113xoox-10de2hp.lock +0 -0
- data/ext/itsi_rb_helpers/target/debug/incremental/itsi_rb_helpers-374a9h7ovycj0/s-h5111704w7-0vdq7gq.lock +0 -0
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +38 -0
- data/ext/itsi_server/Cargo.lock +2956 -0
- data/ext/itsi_server/Cargo.toml +73 -13
- data/ext/itsi_server/extconf.rb +1 -1
- data/ext/itsi_server/src/env.rs +43 -0
- data/ext/itsi_server/src/lib.rs +100 -40
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/big_bytes.rs +109 -0
- data/ext/itsi_server/src/ruby_types/itsi_body_proxy/mod.rs +141 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
- data/ext/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_request.rs +282 -0
- data/ext/itsi_server/src/ruby_types/itsi_http_response.rs +388 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/ext/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
- data/ext/itsi_server/src/ruby_types/itsi_server.rs +82 -0
- data/ext/itsi_server/src/ruby_types/mod.rs +55 -0
- data/ext/itsi_server/src/server/bind.rs +75 -31
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/byte_frame.rs +32 -0
- data/ext/itsi_server/src/server/cache_store.rs +74 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_service.rs +172 -0
- data/ext/itsi_server/src/server/lifecycle_event.rs +12 -0
- data/ext/itsi_server/src/server/listener.rs +332 -132
- data/ext/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +321 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
- data/ext/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/ext/itsi_server/src/server/middleware_stack/mod.rs +315 -0
- data/ext/itsi_server/src/server/mod.rs +15 -2
- data/ext/itsi_server/src/server/process_worker.rs +229 -0
- data/ext/itsi_server/src/server/rate_limiter.rs +565 -0
- data/ext/itsi_server/src/server/request_job.rs +11 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +337 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +30 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +421 -0
- data/ext/itsi_server/src/server/signal.rs +93 -0
- data/ext/itsi_server/src/server/static_file_server.rs +984 -0
- data/ext/itsi_server/src/server/thread_worker.rs +444 -0
- data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/tls.rs +187 -60
- data/ext/itsi_server/src/server/types.rs +43 -0
- data/ext/itsi_tracing/Cargo.toml +5 -0
- data/ext/itsi_tracing/src/lib.rs +225 -7
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0994n8rpvvt9m/s-h510hfz1f6-1kbycmq.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-0bob7bf4yq34i/s-h5113125h5-0lh4rag.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2fcodulrxbbxo/s-h510h2infk-0hp5kjw.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2iak63r1woi1l/s-h510h2in4q-0kxfzw1.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2kk4qj9gn5dg2/s-h5113124kv-0enwon2.lock +0 -0
- data/ext/itsi_tracing/target/debug/incremental/itsi_tracing-2mwo0yas7dtw4/s-h510hfz1ha-1udgpei.lock +0 -0
- data/lib/itsi/http_request.rb +87 -0
- data/lib/itsi/http_response.rb +39 -0
- data/lib/itsi/server/Itsi.rb +119 -0
- data/lib/itsi/server/config/dsl.rb +506 -0
- data/lib/itsi/server/config.rb +131 -0
- data/lib/itsi/server/default_app/default_app.rb +38 -0
- data/lib/itsi/server/default_app/index.html +91 -0
- data/lib/itsi/server/grpc_interface.rb +213 -0
- data/lib/itsi/server/rack/handler/itsi.rb +27 -0
- data/lib/itsi/server/rack_interface.rb +94 -0
- data/lib/itsi/server/scheduler_interface.rb +21 -0
- data/lib/itsi/server/scheduler_mode.rb +10 -0
- data/lib/itsi/server/signal_trap.rb +29 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +90 -9
- data/lib/itsi/standard_headers.rb +86 -0
- metadata +122 -31
- data/ext/itsi_server/src/request/itsi_request.rs +0 -143
- data/ext/itsi_server/src/request/mod.rs +0 -1
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +0 -32
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +0 -52
- data/ext/itsi_server/src/server/itsi_server.rs +0 -182
- data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
- data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
- data/lib/itsi/request.rb +0 -39
data/ext/itsi_server/src/server/serve_strategy/single_mode.rs (new file, @@ -0,0 +1,421 @@):

```rust
use crate::{
    ruby_types::itsi_server::itsi_server_config::ItsiServerConfig,
    server::{
        io_stream::IoStream,
        itsi_service::{IstiServiceInner, ItsiService},
        lifecycle_event::LifecycleEvent,
        listener::ListenerInfo,
        request_job::RequestJob,
        signal::SIGNAL_HANDLER_CHANNEL,
        thread_worker::{build_thread_workers, ThreadWorker},
    },
};
use hyper_util::{
    rt::{TokioExecutor, TokioIo, TokioTimer},
    server::conn::auto::Builder,
};
use itsi_error::{ItsiError, Result};
use itsi_rb_helpers::{
    call_with_gvl, call_without_gvl, create_ruby_thread, funcall_no_ret, print_rb_backtrace,
};
use itsi_tracing::{debug, error, info};
use magnus::value::ReprValue;
use nix::unistd::Pid;
use parking_lot::RwLock;
use std::{
    collections::HashMap,
    num::NonZeroU8,
    pin::Pin,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    thread::sleep,
    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
};
use tokio::{
    runtime::{Builder as RuntimeBuilder, Runtime},
    sync::{
        broadcast,
        watch::{self},
    },
    task::JoinSet,
};
use tracing::instrument;

pub struct SingleMode {
    pub executor: Builder<TokioExecutor>,
    pub server_config: Arc<ItsiServerConfig>,
    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
    pub restart_requested: AtomicBool,
    pub status: RwLock<HashMap<u8, (u64, u64)>>,
}

pub enum RunningPhase {
    Running,
    ShutdownPending,
    Shutdown,
}

impl SingleMode {
    #[instrument(parent=None, skip_all)]
    pub fn new(server_config: Arc<ItsiServerConfig>) -> Result<Self> {
        server_config.server_params.read().preload_ruby()?;
        Ok(Self {
            executor: Builder::new(TokioExecutor::new()),
            server_config,
            lifecycle_channel: SIGNAL_HANDLER_CHANNEL.0.clone(),
            restart_requested: AtomicBool::new(false),
            status: RwLock::new(HashMap::new()),
        })
    }

    pub fn build_runtime(&self) -> Runtime {
        let mut builder: RuntimeBuilder = if self
            .server_config
            .server_params
            .read()
            .multithreaded_reactor
        {
            RuntimeBuilder::new_multi_thread()
        } else {
            RuntimeBuilder::new_current_thread()
        };
        builder
            .thread_name("itsi-server-accept-loop")
            .thread_stack_size(3 * 1024 * 1024)
            .enable_io()
            .enable_time()
            .build()
            .expect("Failed to build Tokio runtime")
    }

    pub fn stop(&self) -> Result<()> {
        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
        Ok(())
    }

    pub async fn print_info(&self, thread_workers: Arc<Vec<Arc<ThreadWorker>>>) -> Result<()> {
        println!(" └─ Worker");
        println!(
            "    - binds: {:?}",
            self.server_config.server_params.read().binds
        );

        println!(
            "    ─ script_name: {:?}",
            self.server_config.server_params.read().script_name
        );
        println!(
            "    ─ streaming body: {:?}",
            self.server_config.server_params.read().streamable_body
        );
        println!(
            "    ─ multithreaded runtime: {:?}",
            self.server_config
                .server_params
                .read()
                .multithreaded_reactor
        );
        println!(
            "    ─ scheduler: {:?}",
            self.server_config.server_params.read().scheduler_class
        );
        println!(
            "    ─ OOB GC Response threadhold: {:?}",
            self.server_config
                .server_params
                .read()
                .oob_gc_responses_threshold
        );
        for worker in thread_workers.iter() {
            println!("   └─ - Thread : {:?}", worker.id);
            println!("       - # Requests Processed: {:?}", worker.request_id);
            println!(
                "       - Last Request Started: {:?} ago",
                if worker.current_request_start.load(Ordering::Relaxed) == 0 {
                    Duration::from_secs(0)
                } else {
                    SystemTime::now()
                        .duration_since(
                            UNIX_EPOCH
                                + Duration::from_secs(
                                    worker.current_request_start.load(Ordering::Relaxed),
                                ),
                        )
                        .unwrap_or(Duration::from_secs(0))
                }
            );
            call_with_gvl(|_| {
                if let Some(thread) = worker.thread.read().as_ref() {
                    if let Ok(backtrace) = thread.funcall::<_, _, Vec<String>>("backtrace", ()) {
                        println!("       - Backtrace:");
                        for line in backtrace {
                            println!("       -   {}", line);
                        }
                    }
                }
            })
        }

        Ok(())
    }

    pub fn start_monitors(
        self: Arc<Self>,
        thread_workers: Arc<Vec<Arc<ThreadWorker>>>,
    ) -> magnus::Thread {
        call_with_gvl(move |_| {
            create_ruby_thread(move || {
                call_without_gvl(move || {
                    let monitor_runtime = RuntimeBuilder::new_current_thread()
                        .enable_time()
                        .build()
                        .unwrap();
                    let receiver = self.clone();
                    monitor_runtime.block_on({
                        let mut lifecycle_rx = receiver.lifecycle_channel.subscribe();
                        let receiver = receiver.clone();
                        let thread_workers = thread_workers.clone();
                        async move {
                            loop {
                                tokio::select! {
                                    _ = tokio::time::sleep(Duration::from_secs(1)) => {
                                        let mut status_lock = receiver.status.write();
                                        thread_workers.iter().for_each(|worker| {
                                            let worker_entry = status_lock.entry(worker.id);
                                            let data = (
                                                worker.request_id.load(Ordering::Relaxed),
                                                worker.current_request_start.load(Ordering::Relaxed),
                                            );
                                            worker_entry.or_insert(data);
                                        });
                                    }
                                    lifecycle_event = lifecycle_rx.recv() => {
                                        match lifecycle_event {
                                            Ok(LifecycleEvent::Restart) => {
                                                receiver.restart().ok();
                                            }
                                            Ok(LifecycleEvent::Reload) => {
                                                receiver.reload().ok();
                                            }
                                            Ok(LifecycleEvent::Shutdown) => {
                                                break;
                                            }
                                            Ok(LifecycleEvent::PrintInfo) => {
                                                receiver.print_info(thread_workers.clone()).await.ok();
                                            }
                                            _ => {}
                                        }
                                    }
                                }
                            }
                        }
                    })
                })
            })
        })
    }

    #[instrument(name="worker", parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
    pub fn run(self: Arc<Self>) -> Result<()> {
        let mut listener_task_set = JoinSet::new();
        let runtime = self.build_runtime();

        let (thread_workers, job_sender) = build_thread_workers(
            self.server_config.server_params.read().clone(),
            Pid::this(),
            NonZeroU8::try_from(self.server_config.server_params.read().threads).unwrap(),
        )
        .inspect_err(|e| {
            if let Some(err_val) = e.value() {
                print_rb_backtrace(err_val);
            }
        })?;

        info!(
            pid = format!("{}", Pid::this()),
            threads = thread_workers.len(),
            binds = format!("{:?}", self.server_config.server_params.read().binds)
        );

        let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
        let thread = self.clone().start_monitors(thread_workers.clone());
        runtime.block_on(async {
            let server_params = self.server_config.server_params.read().clone();
            server_params.middleware.get().unwrap().initialize_layers().await?;
            let tokio_listeners = server_params.listeners.lock()
                .drain(..)
                .map(|list| {
                    Arc::new(list.into_tokio_listener())
                })
                .collect::<Vec<_>>();

            for listener in tokio_listeners.iter() {
                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
                let listener_info = Arc::new(listener.listener_info());
                let self_ref = self.clone();
                let listener = listener.clone();
                let shutdown_sender = shutdown_sender.clone();
                let job_sender = job_sender.clone();
                let workers_clone = thread_workers.clone();
                let listener_clone = listener.clone();
                let mut shutdown_receiver = shutdown_sender.subscribe();
                let shutdown_receiver_clone = shutdown_receiver.clone();
                listener_task_set.spawn(async move {
                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
                });

                listener_task_set.spawn(async move {
                    let strategy_clone = self_ref.clone();
                    let mut acceptor_task_set = JoinSet::new();
                    loop {
                        tokio::select! {
                            accept_result = listener.accept() => match accept_result {
                                Ok(accept_result) => {
                                    let strategy = strategy_clone.clone();
                                    let listener_info = listener_info.clone();
                                    let shutdown_receiver = shutdown_receiver.clone();
                                    let job_sender = job_sender.clone();
                                    acceptor_task_set.spawn(async move {
                                        strategy.serve_connection(accept_result, job_sender, listener_info, shutdown_receiver).await;
                                    });
                                },
                                Err(e) => debug!("Listener.accept failed {:?}", e),
                            },
                            _ = shutdown_receiver.changed() => {
                                break;
                            }
                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
                                Ok(LifecycleEvent::Shutdown) => {
                                    shutdown_sender.send(RunningPhase::ShutdownPending).unwrap();
                                    // Tell any in-progress connections to stop accepting new requests
                                    tokio::time::sleep(Duration::from_millis(25)).await;
                                    // Tell workers to stop processing requests once they've flushed their buffers.
                                    for _i in 0..workers_clone.len() {
                                        job_sender.send(RequestJob::Shutdown).await.unwrap();
                                    }
                                    break;
                                },
                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
                                _ => {}
                            }
                        }
                    }
                    while let Some(_res) = acceptor_task_set.join_next().await {}
                });

            }

            while let Some(_res) = listener_task_set.join_next().await {}

            Ok::<(), ItsiError>(())
        })?;

        shutdown_sender.send(RunningPhase::Shutdown).ok();
        let deadline = Instant::now()
            + Duration::from_secs_f64(self.server_config.server_params.read().shutdown_timeout);

        runtime.shutdown_timeout(Duration::from_millis(100));

        loop {
            if thread_workers
                .iter()
                .all(|worker| call_with_gvl(move |_| !worker.poll_shutdown(deadline)))
            {
                funcall_no_ret(thread, "join", ()).ok();
                break;
            }
            sleep(Duration::from_millis(50));
        }

        if self.restart_requested.load(Ordering::SeqCst) {
            self.restart_requested.store(false, Ordering::SeqCst);
            info!("Worker restarting");
            self.run()?;
        }
        debug!("Runtime has shut down");
        Ok(())
    }

    pub(crate) async fn serve_connection(
        &self,
        stream: IoStream,
        job_sender: async_channel::Sender<RequestJob>,
        listener: Arc<ListenerInfo>,
        shutdown_channel: watch::Receiver<RunningPhase>,
    ) {
        let addr = stream.addr();
        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
        let executor = self.executor.clone();
        let mut shutdown_channel_clone = shutdown_channel.clone();
        let mut executor = executor.clone();
        let mut binding = executor.http1();
        let shutdown_channel = shutdown_channel_clone.clone();

        let service = ItsiService {
            inner: Arc::new(IstiServiceInner {
                sender: job_sender.clone(),
                server_params: self.server_config.server_params.read().clone(),
                listener,
                addr: addr.to_string(),
                shutdown_channel: shutdown_channel.clone(),
            }),
        };
        let mut serve = Box::pin(
            binding
                .timer(TokioTimer::new()) // your existing timer
                .header_read_timeout(Duration::from_secs(1))
                .serve_connection_with_upgrades(io, service),
        );

        tokio::select! {
            // Await the connection finishing naturally.
            res = &mut serve => {
                match res {
                    Ok(()) => {
                        debug!("Connection closed normally")
                    },
                    Err(res) => {
                        debug!("Connection closed abruptly: {:?}", res)
                    }
                }
                serve.as_mut().graceful_shutdown();
            },
            // A lifecycle event triggers shutdown.
            _ = shutdown_channel_clone.changed() => {
                // Initiate graceful shutdown.
                info!("Starting graceful shutdown");
                serve.as_mut().graceful_shutdown();

                // Now await the connection to finish shutting down.
                if let Err(e) = serve.await {
                    debug!("Connection shutdown error: {:?}", e);
                }
            }
        }
    }

    /// Attempts to reload the config "live"
    /// Not that when running in single mode this will not unload
    /// old code. If you need a clean restart, use the `restart` (SIGHUP) method instead
    pub fn reload(&self) -> Result<()> {
        let should_reexec = self.server_config.clone().reload(false)?;
        if should_reexec {
            self.server_config.dup_fds()?;
            self.server_config.reload_exec()?;
        }
        self.restart_requested.store(true, Ordering::SeqCst);
        self.stop()?;
        self.server_config.server_params.read().preload_ruby()?;
        Ok(())
    }

    /// Restart the server while keeping connections open.
    pub fn restart(&self) -> Result<()> {
        self.server_config.dup_fds()?;
        self.server_config.reload_exec()?;
        Ok(())
    }
}
```
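The shutdown flow above hinges on a single `watch` channel fanned out to every accept loop and connection: flipping the value from `Running` to `ShutdownPending` wakes all pending `tokio::select!` arms at once. Below is a minimal, self-contained sketch of that pattern; the `RunningPhase` variants mirror the diff, while the task bodies and timings are illustrative only.

```rust
use std::time::Duration;
use tokio::sync::watch;

// Reduced version of the diff's RunningPhase; only the two phases the
// sketch needs.
#[derive(Clone, Debug, PartialEq)]
enum RunningPhase {
    Running,
    ShutdownPending,
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(RunningPhase::Running);

    // Each "connection" waits for work, but drains early when the phase changes.
    let mut tasks = Vec::new();
    for id in 0..3 {
        let mut rx = rx.clone();
        tasks.push(tokio::spawn(async move {
            tokio::select! {
                // Stand-in for serving a request to completion.
                _ = tokio::time::sleep(Duration::from_secs(60)) => {
                    println!("conn {id}: finished naturally");
                }
                // Any send on the watch channel wakes every subscriber.
                _ = rx.changed() => {
                    println!("conn {id}: draining after {:?}", *rx.borrow());
                }
            }
        }));
    }

    // Broadcast the phase change exactly once; all tasks observe it.
    tokio::time::sleep(Duration::from_millis(50)).await;
    tx.send(RunningPhase::ShutdownPending).unwrap();
    for t in tasks {
        t.await.unwrap();
    }
}
```

A `watch` channel fits this role because a subscriber always observes the latest value even if it checks late, whereas a lagging `broadcast` receiver can miss messages; the server accordingly reserves `broadcast` for discrete lifecycle events and `watch` for the current phase.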
data/ext/itsi_server/src/server/signal.rs (new file, @@ -0,0 +1,93 @@):

```rust
use std::sync::{atomic::AtomicI8, LazyLock};

use nix::libc::{self, sighandler_t};
use tokio::sync::{self, broadcast};

use super::lifecycle_event::LifecycleEvent;

pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
    broadcast::Sender<LifecycleEvent>,
    broadcast::Receiver<LifecycleEvent>,
)> = LazyLock::new(|| sync::broadcast::channel(5));

pub fn send_shutdown_event() {
    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
}

pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
fn receive_signal(signum: i32, _: sighandler_t) {
    SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
    match signum {
        libc::SIGTERM | libc::SIGINT => {
            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
                SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
            } else {
                // Not messing about. Force shutdown.
                SIGNAL_HANDLER_CHANNEL
                    .0
                    .send(LifecycleEvent::ForceShutdown)
                    .ok();
            }
        }
        libc::SIGUSR2 => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::PrintInfo)
                .ok();
        }
        libc::SIGUSR1 => {
            SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
        }
        libc::SIGHUP => {
            SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Reload).ok();
        }
        libc::SIGTTIN => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::IncreaseWorkers)
                .ok();
        }
        libc::SIGTTOU => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::DecreaseWorkers)
                .ok();
        }
        libc::SIGCHLD => {
            SIGNAL_HANDLER_CHANNEL
                .0
                .send(LifecycleEvent::ChildTerminated)
                .ok();
        }
        _ => {}
    }
}

pub fn reset_signal_handlers() -> bool {
    SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
    unsafe {
        libc::signal(libc::SIGTERM, receive_signal as usize);
        libc::signal(libc::SIGINT, receive_signal as usize);
        libc::signal(libc::SIGUSR2, receive_signal as usize);
        libc::signal(libc::SIGUSR1, receive_signal as usize);
        libc::signal(libc::SIGHUP, receive_signal as usize);
        libc::signal(libc::SIGTTIN, receive_signal as usize);
        libc::signal(libc::SIGTTOU, receive_signal as usize);
        libc::signal(libc::SIGCHLD, receive_signal as usize);
    }
    true
}

pub fn clear_signal_handlers() {
    unsafe {
        libc::signal(libc::SIGTERM, libc::SIG_DFL);
        libc::signal(libc::SIGINT, libc::SIG_DFL);
        libc::signal(libc::SIGUSR2, libc::SIG_DFL);
        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
        libc::signal(libc::SIGHUP, libc::SIG_DFL);
        libc::signal(libc::SIGTTIN, libc::SIG_DFL);
        libc::signal(libc::SIGTTOU, libc::SIG_DFL);
        libc::signal(libc::SIGCHLD, libc::SIG_DFL);
    }
}
```
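The static channel above lets any number of tasks react to a single delivered signal: the C-level handler touches only the shared `Sender`, and each consumer calls `subscribe()` for an independent `broadcast::Receiver`. A hedged sketch of the consumer side follows; the channel wiring mirrors the diff, but the `main` body and the reduced `LifecycleEvent` enum are illustrative.

```rust
use std::sync::LazyLock;
use tokio::sync::broadcast;

// Reduced stand-in for the diff's LifecycleEvent enum.
#[derive(Clone, Debug)]
enum LifecycleEvent {
    Shutdown,
    Restart,
    PrintInfo,
}

// Mirrors the diff: the receiver half is kept alive inside the static so the
// channel never closes; consumers get fresh receivers via `.0.subscribe()`.
static SIGNAL_HANDLER_CHANNEL: LazyLock<(
    broadcast::Sender<LifecycleEvent>,
    broadcast::Receiver<LifecycleEvent>,
)> = LazyLock::new(|| broadcast::channel(5));

#[tokio::main]
async fn main() {
    // Subscribe before any event is sent; broadcast receivers only see
    // messages sent after their subscribe() call.
    let mut rx = SIGNAL_HANDLER_CHANNEL.0.subscribe();

    // Stand-in for a signal arriving; in the real server this send happens
    // inside receive_signal().
    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();

    match rx.recv().await {
        Ok(LifecycleEvent::Shutdown) => println!("draining and exiting"),
        Ok(other) => println!("ignoring {:?}", other),
        Err(e) => eprintln!("lagged or closed: {:?}", e),
    }
}
```

Because a `broadcast::Receiver` only sees events sent after its `subscribe()` call, long-lived loops such as `start_monitors` in single_mode.rs subscribe once up front and hold the receiver for their whole lifetime.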