itsi 0.1.11 → 0.1.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Cargo.lock +1535 -45
- data/{sandbox/itsi_itsi_file/Itsi.rb → Itsi.rb} +19 -13
- data/Rakefile +8 -7
- data/crates/itsi_error/src/lib.rs +9 -0
- data/crates/itsi_rb_helpers/Cargo.toml +1 -0
- data/crates/itsi_rb_helpers/src/heap_value.rs +18 -0
- data/crates/itsi_rb_helpers/src/lib.rs +34 -7
- data/crates/itsi_server/Cargo.toml +69 -30
- data/crates/itsi_server/src/lib.rs +79 -147
- data/crates/itsi_server/src/{body_proxy → ruby_types/itsi_body_proxy}/big_bytes.rs +10 -5
- data/crates/itsi_server/src/{body_proxy/itsi_body_proxy.rs → ruby_types/itsi_body_proxy/mod.rs} +22 -3
- data/crates/itsi_server/src/ruby_types/itsi_grpc_request.rs +147 -0
- data/crates/itsi_server/src/ruby_types/itsi_grpc_response.rs +19 -0
- data/crates/itsi_server/src/ruby_types/itsi_grpc_stream/mod.rs +216 -0
- data/{gems/server/ext/itsi_server/src/request/itsi_request.rs → crates/itsi_server/src/ruby_types/itsi_http_request.rs} +101 -117
- data/crates/itsi_server/src/{response/itsi_response.rs → ruby_types/itsi_http_response.rs} +72 -41
- data/crates/itsi_server/src/ruby_types/itsi_server/file_watcher.rs +225 -0
- data/crates/itsi_server/src/ruby_types/itsi_server/itsi_server_config.rs +355 -0
- data/crates/itsi_server/src/ruby_types/itsi_server.rs +82 -0
- data/crates/itsi_server/src/ruby_types/mod.rs +55 -0
- data/crates/itsi_server/src/server/bind.rs +13 -5
- data/crates/itsi_server/src/server/byte_frame.rs +32 -0
- data/crates/itsi_server/src/server/cache_store.rs +74 -0
- data/crates/itsi_server/src/server/itsi_service.rs +172 -0
- data/crates/itsi_server/src/server/lifecycle_event.rs +3 -0
- data/crates/itsi_server/src/server/listener.rs +102 -2
- data/crates/itsi_server/src/server/middleware_stack/middleware.rs +153 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/allow_list.rs +47 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_api_key.rs +58 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_basic.rs +82 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/auth_jwt.rs +321 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/cache_control.rs +139 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/compression.rs +300 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/cors.rs +287 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/deny_list.rs +48 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/error_response.rs +127 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/etag.rs +191 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/grpc_service.rs +72 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/header_interpretation.rs +85 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/intrusion_protection.rs +195 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/log_requests.rs +82 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/mod.rs +82 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/proxy.rs +216 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/rate_limit.rs +124 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/redirect.rs +76 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/request_headers.rs +43 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/response_headers.rs +34 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/ruby_app.rs +93 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/static_assets.rs +162 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/string_rewrite.rs +158 -0
- data/crates/itsi_server/src/server/middleware_stack/middlewares/token_source.rs +12 -0
- data/crates/itsi_server/src/server/middleware_stack/mod.rs +315 -0
- data/crates/itsi_server/src/server/mod.rs +8 -1
- data/crates/itsi_server/src/server/process_worker.rs +38 -12
- data/crates/itsi_server/src/server/rate_limiter.rs +565 -0
- data/crates/itsi_server/src/server/request_job.rs +11 -0
- data/crates/itsi_server/src/server/serve_strategy/cluster_mode.rs +119 -42
- data/crates/itsi_server/src/server/serve_strategy/mod.rs +9 -6
- data/crates/itsi_server/src/server/serve_strategy/single_mode.rs +256 -111
- data/crates/itsi_server/src/server/signal.rs +19 -0
- data/crates/itsi_server/src/server/static_file_server.rs +984 -0
- data/crates/itsi_server/src/server/thread_worker.rs +139 -94
- data/crates/itsi_server/src/server/types.rs +43 -0
- data/crates/itsi_server/test.md +14 -0
- data/crates/itsi_tracing/Cargo.toml +1 -0
- data/crates/itsi_tracing/src/lib.rs +216 -45
- data/docs/.gitignore +7 -0
- data/docs/.gitpod.yml +15 -0
- data/docs/Itsi.rb +17 -0
- data/docs/content/_index.md +17 -0
- data/docs/content/about.md +6 -0
- data/docs/content/docs/_index.md +18 -0
- data/docs/content/docs/first-page.md +9 -0
- data/docs/content/docs/folder/_index.md +10 -0
- data/docs/content/docs/folder/leaf.md +7 -0
- data/docs/go.mod +5 -0
- data/docs/go.sum +2 -0
- data/docs/hugo.yaml +77 -0
- data/examples/static_assets_example.rb +83 -0
- data/gems/_index.md +18 -0
- data/gems/scheduler/CODE_OF_CONDUCT.md +7 -0
- data/gems/scheduler/Cargo.lock +75 -14
- data/gems/scheduler/README.md +5 -0
- data/gems/scheduler/_index.md +7 -0
- data/gems/scheduler/itsi-scheduler.gemspec +4 -1
- data/gems/scheduler/lib/itsi/scheduler/version.rb +1 -1
- data/gems/scheduler/lib/itsi/scheduler.rb +2 -2
- data/gems/scheduler/test/test_file_io.rb +0 -1
- data/gems/scheduler/test/test_itsi_scheduler.rb +1 -1
- data/gems/server/CHANGELOG.md +5 -0
- data/gems/server/CODE_OF_CONDUCT.md +7 -0
- data/gems/server/Cargo.lock +1536 -45
- data/gems/server/README.md +4 -0
- data/gems/server/_index.md +6 -0
- data/gems/server/exe/itsi +33 -74
- data/gems/server/itsi-server.gemspec +3 -2
- data/gems/server/lib/itsi/{request.rb → http_request.rb} +29 -5
- data/gems/server/lib/itsi/http_response.rb +39 -0
- data/gems/server/lib/itsi/server/Itsi.rb +11 -19
- data/gems/server/lib/itsi/server/config/dsl.rb +506 -0
- data/gems/server/lib/itsi/server/config.rb +103 -8
- data/gems/server/lib/itsi/server/default_app/default_app.rb +38 -0
- data/gems/server/lib/itsi/server/grpc_interface.rb +213 -0
- data/gems/server/lib/itsi/server/rack/handler/itsi.rb +8 -17
- data/gems/server/lib/itsi/server/rack_interface.rb +23 -4
- data/gems/server/lib/itsi/server/scheduler_interface.rb +1 -1
- data/gems/server/lib/itsi/server/scheduler_mode.rb +4 -0
- data/gems/server/lib/itsi/server/signal_trap.rb +7 -1
- data/gems/server/lib/itsi/server/version.rb +1 -1
- data/gems/server/lib/itsi/server.rb +74 -63
- data/gems/server/lib/itsi/standard_headers.rb +86 -0
- data/gems/server/test/helpers/test_helper.rb +12 -12
- data/gems/server/test/test_itsi_server.rb +2 -2
- data/lib/itsi/version.rb +1 -1
- data/sandbox/itsi_file/Gemfile +11 -0
- data/sandbox/itsi_file/Gemfile.lock +69 -0
- data/sandbox/itsi_file/Itsi.rb +276 -0
- data/sandbox/itsi_file/error.html +2 -0
- data/sandbox/itsi_file/organisations_controller.rb +20 -0
- data/sandbox/itsi_file/public/assets/image.png +0 -0
- data/sandbox/itsi_file/public/assets/index.html +1 -0
- data/sandbox/itsi_sandbox_hanami/Gemfile.lock +2 -2
- data/sandbox/itsi_sandbox_rack/Gemfile.lock +2 -2
- data/sandbox/itsi_sandbox_rack/config.ru +2 -15
- data/sandbox/itsi_sandbox_rails/.dockerignore +2 -5
- data/sandbox/itsi_sandbox_rails/.github/workflows/ci.yml +1 -1
- data/sandbox/itsi_sandbox_rails/.gitignore +2 -1
- data/sandbox/itsi_sandbox_rails/Dockerfile +6 -9
- data/sandbox/itsi_sandbox_rails/Gemfile +16 -22
- data/sandbox/itsi_sandbox_rails/Gemfile.lock +100 -225
- data/sandbox/itsi_sandbox_rails/app/assets/config/manifest.js +4 -0
- data/sandbox/itsi_sandbox_rails/app/assets/stylesheets/application.css +11 -6
- data/sandbox/itsi_sandbox_rails/app/channels/application_cable/channel.rb +4 -0
- data/sandbox/itsi_sandbox_rails/app/channels/application_cable/connection.rb +4 -0
- data/sandbox/itsi_sandbox_rails/app/controllers/live_controller.rb +7 -8
- data/sandbox/itsi_sandbox_rails/app/controllers/uploads_controller.rb +0 -3
- data/sandbox/itsi_sandbox_rails/app/views/layouts/application.html.erb +2 -7
- data/sandbox/itsi_sandbox_rails/bin/docker-entrypoint +3 -4
- data/sandbox/itsi_sandbox_rails/bin/setup +8 -5
- data/sandbox/itsi_sandbox_rails/config/application.rb +1 -35
- data/sandbox/itsi_sandbox_rails/config/cable.yml +3 -10
- data/sandbox/itsi_sandbox_rails/config/credentials.yml.enc +1 -1
- data/sandbox/itsi_sandbox_rails/config/database.yml +9 -19
- data/sandbox/itsi_sandbox_rails/config/environment.rb +1 -1
- data/sandbox/itsi_sandbox_rails/config/environments/development.rb +21 -12
- data/sandbox/itsi_sandbox_rails/config/environments/production.rb +49 -34
- data/sandbox/itsi_sandbox_rails/config/environments/test.rb +19 -5
- data/sandbox/itsi_sandbox_rails/config/initializers/assets.rb +5 -0
- data/sandbox/itsi_sandbox_rails/config/initializers/filter_parameter_logging.rb +1 -1
- data/sandbox/itsi_sandbox_rails/config/initializers/permissions_policy.rb +13 -0
- data/sandbox/itsi_sandbox_rails/config/puma.rb +2 -9
- data/sandbox/itsi_sandbox_rails/config.ru +0 -1
- data/sandbox/itsi_sandbox_rails/db/migrate/20250301041554_create_posts.rb +1 -1
- data/sandbox/itsi_sandbox_rails/db/schema.rb +2 -2
- data/sandbox/itsi_sandbox_rails/lib/assets/.keep +0 -0
- data/sandbox/itsi_sandbox_rails/public/404.html +66 -113
- data/sandbox/itsi_sandbox_rails/public/406-unsupported-browser.html +65 -113
- data/sandbox/itsi_sandbox_rails/public/422.html +66 -113
- data/sandbox/itsi_sandbox_rails/public/500.html +65 -113
- data/sandbox/itsi_sandbox_rails/public/icon.png +0 -0
- data/sandbox/itsi_sandbox_rails/public/icon.svg +2 -2
- data/sandbox/itsi_sandbox_rails/test/channels/application_cable/connection_test.rb +13 -0
- data/sandbox/itsi_sandbox_roda/Gemfile.lock +3 -10
- data/tasks.txt +72 -35
- metadata +89 -139
- data/crates/itsi_server/src/body_proxy/mod.rs +0 -2
- data/crates/itsi_server/src/request/itsi_request.rs +0 -298
- data/crates/itsi_server/src/request/mod.rs +0 -1
- data/crates/itsi_server/src/response/mod.rs +0 -1
- data/crates/itsi_server/src/server/itsi_server.rs +0 -288
- data/gems/scheduler/ext/itsi_error/Cargo.lock +0 -368
- data/gems/scheduler/ext/itsi_error/Cargo.toml +0 -11
- data/gems/scheduler/ext/itsi_error/src/from.rs +0 -68
- data/gems/scheduler/ext/itsi_error/src/lib.rs +0 -24
- data/gems/scheduler/ext/itsi_instrument_entry/Cargo.toml +0 -15
- data/gems/scheduler/ext/itsi_instrument_entry/src/lib.rs +0 -31
- data/gems/scheduler/ext/itsi_rb_helpers/Cargo.lock +0 -355
- data/gems/scheduler/ext/itsi_rb_helpers/Cargo.toml +0 -10
- data/gems/scheduler/ext/itsi_rb_helpers/src/heap_value.rs +0 -121
- data/gems/scheduler/ext/itsi_rb_helpers/src/lib.rs +0 -201
- data/gems/scheduler/ext/itsi_scheduler/Cargo.toml +0 -24
- data/gems/scheduler/ext/itsi_scheduler/extconf.rb +0 -6
- data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +0 -56
- data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +0 -44
- data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +0 -44
- data/gems/scheduler/ext/itsi_scheduler/src/itsi_scheduler.rs +0 -308
- data/gems/scheduler/ext/itsi_scheduler/src/lib.rs +0 -38
- data/gems/scheduler/ext/itsi_server/Cargo.lock +0 -2956
- data/gems/scheduler/ext/itsi_server/Cargo.toml +0 -50
- data/gems/scheduler/ext/itsi_server/extconf.rb +0 -6
- data/gems/scheduler/ext/itsi_server/src/body_proxy/big_bytes.rs +0 -104
- data/gems/scheduler/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +0 -122
- data/gems/scheduler/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/gems/scheduler/ext/itsi_server/src/env.rs +0 -43
- data/gems/scheduler/ext/itsi_server/src/lib.rs +0 -180
- data/gems/scheduler/ext/itsi_server/src/request/itsi_request.rs +0 -298
- data/gems/scheduler/ext/itsi_server/src/request/mod.rs +0 -1
- data/gems/scheduler/ext/itsi_server/src/response/itsi_response.rs +0 -357
- data/gems/scheduler/ext/itsi_server/src/response/mod.rs +0 -1
- data/gems/scheduler/ext/itsi_server/src/server/bind.rs +0 -174
- data/gems/scheduler/ext/itsi_server/src/server/bind_protocol.rs +0 -37
- data/gems/scheduler/ext/itsi_server/src/server/io_stream.rs +0 -104
- data/gems/scheduler/ext/itsi_server/src/server/itsi_server.rs +0 -288
- data/gems/scheduler/ext/itsi_server/src/server/lifecycle_event.rs +0 -9
- data/gems/scheduler/ext/itsi_server/src/server/listener.rs +0 -318
- data/gems/scheduler/ext/itsi_server/src/server/mod.rs +0 -11
- data/gems/scheduler/ext/itsi_server/src/server/process_worker.rs +0 -203
- data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +0 -260
- data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/mod.rs +0 -27
- data/gems/scheduler/ext/itsi_server/src/server/serve_strategy/single_mode.rs +0 -276
- data/gems/scheduler/ext/itsi_server/src/server/signal.rs +0 -74
- data/gems/scheduler/ext/itsi_server/src/server/thread_worker.rs +0 -399
- data/gems/scheduler/ext/itsi_server/src/server/tls/locked_dir_cache.rs +0 -132
- data/gems/scheduler/ext/itsi_server/src/server/tls.rs +0 -265
- data/gems/scheduler/ext/itsi_tracing/Cargo.lock +0 -274
- data/gems/scheduler/ext/itsi_tracing/Cargo.toml +0 -16
- data/gems/scheduler/ext/itsi_tracing/src/lib.rs +0 -58
- data/gems/server/ext/itsi_error/Cargo.lock +0 -368
- data/gems/server/ext/itsi_error/Cargo.toml +0 -11
- data/gems/server/ext/itsi_error/src/from.rs +0 -68
- data/gems/server/ext/itsi_error/src/lib.rs +0 -24
- data/gems/server/ext/itsi_instrument_entry/Cargo.toml +0 -15
- data/gems/server/ext/itsi_instrument_entry/src/lib.rs +0 -31
- data/gems/server/ext/itsi_rb_helpers/Cargo.lock +0 -355
- data/gems/server/ext/itsi_rb_helpers/Cargo.toml +0 -10
- data/gems/server/ext/itsi_rb_helpers/src/heap_value.rs +0 -121
- data/gems/server/ext/itsi_rb_helpers/src/lib.rs +0 -201
- data/gems/server/ext/itsi_scheduler/Cargo.toml +0 -24
- data/gems/server/ext/itsi_scheduler/extconf.rb +0 -6
- data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +0 -56
- data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +0 -44
- data/gems/server/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +0 -44
- data/gems/server/ext/itsi_scheduler/src/itsi_scheduler.rs +0 -308
- data/gems/server/ext/itsi_scheduler/src/lib.rs +0 -38
- data/gems/server/ext/itsi_server/Cargo.lock +0 -2956
- data/gems/server/ext/itsi_server/Cargo.toml +0 -50
- data/gems/server/ext/itsi_server/extconf.rb +0 -6
- data/gems/server/ext/itsi_server/src/body_proxy/big_bytes.rs +0 -104
- data/gems/server/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +0 -122
- data/gems/server/ext/itsi_server/src/body_proxy/mod.rs +0 -2
- data/gems/server/ext/itsi_server/src/env.rs +0 -43
- data/gems/server/ext/itsi_server/src/lib.rs +0 -180
- data/gems/server/ext/itsi_server/src/request/mod.rs +0 -1
- data/gems/server/ext/itsi_server/src/response/itsi_response.rs +0 -357
- data/gems/server/ext/itsi_server/src/response/mod.rs +0 -1
- data/gems/server/ext/itsi_server/src/server/bind.rs +0 -174
- data/gems/server/ext/itsi_server/src/server/bind_protocol.rs +0 -37
- data/gems/server/ext/itsi_server/src/server/io_stream.rs +0 -104
- data/gems/server/ext/itsi_server/src/server/itsi_server.rs +0 -288
- data/gems/server/ext/itsi_server/src/server/lifecycle_event.rs +0 -9
- data/gems/server/ext/itsi_server/src/server/listener.rs +0 -318
- data/gems/server/ext/itsi_server/src/server/mod.rs +0 -11
- data/gems/server/ext/itsi_server/src/server/process_worker.rs +0 -203
- data/gems/server/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +0 -260
- data/gems/server/ext/itsi_server/src/server/serve_strategy/mod.rs +0 -27
- data/gems/server/ext/itsi_server/src/server/serve_strategy/single_mode.rs +0 -276
- data/gems/server/ext/itsi_server/src/server/signal.rs +0 -74
- data/gems/server/ext/itsi_server/src/server/thread_worker.rs +0 -399
- data/gems/server/ext/itsi_server/src/server/tls/locked_dir_cache.rs +0 -132
- data/gems/server/ext/itsi_server/src/server/tls.rs +0 -265
- data/gems/server/ext/itsi_tracing/Cargo.lock +0 -274
- data/gems/server/ext/itsi_tracing/Cargo.toml +0 -16
- data/gems/server/ext/itsi_tracing/src/lib.rs +0 -58
- data/gems/server/lib/itsi/server/options_dsl.rb +0 -401
- data/gems/server/lib/itsi/stream_io.rb +0 -38
- data/location_dsl.rb +0 -381
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/docker-setup.sample +0 -3
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/post-app-boot.sample +0 -3
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/post-deploy.sample +0 -14
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/post-proxy-reboot.sample +0 -3
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/pre-app-boot.sample +0 -3
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/pre-build.sample +0 -51
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/pre-connect.sample +0 -47
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/pre-deploy.sample +0 -109
- data/sandbox/itsi_sandbox_rails/.kamal/hooks/pre-proxy-reboot.sample +0 -3
- data/sandbox/itsi_sandbox_rails/.kamal/secrets +0 -17
- data/sandbox/itsi_sandbox_rails/bin/dev +0 -2
- data/sandbox/itsi_sandbox_rails/bin/jobs +0 -6
- data/sandbox/itsi_sandbox_rails/bin/kamal +0 -27
- data/sandbox/itsi_sandbox_rails/bin/thrust +0 -5
- data/sandbox/itsi_sandbox_rails/config/cache.yml +0 -16
- data/sandbox/itsi_sandbox_rails/config/deploy.yml +0 -116
- data/sandbox/itsi_sandbox_rails/config/queue.yml +0 -18
- data/sandbox/itsi_sandbox_rails/config/recurring.yml +0 -10
- data/sandbox/itsi_sandbox_rails/db/cable_schema.rb +0 -11
- data/sandbox/itsi_sandbox_rails/db/cache_schema.rb +0 -14
- data/sandbox/itsi_sandbox_rails/db/queue_schema.rb +0 -129
- data/sandbox/itsi_sandbox_rails/public/400.html +0 -114
- data/sandbox/itsi_sandbox_rails/test/fixtures/posts.yml +0 -9
- data/sandbox/itsi_sandbox_rails/test/models/post_test.rb +0 -7
- /data/{sandbox/itsi_sandbox_rails/script/.keep → crates/_index.md} +0 -0
- /data/gems/server/lib/itsi/{index.html → server/default_app/index.html} +0 -0
Removed: `src/server/serve_strategy/cluster_mode.rs` (vendored `ext/itsi_server` copy)

```diff
@@ -1,260 +0,0 @@
-use crate::server::{
-    itsi_server::Server, lifecycle_event::LifecycleEvent, listener::Listener,
-    process_worker::ProcessWorker,
-};
-use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::{
-    call_proc_and_log_errors, call_with_gvl, call_without_gvl, create_ruby_thread,
-};
-use itsi_tracing::{error, info, warn};
-use magnus::Value;
-use nix::{
-    libc::{self, exit},
-    unistd::Pid,
-};
-
-use std::{
-    sync::{atomic::AtomicUsize, Arc},
-    time::{Duration, Instant},
-};
-use tokio::{
-    runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{broadcast, watch, Mutex},
-    time::{self, sleep},
-};
-use tracing::{debug, instrument};
-pub(crate) struct ClusterMode {
-    pub listeners: parking_lot::Mutex<Vec<Listener>>,
-    pub server: Arc<Server>,
-    pub process_workers: parking_lot::Mutex<Vec<ProcessWorker>>,
-    pub lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-}
-
-static WORKER_ID: AtomicUsize = AtomicUsize::new(0);
-static CHILD_SIGNAL_SENDER: parking_lot::Mutex<Option<watch::Sender<()>>> =
-    parking_lot::Mutex::new(None);
-
-impl ClusterMode {
-    pub fn new(
-        server: Arc<Server>,
-        listeners: Vec<Listener>,
-        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-    ) -> Self {
-        let process_workers = (0..server.workers)
-            .map(|_| ProcessWorker {
-                worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
-                ..Default::default()
-            })
-            .collect();
-
-        Self {
-            listeners: parking_lot::Mutex::new(listeners),
-            server,
-            process_workers: parking_lot::Mutex::new(process_workers),
-            lifecycle_channel,
-        }
-    }
-
-    pub fn build_runtime(&self) -> Runtime {
-        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
-        builder
-            .thread_name("itsi-server-accept-loop")
-            .thread_stack_size(3 * 1024 * 1024)
-            .enable_io()
-            .enable_time()
-            .build()
-            .expect("Failed to build Tokio runtime")
-    }
-
-    #[allow(clippy::await_holding_lock)]
-    pub async fn handle_lifecycle_event(
-        self: Arc<Self>,
-        lifecycle_event: LifecycleEvent,
-    ) -> Result<()> {
-        match lifecycle_event {
-            LifecycleEvent::Start => Ok(()),
-            LifecycleEvent::Shutdown => {
-                self.shutdown().await?;
-                Ok(())
-            }
-            LifecycleEvent::Restart => {
-                for worker in self.process_workers.lock().iter() {
-                    worker.reboot(self.clone()).await?;
-                }
-                Ok(())
-            }
-            LifecycleEvent::IncreaseWorkers => {
-                let mut workers = self.process_workers.lock();
-                let worker = ProcessWorker {
-                    worker_id: WORKER_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
-                    ..Default::default()
-                };
-                let worker_clone = worker.clone();
-                let self_clone = self.clone();
-                create_ruby_thread(move || {
-                    call_without_gvl(move || {
-                        worker_clone.boot(self_clone).ok();
-                    })
-                });
-                workers.push(worker);
-                Ok(())
-            }
-            LifecycleEvent::DecreaseWorkers => {
-                let worker = {
-                    let mut workers = self.process_workers.lock();
-                    workers.pop()
-                };
-                if let Some(dropped_worker) = worker {
-                    dropped_worker.request_shutdown();
-                    let force_kill_time =
-                        Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
-                    while dropped_worker.is_alive() && force_kill_time > Instant::now() {
-                        tokio::time::sleep(Duration::from_millis(100)).await;
-                    }
-                    if dropped_worker.is_alive() {
-                        dropped_worker.force_kill();
-                    }
-                };
-                Ok(())
-            }
-            LifecycleEvent::ForceShutdown => {
-                for worker in self.process_workers.lock().iter() {
-                    worker.force_kill();
-                }
-                unsafe { exit(0) };
-            }
-        }
-    }
-
-    pub async fn shutdown(&self) -> Result<()> {
-        let shutdown_timeout = self.server.shutdown_timeout;
-        let workers = self.process_workers.lock().clone();
-
-        workers.iter().for_each(|worker| worker.request_shutdown());
-
-        let remaining_children = Arc::new(Mutex::new(workers.len()));
-        let monitor_handle = {
-            let remaining_children: Arc<Mutex<usize>> = Arc::clone(&remaining_children);
-            let mut workers = workers.clone();
-            tokio::spawn(async move {
-                loop {
-                    // Check if all workers have exited
-                    let mut remaining = remaining_children.lock().await;
-                    workers.retain(|worker| worker.is_alive());
-                    *remaining = workers.len();
-                    if *remaining == 0 {
-                        break;
-                    }
-                    sleep(Duration::from_millis(100)).await;
-                }
-            })
-        };
-
-        tokio::select! {
-            _ = monitor_handle => {
-                debug!("All children exited early, exit normally")
-            }
-            _ = sleep(Duration::from_secs_f64(shutdown_timeout)) => {
-                warn!("Graceful shutdown timeout reached, force killing remaining children");
-                workers.iter().for_each(|worker| worker.force_kill());
-            }
-        }
-
-        Err(ItsiError::Break())
-    }
-
-    pub fn receive_signal(signal: i32) {
-        match signal {
-            libc::SIGCHLD => {
-                CHILD_SIGNAL_SENDER.lock().as_ref().inspect(|i| {
-                    i.send(()).ok();
-                });
-            }
-            _ => {
-                // Handle other signals
-            }
-        }
-    }
-
-    pub fn stop(&self) -> Result<()> {
-        unsafe { libc::signal(libc::SIGCHLD, libc::SIG_DFL) };
-
-        for worker in self.process_workers.lock().iter() {
-            if worker.is_alive() {
-                worker.force_kill();
-            }
-        }
-
-        Ok(())
-    }
-
-    #[instrument(skip(self), fields(mode = "cluster", pid=format!("{:?}", Pid::this())))]
-    pub fn run(self: Arc<Self>) -> Result<()> {
-        info!("Starting in Cluster mode");
-        if let Some(proc) = self.server.hooks.get("before_fork") {
-            call_with_gvl(|_| call_proc_and_log_errors(proc.clone()))
-        }
-        self.process_workers
-            .lock()
-            .iter()
-            .try_for_each(|worker| worker.boot(Arc::clone(&self)))?;
-
-        let (sender, mut receiver) = watch::channel(());
-        *CHILD_SIGNAL_SENDER.lock() = Some(sender);
-
-        unsafe { libc::signal(libc::SIGCHLD, Self::receive_signal as usize) };
-
-        let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-        let self_ref = self.clone();
-
-        self.build_runtime().block_on(async {
-            let self_ref = self_ref.clone();
-            let mut memory_check_interval = time::interval(time::Duration::from_secs(2));
-            loop {
-                tokio::select! {
-                    _ = receiver.changed() => {
-                        let mut workers = self_ref.process_workers.lock();
-                        workers.retain(|worker| {
-                            worker.boot_if_dead(Arc::clone(&self_ref))
-                        });
-                        if workers.is_empty() {
-                            warn!("No workers running. Send SIGTTIN to increase worker count");
-                        }
-                    }
-                    _ = memory_check_interval.tick() => {
-                        if let Some(memory_limit) = self_ref.server.worker_memory_limit {
-                            let largest_worker = {
-                                let workers = self_ref.process_workers.lock();
-                                workers.iter().max_by(|wa, wb| wa.memory_usage().cmp(&wb.memory_usage())).cloned()
-                            };
-                            if let Some(largest_worker) = largest_worker {
-                                if let Some(current_mem_usage) = largest_worker.memory_usage() {
-                                    if current_mem_usage > memory_limit {
-                                        largest_worker.reboot(self_ref.clone()).await.ok();
-                                        if let Some(hook) = self_ref.server.hooks.get("after_memory_threshold_reached") {
-                                            call_with_gvl(|_| hook.call::<_, Value>((largest_worker.pid(),)).ok());
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                    lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
-                        Ok(lifecycle_event) => {
-                            if let Err(e) = self_ref.clone().handle_lifecycle_event(lifecycle_event).await {
-                                match e {
-                                    ItsiError::Break() => break,
-                                    _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                }
-                            }
-                        },
-                        Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                    }
-                }
-            }
-        });
-
-        Ok(())
-    }
-}
```
Removed: `src/server/serve_strategy/mod.rs` (vendored `ext/itsi_server` copy)

```diff
@@ -1,27 +0,0 @@
-use cluster_mode::ClusterMode;
-use itsi_error::Result;
-use single_mode::SingleMode;
-use std::sync::Arc;
-pub mod cluster_mode;
-pub mod single_mode;
-
-pub(crate) enum ServeStrategy {
-    Single(Arc<SingleMode>),
-    Cluster(Arc<ClusterMode>),
-}
-
-impl ServeStrategy {
-    pub fn run(&self) -> Result<()> {
-        match self {
-            ServeStrategy::Single(single_router) => single_router.clone().run(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().run(),
-        }
-    }
-
-    pub(crate) fn stop(&self) -> Result<()> {
-        match self {
-            ServeStrategy::Single(single_router) => single_router.clone().stop(),
-            ServeStrategy::Cluster(cluster_router) => cluster_router.clone().stop(),
-        }
-    }
-}
```
Removed: `src/server/serve_strategy/single_mode.rs` (vendored `ext/itsi_server` copy)

```diff
@@ -1,276 +0,0 @@
-use crate::{
-    request::itsi_request::ItsiRequest,
-    server::{
-        io_stream::IoStream,
-        itsi_server::{RequestJob, Server},
-        lifecycle_event::LifecycleEvent,
-        listener::{Listener, ListenerInfo},
-        thread_worker::{build_thread_workers, ThreadWorker},
-    },
-};
-use http::Request;
-use hyper::{body::Incoming, service::service_fn};
-use hyper_util::{
-    rt::{TokioExecutor, TokioIo, TokioTimer},
-    server::conn::auto::Builder,
-};
-use itsi_error::{ItsiError, Result};
-use itsi_rb_helpers::print_rb_backtrace;
-use itsi_tracing::{debug, error, info};
-use nix::unistd::Pid;
-use parking_lot::Mutex;
-use std::{
-    num::NonZeroU8,
-    panic,
-    pin::Pin,
-    sync::Arc,
-    time::{Duration, Instant},
-};
-use tokio::{
-    runtime::{Builder as RuntimeBuilder, Runtime},
-    sync::{
-        broadcast,
-        watch::{self, Sender},
-    },
-    task::JoinSet,
-};
-use tracing::instrument;
-
-pub struct SingleMode {
-    pub executor: Builder<TokioExecutor>,
-    pub server: Arc<Server>,
-    pub sender: async_channel::Sender<RequestJob>,
-    pub(crate) listeners: Mutex<Vec<Listener>>,
-    pub(crate) thread_workers: Arc<Vec<ThreadWorker>>,
-    pub(crate) lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-}
-
-pub enum RunningPhase {
-    Running,
-    ShutdownPending,
-    Shutdown,
-}
-
-impl SingleMode {
-    #[instrument(parent=None, skip_all, fields(pid=format!("{:?}", Pid::this())))]
-    pub(crate) fn new(
-        server: Arc<Server>,
-        listeners: Vec<Listener>,
-        lifecycle_channel: broadcast::Sender<LifecycleEvent>,
-    ) -> Result<Self> {
-        let (thread_workers, sender) = build_thread_workers(
-            server.clone(),
-            Pid::this(),
-            NonZeroU8::try_from(server.threads).unwrap(),
-            server.app.clone(),
-            server.scheduler_class.clone(),
-        )
-        .inspect_err(|e| {
-            if let Some(err_val) = e.value() {
-                print_rb_backtrace(err_val);
-            }
-        })?;
-        Ok(Self {
-            executor: Builder::new(TokioExecutor::new()),
-            listeners: Mutex::new(listeners),
-            server,
-            sender,
-            thread_workers,
-            lifecycle_channel,
-        })
-    }
-
-    pub fn build_runtime(&self) -> Runtime {
-        let mut builder: RuntimeBuilder = RuntimeBuilder::new_current_thread();
-        builder
-            .thread_name("itsi-server-accept-loop")
-            .thread_stack_size(3 * 1024 * 1024)
-            .enable_io()
-            .enable_time()
-            .build()
-            .expect("Failed to build Tokio runtime")
-    }
-
-    pub fn stop(&self) -> Result<()> {
-        self.lifecycle_channel.send(LifecycleEvent::Shutdown).ok();
-        Ok(())
-    }
-
-    #[instrument(parent=None, skip(self), fields(pid=format!("{}", Pid::this())))]
-    pub fn run(self: Arc<Self>) -> Result<()> {
-        let mut listener_task_set = JoinSet::new();
-        let runtime = self.build_runtime();
-
-        runtime.block_on(async {
-            let tokio_listeners = self
-                .listeners
-                .lock()
-                .drain(..)
-                .map(|list| Arc::new(list.into_tokio_listener()))
-                .collect::<Vec<_>>();
-            let (shutdown_sender, _) = watch::channel(RunningPhase::Running);
-            for listener in tokio_listeners.iter() {
-                let mut lifecycle_rx = self.lifecycle_channel.subscribe();
-                let listener_info = Arc::new(listener.listener_info());
-                let self_ref = self.clone();
-                let listener = listener.clone();
-                let shutdown_sender = shutdown_sender.clone();
-
-                let listener_clone = listener.clone();
-                let mut shutdown_receiver = shutdown_sender.subscribe();
-                let shutdown_receiver_clone = shutdown_receiver.clone();
-                listener_task_set.spawn(async move {
-                    listener_clone.spawn_state_task(shutdown_receiver_clone).await;
-                });
-
-                listener_task_set.spawn(async move {
-                    let strategy_clone = self_ref.clone();
-                    let mut acceptor_task_set = JoinSet::new();
-                    loop {
-                        tokio::select! {
-                            accept_result = listener.accept() => match accept_result {
-                                Ok(accept_result) => {
-                                    let strategy = strategy_clone.clone();
-                                    let listener_info = listener_info.clone();
-                                    let shutdown_receiver = shutdown_receiver.clone();
-                                    acceptor_task_set.spawn(async move {
-                                        strategy.serve_connection(accept_result, listener_info, shutdown_receiver).await;
-                                    });
-                                },
-                                Err(e) => debug!("Listener.accept failed {:?}", e),
-                            },
-                            _ = shutdown_receiver.changed() => {
-                                break;
-                            }
-                            lifecycle_event = lifecycle_rx.recv() => match lifecycle_event {
-                                Ok(lifecycle_event) => {
-                                    if let Err(e) = self_ref.handle_lifecycle_event(lifecycle_event, shutdown_sender.clone()).await {
-                                        match e {
-                                            ItsiError::Break() => break,
-                                            _ => error!("Error in handle_lifecycle_event {:?}", e)
-                                        }
-                                    }
-                                },
-                                Err(e) => error!("Error receiving lifecycle_event: {:?}", e),
-                            }
-                        }
-                    }
-                    while let Some(_res) = acceptor_task_set.join_next().await {}
-                });
-            }
-
-            while let Some(_res) = listener_task_set.join_next().await {}
-        });
-        runtime.shutdown_timeout(Duration::from_millis(100));
-        debug!("Runtime has shut down");
-        Ok(())
-    }
-
-    pub(crate) async fn serve_connection(
-        &self,
-        stream: IoStream,
-        listener: Arc<ListenerInfo>,
-        shutdown_channel: watch::Receiver<RunningPhase>,
-    ) {
-        let sender_clone = self.sender.clone();
-        let addr = stream.addr();
-        let io: TokioIo<Pin<Box<IoStream>>> = TokioIo::new(Box::pin(stream));
-        let server = self.server.clone();
-        let executor = self.executor.clone();
-        let mut shutdown_channel_clone = shutdown_channel.clone();
-        let server = server.clone();
-        let mut executor = executor.clone();
-        let mut binding = executor.http1();
-        let shutdown_channel = shutdown_channel_clone.clone();
-        let mut serve = Box::pin(
-            binding
-                .timer(TokioTimer::new())
-                .header_read_timeout(Duration::from_secs(1))
-                .serve_connection_with_upgrades(
-                    io,
-                    service_fn(move |hyper_request: Request<Incoming>| {
-                        ItsiRequest::process_request(
-                            hyper_request,
-                            sender_clone.clone(),
-                            server.clone(),
-                            listener.clone(),
-                            addr.clone(),
-                            shutdown_channel.clone(),
-                        )
-                    }),
-                ),
-        );
-
-        tokio::select! {
-            // Await the connection finishing naturally.
-            res = &mut serve => {
-                match res {
-                    Ok(()) => {
-                        debug!("Connection closed normally")
-                    },
-                    Err(res) => {
-                        debug!("Connection finished with error: {:?}", res)
-                    }
-                }
-                serve.as_mut().graceful_shutdown();
-            },
-            // A lifecycle event triggers shutdown.
-            _ = shutdown_channel_clone.changed() => {
-                // Initiate graceful shutdown.
-                serve.as_mut().graceful_shutdown();
-
-                // Now await the connection to finish shutting down.
-                if let Err(e) = serve.await {
-                    debug!("Connection shutdown error: {:?}", e);
-                }
-            }
-        }
-    }
-
-    pub async fn handle_lifecycle_event(
-        &self,
-        lifecycle_event: LifecycleEvent,
-        shutdown_sender: Sender<RunningPhase>,
-    ) -> Result<()> {
-        info!("Handling lifecycle event: {:?}", lifecycle_event);
-        if let LifecycleEvent::Shutdown = lifecycle_event {
-            // 1. Stop accepting new connections.
-            shutdown_sender.send(RunningPhase::ShutdownPending).ok();
-            tokio::time::sleep(Duration::from_millis(25)).await;
-
-            // 2. Break out of work queues.
-            for worker in &*self.thread_workers {
-                worker.request_shutdown().await;
-            }
-
-            tokio::time::sleep(Duration::from_millis(25)).await;
-
-            // 3. Wait for all threads to finish.
-            let deadline = Instant::now() + Duration::from_secs_f64(self.server.shutdown_timeout);
-            while Instant::now() < deadline {
-                let alive_threads = self
-                    .thread_workers
-                    .iter()
-                    .filter(|worker| worker.poll_shutdown(deadline))
-                    .count();
-                if alive_threads == 0 {
-                    break;
-                }
-                tokio::time::sleep(Duration::from_millis(200)).await;
-            }
-
-            // 4. Force shutdown any stragglers.
-            shutdown_sender.send(RunningPhase::Shutdown).ok();
-            self.thread_workers.iter().for_each(|worker| {
-                worker.poll_shutdown(deadline);
-            });
-
-            return Err(ItsiError::Break());
-        }
-        Ok(())
-    }
-}
```
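The removed `serve_connection` races the hyper connection future against a `watch` channel carrying `RunningPhase`, calling `graceful_shutdown` when the phase changes. A tokio-only sketch of that select, with a `sleep` standing in for the connection future:

```rust
use std::time::Duration;
use tokio::sync::watch;

enum RunningPhase {
    Running,
    ShutdownPending,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(RunningPhase::Running);

    // Stand-in for hyper's serve_connection_with_upgrades future.
    let mut serve = Box::pin(tokio::time::sleep(Duration::from_secs(5)));

    // Simulate a lifecycle event flipping the phase shortly after start.
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        tx.send(RunningPhase::ShutdownPending).ok();
    });

    tokio::select! {
        // The connection finished on its own.
        _ = &mut serve => println!("connection closed normally"),
        // Shutdown signalled: in the removed code this is where
        // graceful_shutdown() stops new requests; the in-flight future
        // is then driven to completion rather than dropped.
        _ = rx.changed() => {
            println!("shutdown pending, draining connection");
            serve.await;
        }
    }
}
```

Awaiting the connection after signalling shutdown is what makes the drain graceful: dropping the future instead would abort in-flight requests.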
Removed: `src/server/signal.rs` (vendored `ext/itsi_server` copy)

```diff
@@ -1,74 +0,0 @@
-use std::sync::{atomic::AtomicI8, LazyLock};
-
-use nix::libc::{self, sighandler_t};
-use tokio::sync::{self, broadcast};
-
-use super::lifecycle_event::LifecycleEvent;
-
-pub static SIGNAL_HANDLER_CHANNEL: LazyLock<(
-    broadcast::Sender<LifecycleEvent>,
-    broadcast::Receiver<LifecycleEvent>,
-)> = LazyLock::new(|| sync::broadcast::channel(5));
-
-pub fn send_shutdown_event() {
-    SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
-}
-
-pub static SIGINT_COUNT: AtomicI8 = AtomicI8::new(0);
-fn receive_signal(signum: i32, _: sighandler_t) {
-    SIGINT_COUNT.fetch_add(-1, std::sync::atomic::Ordering::SeqCst);
-    match signum {
-        libc::SIGTERM | libc::SIGINT => {
-            SIGINT_COUNT.fetch_add(2, std::sync::atomic::Ordering::SeqCst);
-            if SIGINT_COUNT.load(std::sync::atomic::Ordering::SeqCst) < 2 {
-                SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Shutdown).ok();
-            } else {
-                // Not messing about. Force shutdown.
-                SIGNAL_HANDLER_CHANNEL
-                    .0
-                    .send(LifecycleEvent::ForceShutdown)
-                    .ok();
-            }
-        }
-        libc::SIGUSR1 => {
-            SIGNAL_HANDLER_CHANNEL.0.send(LifecycleEvent::Restart).ok();
-        }
-        libc::SIGTTIN => {
-            SIGNAL_HANDLER_CHANNEL
-                .0
-                .send(LifecycleEvent::IncreaseWorkers)
-                .ok();
-        }
-        libc::SIGTTOU => {
-            SIGNAL_HANDLER_CHANNEL
-                .0
-                .send(LifecycleEvent::DecreaseWorkers)
-                .ok();
-        }
-        _ => {}
-    }
-}
-
-pub fn reset_signal_handlers() -> bool {
-    SIGINT_COUNT.store(0, std::sync::atomic::Ordering::SeqCst);
-    unsafe {
-        libc::signal(libc::SIGTERM, receive_signal as usize);
-        libc::signal(libc::SIGINT, receive_signal as usize);
-        libc::signal(libc::SIGUSR1, receive_signal as usize);
-        libc::signal(libc::SIGUSR2, receive_signal as usize);
-        libc::signal(libc::SIGTTIN, receive_signal as usize);
-        libc::signal(libc::SIGTTOU, receive_signal as usize);
-    }
-    true
-}
-
-pub fn clear_signal_handlers() {
-    unsafe {
-        libc::signal(libc::SIGTERM, libc::SIG_DFL);
-        libc::signal(libc::SIGINT, libc::SIG_DFL);
-        libc::signal(libc::SIGUSR1, libc::SIG_DFL);
-        libc::signal(libc::SIGUSR2, libc::SIG_DFL);
-        libc::signal(libc::SIGTTIN, libc::SIG_DFL);
-        libc::signal(libc::SIGTTOU, libc::SIG_DFL);
-    }
-}
```