hyperion-rb 1.6.2 → 2.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4563 -0
- data/README.md +189 -13
- data/ext/hyperion_h2_codec/Cargo.lock +7 -0
- data/ext/hyperion_h2_codec/Cargo.toml +33 -0
- data/ext/hyperion_h2_codec/extconf.rb +73 -0
- data/ext/hyperion_h2_codec/src/frames.rs +140 -0
- data/ext/hyperion_h2_codec/src/hpack/huffman.rs +161 -0
- data/ext/hyperion_h2_codec/src/hpack.rs +457 -0
- data/ext/hyperion_h2_codec/src/lib.rs +296 -0
- data/ext/hyperion_http/extconf.rb +28 -0
- data/ext/hyperion_http/h2_codec_glue.c +408 -0
- data/ext/hyperion_http/page_cache.c +1125 -0
- data/ext/hyperion_http/parser.c +473 -38
- data/ext/hyperion_http/sendfile.c +982 -0
- data/ext/hyperion_http/websocket.c +493 -0
- data/ext/hyperion_io_uring/Cargo.lock +33 -0
- data/ext/hyperion_io_uring/Cargo.toml +34 -0
- data/ext/hyperion_io_uring/extconf.rb +74 -0
- data/ext/hyperion_io_uring/src/lib.rs +316 -0
- data/lib/hyperion/adapter/rack.rb +370 -42
- data/lib/hyperion/admin_listener.rb +207 -0
- data/lib/hyperion/admin_middleware.rb +36 -7
- data/lib/hyperion/cli.rb +310 -11
- data/lib/hyperion/config.rb +440 -14
- data/lib/hyperion/connection.rb +679 -22
- data/lib/hyperion/deprecations.rb +81 -0
- data/lib/hyperion/dispatch_mode.rb +165 -0
- data/lib/hyperion/fiber_local.rb +75 -13
- data/lib/hyperion/h2_admission.rb +77 -0
- data/lib/hyperion/h2_codec.rb +452 -0
- data/lib/hyperion/http/page_cache.rb +122 -0
- data/lib/hyperion/http/sendfile.rb +696 -0
- data/lib/hyperion/http2/native_hpack_adapter.rb +70 -0
- data/lib/hyperion/http2_handler.rb +368 -9
- data/lib/hyperion/io_uring.rb +317 -0
- data/lib/hyperion/lint_wrapper_pool.rb +126 -0
- data/lib/hyperion/master.rb +96 -9
- data/lib/hyperion/metrics/path_templater.rb +68 -0
- data/lib/hyperion/metrics.rb +256 -0
- data/lib/hyperion/prometheus_exporter.rb +150 -0
- data/lib/hyperion/request.rb +13 -0
- data/lib/hyperion/response_writer.rb +477 -16
- data/lib/hyperion/runtime.rb +195 -0
- data/lib/hyperion/server/route_table.rb +179 -0
- data/lib/hyperion/server.rb +519 -55
- data/lib/hyperion/static_preload.rb +133 -0
- data/lib/hyperion/thread_pool.rb +61 -7
- data/lib/hyperion/tls.rb +343 -1
- data/lib/hyperion/version.rb +1 -1
- data/lib/hyperion/websocket/close_codes.rb +71 -0
- data/lib/hyperion/websocket/connection.rb +876 -0
- data/lib/hyperion/websocket/frame.rb +356 -0
- data/lib/hyperion/websocket/handshake.rb +525 -0
- data/lib/hyperion/worker.rb +111 -9
- data/lib/hyperion.rb +137 -3
- metadata +50 -1
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # One-shot deprecation warnings (1.8.0, RFC §3): each deprecated API
  # call site / key warns at most once per process. Output goes through
  # the runtime logger when one is reachable and falls back to
  # Kernel#warn ($stderr) at very-early boot, before
  # `Hyperion::Runtime.default.logger` exists.
  #
  # The deprecated APIs keep working unchanged in 1.8.0 — the warn is
  # purely informational; removal lands in 2.0.0 per the RFC §3 release
  # plan.
  #
  # Test seams: swap `Hyperion::Runtime.default.logger` to capture the
  # output, call `Deprecations.silence!` in a `before(:each)` to mute,
  # and `Deprecations.reset!` in `after(:each)` for a clean slate.
  module Deprecations
    @warned = {}
    @silenced = false
    MUTEX = Mutex.new

    module_function

    # Emit a one-shot deprecation warn for `key`; later calls with the
    # same key in this process are no-ops. The check-and-record runs
    # under MUTEX so two workers initializing at once cannot double-emit
    # on the same key.
    def warn_once(key, message)
      return if @silenced

      first_time = MUTEX.synchronize do
        if @warned[key]
          false
        else
          @warned[key] = true
        end
      end
      return unless first_time

      emit("[hyperion] DEPRECATION: #{message}")
    end

    # Test seam: forget every recorded key so a spec can re-trigger a
    # warn it just exercised.
    def reset!
      MUTEX.synchronize { @warned.clear }
    end

    # Test seam: drop all warns until `unsilence!` is called. Used by
    # suites that intentionally exercise the deprecated surface and
    # would otherwise flood the output.
    def silence!
      @silenced = true
    end

    def unsilence!
      @silenced = false
    end

    def silenced?
      @silenced
    end

    # Assertion helper: has `key` already produced its warn?
    def warned?(key)
      MUTEX.synchronize { @warned.key?(key) }
    end

    # Route one formatted line to the runtime logger, or to $stderr when
    # no usable logger is in scope.
    def emit(line)
      logger = Hyperion::Runtime.default.logger if defined?(Hyperion::Runtime)
      if logger.respond_to?(:warn)
        logger.warn { { message: 'deprecation', detail: line } }
      else
        warn(line)
      end
    rescue StandardError
      # Logger swapped mid-emit / very-early boot — fall back to $stderr
      # so the operator at least sees something on the console.
      warn(line)
    end
    private_class_method :emit
  end
end
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Immutable value object replacing the 4-flag / 5-output if/elsif
  # state machine that lived in `Server#dispatch` and
  # `Server#inline_h1_dispatch?`. Pre-1.7 the matrix (`@tls`,
  # `@async_io ∈ {nil, true, false}`, `@thread_count`, ALPN) was
  # prose-only — no enum, no boot validation.
  #
  # 1.7 ships this internally and plumbs it at the dispatch call sites.
  # It is NOT public surface: operators read modes via the stable
  # per-mode counters (`Hyperion.stats[:requests_dispatch_<mode>]`)
  # because (1) the `name` set will grow as transports are added
  # (HTTP/3, h2c upgrade) and locking it now would force major-bump
  # churn, and (2) stats keys insulate operators from internal renames.
  #
  # Frozen after construction so a hot-path branch can never see the
  # mode mutate underneath it. Equality and hash are by `name`, so
  # instances slot cleanly into per-mode metric keys.
  class DispatchMode
    # The 6 dispatch shapes Hyperion currently honours (names mirror
    # the RFC's wording):
    #   :tls_h2             — TLS connection that ALPN-picked HTTP/2
    #   :tls_h1_inline      — TLS HTTP/1.1 served inline on the accept
    #                         fiber (1.4.0+ default; preserves the
    #                         Async scheduler for hyperion-async-pg /
    #                         async-redis)
    #   :async_io_h1_inline — plain HTTP/1.1 with `async_io: true`,
    #                         served inline on the calling fiber
    #   :threadpool_h1      — plain or TLS HTTP/1.1 dispatched to the
    #                         worker thread pool (`-t N`, default)
    #   :inline_h1_no_pool  — plain HTTP/1.1, no pool (`-t 0`); served
    #                         inline on the accept thread/fiber
    #   :inline_blocking    — 2.6-C: Puma-style serial-per-thread
    #                         response write for static-file routes.
    #                         Opt-in PER RESPONSE (bodies responding to
    #                         `:to_path`); the connection-wide mode is
    #                         unchanged. No fiber yield, no per-chunk
    #                         EAGAIN dance — the OS thread parks on the
    #                         kernel write under the GVL. Operator
    #                         escape hatch:
    #                         `env['hyperion.dispatch_mode'] = :inline_blocking`.
    MODES = %i[tls_h2 tls_h1_inline async_io_h1_inline threadpool_h1 inline_h1_no_pool
               inline_blocking].freeze

    # Shapes served inline on a fiber/thread, without a pool hop.
    INLINE_MODES = %i[tls_h1_inline async_io_h1_inline inline_h1_no_pool inline_blocking].freeze

    # Shapes dispatched on a fiber that may yield cooperatively to the
    # Async scheduler.
    FIBER_MODES = %i[tls_h2 tls_h1_inline async_io_h1_inline].freeze

    attr_reader :name

    # Resolve the mode for a single dispatch from the four signals that
    # drive the matrix. ALPN only matters under TLS — callers pass nil
    # for plain HTTP. `thread_count` is a positive Integer (pool
    # present) or 0 (dispatch inline). Semantics intentionally mirror
    # the pre-1.7 if/elsif chain in `Server#dispatch` so the refactor
    # is behaviour-preserving. Never yields :inline_blocking — that
    # mode is a per-response opt-in, not a connection-wide resolution.
    def self.resolve(tls:, async_io:, thread_count:, alpn: nil)
      if tls
        return new(:tls_h2) if alpn == 'h2'
        return new(:tls_h1_inline) unless async_io == false
      elsif async_io == true
        return new(:async_io_h1_inline)
      end

      thread_count.to_i.positive? ? new(:threadpool_h1) : new(:inline_h1_no_pool)
    end

    def initialize(name)
      unless MODES.include?(name)
        raise ArgumentError, "unknown DispatchMode #{name.inspect}"
      end

      @name = name
      freeze # value object — no mutation out from under a hot-path branch
    end

    # Inline-on-fiber dispatch (no thread-pool hop): tls_h1_inline,
    # async_io_h1_inline, inline_h1_no_pool, plus the per-response
    # inline_blocking shape.
    def inline?
      INLINE_MODES.include?(name)
    end

    def threadpool?
      name == :threadpool_h1
    end

    def h2?
      name == :tls_h2
    end

    # 2.6-C — Puma-style serial-per-thread response write for
    # static-file routes. Per-response opt-in (NOT connection-wide):
    # the connection still dispatches via its configured mode. When
    # this engages, the write path uses
    # `Sendfile.copy_to_socket_blocking` instead of the fiber-yielding
    # `copy_to_socket` — the OS thread parks on the kernel write under
    # the GVL with no per-chunk EAGAIN-yield round-trip.
    def inline_blocking?
      name == :inline_blocking
    end

    # 2.6-C — whether the response runs on a fiber that may yield
    # cooperatively to the scheduler. False for :inline_blocking (the
    # whole point) and for :threadpool_h1 / :inline_h1_no_pool (no
    # scheduler in scope); true for the three async-scheduler shapes.
    def fiber_dispatched?
      FIBER_MODES.include?(name)
    end

    # Whether dispatch yields cooperatively (Async scheduler current on
    # the calling fiber): TLS h1 inline (TLS already wraps the accept
    # loop in Async), async_io_h1_inline (operator opted in), and h2
    # (per-stream fibers). False for threadpool dispatch, `-t 0` plain
    # HTTP, and :inline_blocking.
    def async?
      FIBER_MODES.include?(name)
    end

    # Whether the dispatch goes through `ThreadPool#submit_connection`
    # (or `ThreadPool#call` on the h2 per-stream path).
    def pooled?
      name == :threadpool_h1
    end

    # Per-mode metric key. Stable across releases — operators alert on
    # `:requests_dispatch_threadpool` etc. directly; the full set lives
    # in the README's Metrics section.
    def metric_key
      :"requests_dispatch_#{name}"
    end

    def ==(other)
      other.is_a?(DispatchMode) && other.name == name
    end
    alias eql? ==

    # Delegate to Symbol#hash: keying on the underlying symbol gives
    # correct Hash bucket placement without extra allocation.
    def hash
      name.hash
    end

    def to_s
      name.to_s
    end

    def inspect
      "#<Hyperion::DispatchMode #{name}>"
    end
  end
end
|
data/lib/hyperion/fiber_local.rb
CHANGED
|
@@ -25,11 +25,38 @@ module Hyperion
|
|
|
25
25
|
# current Ruby actually isolates `Thread.current[:k]` per-fiber. Raises if
|
|
26
26
|
# not (which would only happen on Ruby < 3.2).
|
|
27
27
|
#
|
|
28
|
-
# `Hyperion::FiberLocal.install
|
|
29
|
-
# `thread_variable_get`/`thread_variable_set` to fiber storage. Use
|
|
30
|
-
# if
|
|
31
|
-
#
|
|
28
|
+
# `Hyperion::FiberLocal.install!(async_io:)` — opt-in monkey-patch that
|
|
29
|
+
# routes `thread_variable_get`/`thread_variable_set` to fiber storage. Use
|
|
30
|
+
# only if your app uses `thread_variable_set` for request scope under
|
|
31
|
+
# fiber-per-request concurrency.
|
|
32
|
+
#
|
|
33
|
+
# ## 1.4.x compat — the regression this gates against
|
|
34
|
+
#
|
|
35
|
+
# 1.4.x fixed a bug where Hyperion's own Logger access buffer + Metrics
|
|
36
|
+
# counters were stranded under `Async::Scheduler` because they were stored
|
|
37
|
+
# on `Thread.current[:k]` (which is fiber-local in Ruby 3.2+). The fix
|
|
38
|
+
# switched those to `Thread#thread_variable_*`, which is the only TRUE
|
|
39
|
+
# thread-local storage in CRuby (commits f987462 + e8db450). A blanket
|
|
40
|
+
# FiberLocal monkey-patch would re-route those calls to fiber storage and
|
|
41
|
+
# restage the exact bug 1.4.x fixed. To stay compatible:
|
|
42
|
+
#
|
|
43
|
+
# 1. When `async_io` is OFF (the default — single-thread or thread-pool
|
|
44
|
+
# mode, no scheduler in play), `install!` is a no-op. The shim has no
|
|
45
|
+
# purpose without fibers, and patching only risks re-introducing the
|
|
46
|
+
# 1.4.x stranded-counter bug if a thread pool ever runs job N and
|
|
47
|
+
# job N+1 in distinct fibers on the same OS thread.
|
|
48
|
+
# 2. When `async_io` is ON, the patched `thread_variable_*` reserves the
|
|
49
|
+
# `__hyperion_*` symbol keys for true thread-local storage so Hyperion's
|
|
50
|
+
# Logger/Metrics keep aggregating correctly. Everything else routes to
|
|
51
|
+
# `Fiber.current.storage` for fiber-per-request isolation.
|
|
32
52
|
module FiberLocal
|
|
53
|
+
# Symbol keys with this prefix bypass the fiber-storage routing and use
|
|
54
|
+
# the original `thread_variable_*` semantics. Hyperion's internal
|
|
55
|
+
# Logger access buffer + ts-cache and Metrics counters all live behind
|
|
56
|
+
# this prefix and rely on TRUE thread-local storage to survive fiber
|
|
57
|
+
# scheduling on the same OS thread (1.4.x guarantee).
|
|
58
|
+
HYPERION_KEY_PREFIX = '__hyperion_'
|
|
59
|
+
|
|
33
60
|
@installed = false
|
|
34
61
|
|
|
35
62
|
class << self
|
|
@@ -59,27 +86,62 @@ module Hyperion
|
|
|
59
86
|
end
|
|
60
87
|
|
|
61
88
|
# Opt-in patch that routes thread_variable_get/set to fiber storage.
|
|
62
|
-
#
|
|
63
|
-
#
|
|
64
|
-
#
|
|
65
|
-
|
|
89
|
+
#
|
|
90
|
+
# `async_io:` MUST be true to install the shim. With async_io off there
|
|
91
|
+
# are no fibers in flight and patching only risks the 1.4.x regression
|
|
92
|
+
# (stranded Logger/Metrics counters when a thread pool runs successive
|
|
93
|
+
# jobs in different fibers). When async_io is off we log a warning and
|
|
94
|
+
# leave thread_variable_* on its original (truly thread-local) path.
|
|
95
|
+
#
|
|
96
|
+
# Even with the shim installed, `__hyperion_*` symbol keys still route
|
|
97
|
+
# to the original thread_variable_* — Hyperion's own Logger and Metrics
|
|
98
|
+
# depend on true thread-local storage and must not be redirected to
|
|
99
|
+
# fiber storage. See the module docstring for the full rationale.
|
|
100
|
+
def install!(async_io: false)
|
|
66
101
|
return if @installed
|
|
67
102
|
|
|
103
|
+
unless async_io
|
|
104
|
+
# 1.4.x compat: with no fibers in play the shim has no purpose,
|
|
105
|
+
# and patching `thread_variable_*` to fiber storage would
|
|
106
|
+
# re-introduce the bug 1.4.x fixed (Logger/Metrics counters
|
|
107
|
+
# stranded across thread-pool jobs that happen to run in distinct
|
|
108
|
+
# fibers on the same OS thread). Make this a no-op and tell the
|
|
109
|
+
# operator we ignored their flag.
|
|
110
|
+
Hyperion.logger.warn do
|
|
111
|
+
{ message: 'FiberLocal.install! ignored — async_io is off',
|
|
112
|
+
hint: 'The shim only matters under fiber-per-request concurrency. ' \
|
|
113
|
+
'Enable async_io: true (or pass --async-io) to opt in.' }
|
|
114
|
+
end
|
|
115
|
+
return
|
|
116
|
+
end
|
|
117
|
+
|
|
118
|
+
prefix = HYPERION_KEY_PREFIX
|
|
119
|
+
|
|
68
120
|
::Thread.class_eval do
|
|
69
121
|
alias_method :__hyperion_orig_tvar_get, :thread_variable_get
|
|
70
122
|
alias_method :__hyperion_orig_tvar_set, :thread_variable_set
|
|
71
123
|
|
|
72
124
|
define_method(:thread_variable_get) do |key|
|
|
73
125
|
sym = key.to_sym
|
|
74
|
-
|
|
75
|
-
|
|
126
|
+
# Hyperion-internal keys always use TRUE thread-local storage
|
|
127
|
+
# to preserve the 1.4.x guarantee for Logger/Metrics.
|
|
128
|
+
return __hyperion_orig_tvar_get(sym) if sym.to_s.start_with?(prefix)
|
|
76
129
|
|
|
77
|
-
|
|
130
|
+
# Fiber#storage returns a COPY, so the canonical fiber-local
|
|
131
|
+
# access path is `Fiber[]` — it reads through to the underlying
|
|
132
|
+
# storage and falls back to inherited storage on parent fibers.
|
|
133
|
+
::Fiber[sym]
|
|
78
134
|
end
|
|
79
135
|
|
|
80
136
|
define_method(:thread_variable_set) do |key, value|
|
|
81
|
-
|
|
82
|
-
|
|
137
|
+
sym = key.to_sym
|
|
138
|
+
# Hyperion-internal keys always use TRUE thread-local storage
|
|
139
|
+
# to preserve the 1.4.x guarantee for Logger/Metrics.
|
|
140
|
+
return __hyperion_orig_tvar_set(sym, value) if sym.to_s.start_with?(prefix)
|
|
141
|
+
|
|
142
|
+
# Use `Fiber[]=` (not `Fiber.current.storage[k] = v`) — the
|
|
143
|
+
# latter mutates a copy and does not persist across reads.
|
|
144
|
+
::Fiber[sym] = value
|
|
83
145
|
end
|
|
84
146
|
end
|
|
85
147
|
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Process-wide HTTP/2 stream admission control (RFC A7).
  #
  # **Problem.** `h2_max_concurrent_streams` (default 128) caps streams
  # *per connection*. An abuser can open 5,000 connections × 128
  # streams = 640k fibers → OOM → master respawns → abuser reconnects.
  # The 1.6.0 backpressure cap on bytes-in-queue is 16 MiB *per
  # connection*, so it doesn't bound aggregate fiber count either. Real
  # DoS vector, previously with no built-in defence.
  #
  # **Shape.** One per-process counter shared across every
  # `Http2Handler` in a worker. Each new stream calls `#admit` before
  # invoking the app: true means the slot was reserved, false means the
  # cap is hit and the caller answers with `RST_STREAM REFUSED_STREAM`
  # (RFC 7540 §11 / RFC 9113 §5.4.1). The dispatch ensure block frees
  # the slot via `#release`.
  #
  # **Default.** `max_total_streams: nil` disables admission — every
  # `#admit` returns true and `Server` only constructs an H2Admission
  # when the operator passes a positive cap. The 1.7.0 default is nil;
  # 2.0 flips to `h2_max_concurrent_streams × workers × 4` (RFC §3
  # 1.x-vs-2.0 split).
  #
  # **Concurrency.** Mutex hold time is one compare-plus-increment, in
  # the tens of nanoseconds, and is taken once per stream admit (not
  # per frame) — far below dispatch rate. On the abuser's path this is
  # also exactly where they hit the wall, by design.
  class H2Admission
    attr_reader :max

    def initialize(max_total_streams:)
      @max = max_total_streams
      @count = 0
      @rejected = 0
      @mutex = Mutex.new
    end

    # Reserve one stream slot. Returns true when admitted, false when
    # the cap is full (the rejection is tallied for `#stats`). A nil
    # cap (admission disabled) returns true without ever touching the
    # mutex, keeping the hot path lock-free when the feature is off.
    def admit
      return true if @max.nil?

      @mutex.synchronize do
        if @count < @max
          @count += 1
          true
        else
          @rejected += 1
          false
        end
      end
    end

    # Give back a slot taken by `#admit`. Tolerates a double-release
    # (paranoia against a programming bug — the count never goes below
    # zero) and is a no-op when admission is disabled (nil cap).
    def release
      return if @max.nil?

      @mutex.synchronize { @count -= 1 if @count.positive? }
    end

    # Consistent snapshot of the gate: `in_flight` = streams currently
    # holding a slot, `rejected` = cumulative REFUSED_STREAM count
    # served here, `max` = configured cap. The stats publisher surfaces
    # these as `Hyperion.stats[:h2_admission_*]` for dashboards.
    def stats
      @mutex.synchronize { { in_flight: @count, rejected: @rejected, max: @max } }
    end
  end
end
|