hyperion-rb 1.0.0.rc17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +133 -0
- data/LICENSE +21 -0
- data/README.md +260 -0
- data/bin/hyperion +6 -0
- data/ext/hyperion_http/extconf.rb +19 -0
- data/ext/hyperion_http/llhttp/api.c +509 -0
- data/ext/hyperion_http/llhttp/http.c +170 -0
- data/ext/hyperion_http/llhttp/llhttp.c +10103 -0
- data/ext/hyperion_http/llhttp/llhttp.h +907 -0
- data/ext/hyperion_http/parser.c +428 -0
- data/lib/hyperion/adapter/rack.rb +143 -0
- data/lib/hyperion/c_parser.rb +19 -0
- data/lib/hyperion/cli.rb +151 -0
- data/lib/hyperion/config.rb +107 -0
- data/lib/hyperion/connection.rb +338 -0
- data/lib/hyperion/fiber_local.rb +104 -0
- data/lib/hyperion/http2_handler.rb +312 -0
- data/lib/hyperion/logger.rb +269 -0
- data/lib/hyperion/master.rb +221 -0
- data/lib/hyperion/metrics.rb +68 -0
- data/lib/hyperion/parser.rb +128 -0
- data/lib/hyperion/pool.rb +34 -0
- data/lib/hyperion/request.rb +25 -0
- data/lib/hyperion/response_writer.rb +98 -0
- data/lib/hyperion/server.rb +198 -0
- data/lib/hyperion/thread_pool.rb +116 -0
- data/lib/hyperion/tls.rb +29 -0
- data/lib/hyperion/version.rb +5 -0
- data/lib/hyperion/worker.rb +91 -0
- data/lib/hyperion.rb +82 -0
- metadata +193 -0
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'etc'
require 'rbconfig'
require 'socket'
require 'openssl'

module Hyperion
  # Pre-fork master process. Owns the supervision loop. Each worker is a
  # full fiber-scheduler `Hyperion::Server` running its own accept loop.
  #
  # rc15 — per-OS worker model. There are two ways to give N children a
  # listening socket on the same port:
  #
  # 1. `:reuseport` (Linux): each worker binds its OWN socket with
  #    SO_REUSEPORT. The kernel hashes incoming connections across the
  #    sibling sockets — no thundering herd, no shared accept lock,
  #    linear scaling with worker count. The master never binds.
  #
  # 2. `:share` (macOS / BSD / everything else): the master binds a
  #    single TCPServer (or SSLServer) BEFORE fork. Children inherit the
  #    fd via fork(2) and race on `accept(2)` — whichever child wins gets
  #    the connection. This is Puma's model. We use it on Darwin because
  #    Darwin's SO_REUSEPORT distributor hashes unevenly: at `-w 4`
  #    against a real Rails app a single curl probe cannot get answered
  #    inside 120s in the worst case, because the kernel keeps routing
  #    to a worker whose accept queue is already full.
  #
  # Detection: `RbConfig::CONFIG['host_os']` matching `linux` picks
  # `:reuseport`; everything else picks `:share`. Operators can pin the
  # mode explicitly with `HYPERION_WORKER_MODEL=share|reuseport` (used by
  # the test suite to exercise both paths on a single host).
  class Master
    DEFAULT_WORKER_COUNT = nil # nil → Etc.nprocessors
    GRACEFUL_TIMEOUT_SECONDS = 30

    WORKER_MODELS = %i[reuseport share].freeze

    # Resolve the accept strategy for this host. A valid
    # HYPERION_WORKER_MODEL override wins; otherwise Linux gets
    # :reuseport and every other OS gets :share (rationale in the
    # class comment above). Invalid overrides fall through silently.
    def self.detect_worker_model
      override = ENV['HYPERION_WORKER_MODEL']&.to_sym
      return override if WORKER_MODELS.include?(override)

      host_os = RbConfig::CONFIG['host_os'].to_s
      case host_os
      when /linux/ then :reuseport
      else :share # macOS, BSD, anything else: shared-FD model (Puma-style)
      end
    end

    # @param host [String] address to bind
    # @param port [Integer] port to bind (0 lets the kernel pick; see
    #   #bind_master_listener for how the chosen port is propagated)
    # @param app  [#call] Rack-compatible application handed to each worker
    # @param workers [Integer, nil] child count; nil → Etc.nprocessors
    # @param tls [Hash, nil] {cert:, key:} — when set, :share mode wraps the
    #   listener in an SSLServer
    # @param config [Hyperion::Config, nil] optional config; a default is built
    def initialize(host:, port:, app:, workers: DEFAULT_WORKER_COUNT,
                   read_timeout: Server::DEFAULT_READ_TIMEOUT_SECONDS, tls: nil,
                   thread_count: Server::DEFAULT_THREAD_COUNT, config: nil)
      @host = host
      @port = port
      @app = app
      @workers = workers || Etc.nprocessors
      @read_timeout = read_timeout
      @tls = tls
      @thread_count = thread_count
      @config = config || Hyperion::Config.new
      @graceful_timeout = @config.graceful_timeout || GRACEFUL_TIMEOUT_SECONDS
      @children = {} # pid => worker_index
      @next_index = 0
      @stopping = false
      @worker_model = self.class.detect_worker_model
      @listener = nil # populated only in :share mode
    end

    # Main entry point: bind (in :share mode), run before_fork hooks, fork
    # the workers, then block in the supervision loop until a shutdown
    # signal arrives. Does not return until the master is exiting.
    def run
      install_signal_handlers
      bind_master_listener if @worker_model == :share
      Hyperion.logger.info do
        {
          message: 'master starting',
          pid: Process.pid,
          workers: @workers,
          host: @host,
          port: @port,
          worker_model: @worker_model
        }
      end

      # `before_fork` runs ONCE in the master before any worker is forked.
      # Operators use it to close shared resources (DB pools, Redis sockets)
      # so each child gets fresh connections rather than inheriting the
      # parent's open fds. Mirrors Puma's hook of the same name.
      @config.before_fork.each(&:call)

      @workers.times { spawn_worker }

      supervise
    ensure
      # The master keeps the listener open across its lifetime so it can
      # respawn workers (the new fork inherits the same fd). It only gets
      # closed here once the master itself is exiting.
      @listener&.close
    end

    private

    # Self-pipe trick: a Ruby trap context cannot safely do much work, so
    # the INT/TERM handlers only write one byte into a pipe. #supervise
    # selects on the read end and performs the actual shutdown outside of
    # trap context. write_nonblock failures (e.g. pipe full after repeated
    # signals) are deliberately ignored — one byte is enough.
    def install_signal_handlers
      shutdown_r, shutdown_w = IO.pipe
      %w[INT TERM].each do |sig|
        Signal.trap(sig) do
          shutdown_w.write_nonblock('!')
        rescue StandardError
          nil
        end
      end
      @shutdown_pipe = shutdown_r
    end

    # Bind the listening socket in the master so children inherit the fd
    # via fork. Only used in :share mode (macOS / BSD).
    def bind_master_listener
      tcp = ::TCPServer.new(@host, @port)
      # Honour port: 0 (let kernel pick) — propagate the chosen port so
      # log lines and worker args reflect reality.
      @port = tcp.addr[1]

      if @tls
        ctx = TLS.context(cert: @tls[:cert], key: @tls[:key])
        ssl_server = ::OpenSSL::SSL::SSLServer.new(tcp, ctx)
        # Defer the TLS handshake: accept returns the raw socket and the
        # worker decides when to run accept() on the SSL layer.
        ssl_server.start_immediately = false
        @listener = ssl_server
      else
        @listener = tcp
      end
    end

    # Fork one worker child and record its pid. The child never returns
    # from the fork block: it resets the master's traps (the worker
    # installs its own) and runs a Worker until told to exit.
    def spawn_worker
      worker_index = @next_index
      @next_index += 1
      pid = fork do
        # Inside the child: clean signal traps; the worker installs its own.
        Signal.trap('INT', 'DEFAULT')
        Signal.trap('TERM', 'DEFAULT')
        worker_args = {
          host: @host, port: @port, app: @app,
          read_timeout: @read_timeout, tls: @tls,
          thread_count: @thread_count, config: @config,
          worker_index: worker_index
        }
        # Hand the inherited socket to the worker in :share mode. In
        # :reuseport mode the worker binds its own with SO_REUSEPORT.
        worker_args[:listener] = @listener if @worker_model == :share
        Worker.new(**worker_args).run
      end
      @children[pid] = worker_index
    end

    # Supervision loop: wake at most once per second (the select timeout
    # doubles as the child-reaping cadence) until the self-pipe signals
    # shutdown, then drain the children.
    def supervise
      until @stopping
        # Block on the shutdown pipe + reap dead children.
        ready, = IO.select([@shutdown_pipe], nil, nil, 1.0)

        if ready
          begin
            @shutdown_pipe.read_nonblock(64)
          rescue StandardError
            nil
          end
          @stopping = true
          break
        end

        reap_and_respawn
      end

      shutdown_children
    end

    # Non-blocking reap of every child that has exited since the last
    # pass; each reaped worker is respawned unless we are shutting down.
    # Pids we did not fork (not in @children) are reaped but otherwise
    # ignored.
    def reap_and_respawn
      while (result = Process.waitpid2(-1, Process::WNOHANG))
        pid, _status = result
        next unless @children.key?(pid)

        Hyperion.logger.warn { { message: 'worker died, respawning', worker_pid: pid } }
        @children.delete(pid)
        spawn_worker unless @stopping
      end
    rescue Errno::ECHILD
      # No children — happens during shutdown.
    end

    # Graceful drain: TERM every child, poll (100ms) for exits up to
    # @graceful_timeout seconds, then KILL whatever is left.
    # NOTE(review): the deadline uses wall-clock Time.now; a monotonic
    # clock would be immune to clock adjustments — confirm before changing.
    def shutdown_children
      Hyperion.logger.info do
        { message: 'master draining', graceful_timeout: @graceful_timeout }
      end
      @children.each_key do |pid|
        Process.kill('TERM', pid)
      rescue StandardError
        nil
      end

      deadline = Time.now + @graceful_timeout
      until @children.empty? || Time.now > deadline
        begin
          pid, _status = Process.waitpid2(-1, Process::WNOHANG)
          if pid
            @children.delete(pid)
          else
            sleep 0.1
          end
        rescue Errno::ECHILD
          break
        end
      end

      # Force-kill stragglers.
      @children.each_key do |pid|
        Process.kill('KILL', pid)
      rescue StandardError
        nil
      end
      @children.clear

      Hyperion.logger.info { { message: 'master exiting' } }
    end
  end
end
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Lock-free per-thread counters. Each worker thread mutates its own Hash
  # on the hot path — no mutex acquire/release on every increment, no
  # contention across the thread pool. `snapshot` aggregates lazily across
  # all threads that have ever incremented (one short mutex section, only
  # taken when the operator asks for stats).
  #
  # Storage uses Thread#thread_variable_get/set, NOT Thread#[] — the
  # bracket accessors are FIBER-local, so under the fiber scheduler every
  # request fiber would allocate its own invisible counter hash that
  # `snapshot` (running on a different fiber) could never see. Thread
  # variables are shared by all fibers of a thread, which is the intended
  # granularity.
  #
  # Reset semantics: counters monotonically increase. Operators that want
  # rate-of-change should snapshot, sleep, snapshot, diff. To keep that
  # guarantee, counts from threads that have exited are folded into a
  # retired accumulator before the dead thread is pruned.
  #
  # Public API:
  #   Hyperion.stats -> Hash with all current values across all threads.
  class Metrics
    def initialize
      @threads = Set.new
      @threads_mutex = Mutex.new
      # Totals inherited from threads that have since exited; merged into
      # every snapshot so totals never go backwards when a pool thread dies.
      @retired = Hash.new(0)
      # Each Metrics instance has its own thread-variable key so spec runs
      # that build fresh Metrics objects don't share state across examples.
      @thread_key = :"__hyperion_metrics_#{object_id}__"
    end

    # Hot path: one thread-variable lookup + one hash op. No mutex (the
    # mutex is only taken once per thread, on first registration).
    #
    # @param key [Symbol] counter name
    # @param by [Integer] delta (may be negative)
    def increment(key, by = 1)
      counters = Thread.current.thread_variable_get(@thread_key) || register_thread_counters
      counters[key] += by
    end

    def decrement(key, by = 1)
      increment(key, -by)
    end

    # Bump the per-status-code response counter, e.g. :responses_200.
    def increment_status(code)
      increment(:"responses_#{code}")
    end

    # Aggregate all live threads' counters plus the retired totals.
    # Dead threads are folded into @retired and pruned so the registry
    # doesn't grow without bound. Missing keys read as nil (not 0).
    #
    # @return [Hash{Symbol=>Integer}]
    def snapshot
      result = Hash.new(0)
      @threads_mutex.synchronize do
        @threads.delete_if do |t|
          next false if t.alive?

          # Preserve monotonicity: bank the dead thread's counts first.
          t.thread_variable_get(@thread_key)&.each { |k, v| @retired[k] += v }
          true
        end
        @retired.each { |k, v| result[k] += v }
        @threads.each do |t|
          counters = t.thread_variable_get(@thread_key)
          next unless counters

          counters.each { |k, v| result[k] += v }
        end
      end
      result.default = nil
      result
    end

    # Tests can call .reset! between examples to avoid cross-spec leakage.
    # Clears both live per-thread hashes and the retired totals.
    def reset!
      @threads_mutex.synchronize do
        @retired.clear
        @threads.each { |t| t.thread_variable_get(@thread_key)&.clear }
      end
    end

    private

    # First increment on a thread: allocate its counter hash, store it as a
    # thread variable, and register the thread for later aggregation.
    def register_thread_counters
      counters = Hash.new(0)
      Thread.current.thread_variable_set(@thread_key, counters)
      @threads_mutex.synchronize { @threads << Thread.current }
      counters
    end
  end
end
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Pure-Ruby HTTP/1.1 parser.
  # Phase 4 replaces this with a C extension wrapping llhttp; the interface
  # (parse(buffer) -> [Request, end_offset] | raise ParseError | raise UnsupportedError)
  # stays stable.
  class Parser
    REQUEST_LINE_RE = %r{\A([A-Z]+) ([^ ?]+)(?:\?([^ ]*))? (HTTP/\d\.\d)\r\n}
    HEADER_RE = /\G([!-9;-~]+):[ \t]*(.*?)[ \t]*\r\n/
    # RFC 9110 §8.6: Content-Length is 1*DIGIT. `to_i` silently mapping
    # garbage ("abc", "+5", "0x10") to an integer is a request-smuggling
    # vector, so the raw value is validated before conversion.
    CONTENT_LENGTH_RE = /\A\d+\z/
    # RFC 9112 §7.1: chunk-size is 1*HEXDIG (chunk extensions are split off
    # before this check). `to_i(16)` would map invalid tokens to 0, which
    # reads as the terminal chunk — reject instead.
    CHUNK_SIZE_RE = /\A[0-9a-fA-F]+\z/

    # Returns [Request, end_offset] where end_offset is the byte index just AFTER
    # the last byte consumed by parsing. The caller (Connection) uses end_offset
    # to compute carry-over for pipelining.
    #
    # @raise [ParseError] malformed request line/headers/body framing
    # @raise [UnsupportedError] Transfer-Encoding other than (terminal) chunked
    def parse(buffer)
      m = REQUEST_LINE_RE.match(buffer)
      raise ParseError, 'invalid request line' unless m

      method, path, query, version = m.captures
      offset = m.end(0)

      # NOTE(review): repeated header names overwrite earlier values here —
      # duplicate Content-Length headers are therefore not detectable at
      # this layer; confirm whether Connection needs to care.
      headers = {}
      loop do
        if buffer.byteslice(offset, 2) == "\r\n"
          offset += 2
          break
        end
        h = HEADER_RE.match(buffer, offset)
        raise ParseError, 'invalid header line' unless h && h.begin(0) == offset

        headers[h[1].downcase] = h[2]
        offset = h.end(0)
      end

      headers_end = offset

      has_content_length = headers.key?('content-length')
      has_transfer_encoding = headers.key?('transfer-encoding')

      # RFC 9112 §6.1: a sender MUST NOT send a message containing both
      # Content-Length and Transfer-Encoding. Refuse rather than risk
      # request smuggling.
      if has_content_length && has_transfer_encoding
        raise ParseError, 'both Content-Length and Transfer-Encoding present'
      end

      if has_transfer_encoding
        encodings = headers['transfer-encoding'].split(',').map { |e| e.strip.downcase }
        unless encodings.last == 'chunked'
          raise UnsupportedError,
                "Transfer-Encoding #{headers['transfer-encoding'].inspect} not supported"
        end

        result = dechunk(buffer, headers_end)
        raise ParseError, 'truncated chunked body' if result.nil?

        body, end_offset = result
        return [build_request(method, path, query, version, headers, body), end_offset]
      end

      content_length = parse_content_length(headers['content-length'])
      body = buffer.byteslice(headers_end, content_length) || ''
      if body.bytesize != content_length
        raise ParseError, "content-length mismatch (declared #{content_length}, got #{body.bytesize})"
      end

      end_offset = headers_end + content_length
      [build_request(method, path, query, version, headers, body), end_offset]
    end

    private

    # Single construction point for the Request value object (previously
    # duplicated across the chunked and content-length paths).
    def build_request(method, path, query, version, headers, body)
      Request.new(
        method: method,
        path: path,
        query_string: query || '',
        http_version: version,
        headers: headers,
        body: body
      )
    end

    # Strictly validate and convert the Content-Length header value.
    # nil (header absent) means no body → 0.
    #
    # @raise [ParseError] non-digit Content-Length
    def parse_content_length(raw)
      return 0 if raw.nil?
      raise ParseError, "invalid Content-Length #{raw.inspect}" unless raw.match?(CONTENT_LENGTH_RE)

      raw.to_i
    end

    # Decode RFC 9112 §7.1 chunked body starting at `start` in `buffer`.
    # Returns [body_bytes, end_offset] on success. Returns nil if buffer is
    # truncated or a chunk-size token is malformed (caller treats as
    # ParseError).
    def dechunk(buffer, start)
      body = +''
      cursor = start

      loop do
        line_end = buffer.index("\r\n", cursor)
        return nil unless line_end

        size_line = buffer.byteslice(cursor, line_end - cursor)
        # Chunk extensions (";name=value") are permitted but ignored.
        size_token = size_line.split(';').first.to_s.strip
        return nil unless size_token.match?(CHUNK_SIZE_RE)

        size = size_token.to_i(16)
        cursor = line_end + 2

        if size.zero?
          # Skip optional trailer headers until blank line.
          loop do
            nl = buffer.index("\r\n", cursor)
            return nil unless nl
            return [body, cursor + 2] if nl == cursor

            cursor = nl + 2
          end
        end

        return nil if buffer.bytesize < cursor + size + 2

        body << buffer.byteslice(cursor, size)
        cursor += size

        # Each chunk's data must be followed by exactly CRLF.
        return nil unless buffer.byteslice(cursor, 2) == "\r\n"

        cursor += 2
      end
    end
  end
end
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Bounded free-list of reusable objects for a single thread.
  #
  # `acquire` hands back a parked object when one is available, otherwise
  # builds a fresh one via the factory; the optional reset hook runs in
  # both cases so callers always receive a clean object. `release` parks
  # an object for reuse unless the list is already at capacity, in which
  # case it is simply dropped for GC.
  #
  # Deliberately NOT thread-safe: each Hyperion worker process runs one
  # fiber scheduler on one thread, so a per-process pool needs no locking.
  class Pool
    def initialize(max_size:, factory:, reset: nil)
      @capacity = max_size
      @build = factory
      @refresh = reset
      @idle = []
    end

    # Pop a parked object (or build a new one), run the reset hook, return it.
    def acquire
      candidate = @idle.pop
      candidate = @build.call if candidate.nil?
      @refresh.call(candidate) unless @refresh.nil?
      candidate
    end

    # Park `obj` for later reuse; dropped silently once at capacity.
    def release(obj)
      @idle.push(obj) if @idle.size < @capacity
    end

    # Number of idle objects currently parked.
    def size
      @idle.length
    end
  end
end
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Hyperion
  # Frozen value object describing one parsed HTTP request.
  # Phase 5 (object pooling) will redesign this with explicit reset
  # semantics; until then instances (and their header hash) are frozen at
  # construction so nothing can mutate a request after the parser hands
  # it off.
  class Request
    attr_reader :method, :path, :query_string, :http_version, :headers, :body, :peer_address

    def initialize(method:, path:, query_string:, http_version:, headers:, body:, peer_address: nil)
      @method       = method
      @path         = path
      @query_string = query_string
      @http_version = http_version
      @headers      = headers.freeze
      @body         = body
      @peer_address = peer_address
      freeze
    end

    # Case-insensitive header lookup (the parser stores keys lowercased).
    # Returns nil when the header is absent.
    def header(name)
      key = name.downcase
      @headers[key]
    end
  end
end
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'time'

module Hyperion
  # Serializes a Rack [status, headers, body] tuple to an HTTP/1.1 wire stream.
  # Phase 5 replaces this with an io_buffer-batched writer; Phase 7 adds a
  # sibling Http2ResponseWriter. Public surface (#write) stays stable.
  class ResponseWriter
    REASONS = {
      200 => 'OK',
      201 => 'Created',
      204 => 'No Content',
      301 => 'Moved Permanently',
      302 => 'Found',
      304 => 'Not Modified',
      400 => 'Bad Request',
      401 => 'Unauthorized',
      403 => 'Forbidden',
      404 => 'Not Found',
      405 => 'Method Not Allowed',
      408 => 'Request Timeout',
      409 => 'Conflict',
      410 => 'Gone',
      413 => 'Payload Too Large',
      414 => 'URI Too Long',
      422 => 'Unprocessable Entity',
      429 => 'Too Many Requests',
      500 => 'Internal Server Error',
      501 => 'Not Implemented',
      502 => 'Bad Gateway',
      503 => 'Service Unavailable',
      504 => 'Gateway Timeout'
    }.freeze

    CRLF_HEADER_VALUE = /[\r\n]/

    # Write one complete HTTP/1.1 response to `io`.
    #
    # @param io [#write] the client socket (or any writable IO)
    # @param status [Integer] HTTP status code
    # @param headers [Hash] response headers; values may be Strings or
    #   (per the Rack 3 SPEC) Arrays of Strings for repeated fields
    # @param body [#each] Rack body; closed in the ensure block if closable
    # @param keep_alive [Boolean] negotiated upstream by Connection
    # @raise [ArgumentError] if any header value contains CR/LF
    def write(io, status, headers, body, keep_alive: false)
      # Phase 1 buffers the full body so Content-Length is exact.
      # Phase 2 introduces chunked transfer-encoding for streaming bodies;
      # Phase 5 batches via IO::Buffer to avoid this intermediate String.
      buffered = +''
      body.each { |chunk| buffered << chunk }

      reason = REASONS[status] || 'Unknown'
      date_str = Time.now.httpdate

      head = build_head(status, reason, headers, buffered.bytesize, keep_alive, date_str)

      # Phase 8 perf fix: coalesce status line + all headers + body into a
      # SINGLE io.write call. Each syscall round-trip is ~1 usec on macOS
      # kqueue; before this change we issued (1 status) + (N headers) + (1 blank)
      # + (1 body) = 8+ syscalls per response. Now: 1 syscall.
      if buffered.empty?
        io.write(head)
      else
        # Concatenate into the head buffer (which is already a fresh +'' from
        # the C builder or the Ruby fallback) so we still emit a single write.
        head << buffered
        io.write(head)
      end
    ensure
      body.close if body.respond_to?(:close)
    end

    private

    # rc17: prefer the C extension when available — eliminates the per-response
    # status-line interpolation, normalized hash, and per-header String#<<
    # allocations. Pure-Ruby fallback covers JRuby/TruffleRuby/build failures.
    # NOTE(review): the C fast path predates Array header values — confirm
    # CParser.build_response_head handles them, otherwise Rack 3 apps
    # emitting Arrays are only correct on the Ruby fallback.
    def build_head(status, reason, headers, body_size, keep_alive, date_str)
      if defined?(::Hyperion::CParser) && ::Hyperion::CParser.respond_to?(:build_response_head)
        ::Hyperion::CParser.build_response_head(status, reason, headers, body_size, keep_alive, date_str)
      else
        build_head_ruby(status, reason, headers, body_size, keep_alive, date_str)
      end
    end

    # Build the status line + header block. Header names are lowercased;
    # Content-Length / Connection / Date are server-controlled (Date only
    # when the app didn't set one).
    def build_head_ruby(status, reason, headers, body_size, keep_alive, date_str)
      normalized = {}
      headers.each { |k, v| normalized[k.to_s.downcase] = v }
      normalized['content-length'] = body_size.to_s
      # Keep-alive negotiated by Connection layer; ResponseWriter just emits it.
      normalized['connection'] = keep_alive ? 'keep-alive' : 'close'
      normalized['date'] ||= date_str

      buf = +"HTTP/1.1 #{status} #{reason}\r\n"
      normalized.each do |k, v|
        # Rack 3 SPEC: a header value may be an Array of Strings (e.g.
        # several Set-Cookie fields) — emit one field line per element.
        # Previously an Array went through `to_s` and its #inspect output
        # ('["a", "b"]') hit the wire. Non-array values (including nil,
        # which still serializes to an empty field) keep the old behavior.
        (v.is_a?(Array) ? v : [v]).each do |item|
          value = item.to_s
          raise ArgumentError, "header #{k.inspect} contains CR/LF" if value.match?(CRLF_HEADER_VALUE)

          buf << k << ': ' << value << "\r\n"
        end
      end
      buf << "\r\n"
      buf
    end
  end
end
|