quicsilver 0.1.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +41 -0
- data/.gitignore +3 -1
- data/CHANGELOG.md +76 -5
- data/Gemfile.lock +18 -4
- data/LICENSE +21 -0
- data/README.md +33 -53
- data/Rakefile +29 -2
- data/benchmarks/components.rb +191 -0
- data/benchmarks/concurrent.rb +110 -0
- data/benchmarks/helpers.rb +88 -0
- data/benchmarks/quicsilver_server.rb +46 -0
- data/benchmarks/rails.rb +170 -0
- data/benchmarks/throughput.rb +113 -0
- data/examples/minimal_http3_server.rb +0 -6
- data/examples/rack_http3_server.rb +0 -6
- data/examples/simple_client_test.rb +26 -0
- data/ext/quicsilver/quicsilver.c +615 -138
- data/lib/quicsilver/client/client.rb +250 -0
- data/lib/quicsilver/client/request.rb +98 -0
- data/lib/quicsilver/protocol/frames.rb +327 -0
- data/lib/quicsilver/protocol/qpack/decoder.rb +165 -0
- data/lib/quicsilver/protocol/qpack/encoder.rb +189 -0
- data/lib/quicsilver/protocol/qpack/header_block_decoder.rb +125 -0
- data/lib/quicsilver/protocol/qpack/huffman.rb +459 -0
- data/lib/quicsilver/protocol/request_encoder.rb +47 -0
- data/lib/quicsilver/protocol/request_parser.rb +387 -0
- data/lib/quicsilver/protocol/response_encoder.rb +72 -0
- data/lib/quicsilver/protocol/response_parser.rb +249 -0
- data/lib/quicsilver/server/listener_data.rb +14 -0
- data/lib/quicsilver/server/request_handler.rb +86 -0
- data/lib/quicsilver/server/request_registry.rb +50 -0
- data/lib/quicsilver/server/server.rb +336 -0
- data/lib/quicsilver/transport/configuration.rb +132 -0
- data/lib/quicsilver/transport/connection.rb +350 -0
- data/lib/quicsilver/transport/event_loop.rb +38 -0
- data/lib/quicsilver/transport/inbound_stream.rb +33 -0
- data/lib/quicsilver/transport/stream.rb +28 -0
- data/lib/quicsilver/transport/stream_event.rb +26 -0
- data/lib/quicsilver/version.rb +1 -1
- data/lib/quicsilver.rb +49 -9
- data/lib/rackup/handler/quicsilver.rb +77 -0
- data/quicsilver.gemspec +10 -3
- metadata +122 -17
- data/examples/minimal_http3_client.rb +0 -89
- data/lib/quicsilver/client.rb +0 -191
- data/lib/quicsilver/http3/request_encoder.rb +0 -112
- data/lib/quicsilver/http3/request_parser.rb +0 -158
- data/lib/quicsilver/http3/response_encoder.rb +0 -73
- data/lib/quicsilver/http3.rb +0 -68
- data/lib/quicsilver/listener_data.rb +0 -29
- data/lib/quicsilver/server.rb +0 -258
- data/lib/quicsilver/server_configuration.rb +0 -49
|
# frozen_string_literal: true

module Quicsilver
  class Server
    # Pairs the two native handles that together describe one MSQUIC
    # listener: the listener handle itself and its C-side context pointer.
    # Instances are plain read-only holders created by Server#start.
    class ListenerData
      # Raw MSQUIC listener handle.
      attr_reader :listener_handle

      # Pointer to the C context allocated for this listener.
      attr_reader :context_handle

      def initialize(listener_handle, context_handle)
        @listener_handle = listener_handle
        @context_handle = context_handle
      end
    end
  end
end
|
# frozen_string_literal: true

module Quicsilver
  class Server
    # Executes a single HTTP/3 request end-to-end: parses the buffered stream
    # bytes, validates headers, builds a Rack env, invokes the Rack app, and
    # writes the response (or an error) back on the stream. Protocol failures
    # are translated into the appropriate QUIC-level signals (connection
    # shutdown for frame errors, stream reset for message errors).
    class RequestHandler
      # Safe HTTP methods allowed in 0-RTT early data (RFC 9110 §9.2.1)
      SAFE_METHODS = %w[GET HEAD OPTIONS].freeze

      # app:               Rack-compatible callable.
      # configuration:     Transport::Configuration supplying parser limits and
      #                    the early-data policy.
      # request_registry:  Server::RequestRegistry tracking in-flight requests.
      # cancelled_streams: shared Set of stream ids cancelled by the peer;
      #                    guarded by cancelled_mutex (both owned by Server).
      def initialize(app:, configuration:, request_registry:, cancelled_streams:, cancelled_mutex:)
        @app = app
        @configuration = configuration
        @request_registry = request_registry
        @cancelled_streams = cancelled_streams
        @cancelled_mutex = cancelled_mutex
      end

      # Handle one complete request on +stream+ belonging to +connection+.
      # early_data: true when the request arrived as 0-RTT data; exposed to the
      # app via env["quicsilver.early_data"] and used for the RFC 8470 check.
      # Never raises to the caller: every error class is rescued below, and the
      # ensure block always releases registry/cancellation state for the stream.
      def call(connection, stream, early_data: false)
        parser = Protocol::RequestParser.new(
          stream.data,
          max_body_size: @configuration.max_body_size,
          max_header_size: @configuration.max_header_size,
          max_header_count: @configuration.max_header_count,
          max_frame_payload_size: @configuration.max_frame_payload_size
        )
        parser.parse
        parser.validate_headers! # raises MessageError for missing/invalid pseudo-headers
        env = parser.to_rack_env

        if env && @app
          env["quicsilver.early_data"] = early_data

          # RFC 8470: reject unsafe methods on 0-RTT unless app opted in
          if @configuration.early_data_policy == :reject &&
             early_data && !SAFE_METHODS.include?(env["REQUEST_METHOD"])
            connection.send_error(stream, 425, "Too Early") if stream.writable?
            return
          end

          @request_registry.track(
            stream.stream_id,
            connection.handle,
            path: env["PATH_INFO"] || "/",
            method: env["REQUEST_METHOD"] || "GET"
          )

          status, headers, body = @app.call(env)

          # Peer may have reset/STOP_SENDING the stream while the app ran;
          # in that case there is nobody to respond to.
          if cancelled_stream?(stream.stream_id)
            Quicsilver.logger.debug("Skipping response for cancelled stream #{stream.stream_id}")
            return
          end

          raise "Stream handle not found for stream #{stream.stream_id}" unless stream.writable?

          connection.send_response(stream, status, headers, body, head_request: env["REQUEST_METHOD"] == "HEAD")
          @request_registry.complete(stream.stream_id)
        else
          connection.send_error(stream, 400, "Bad Request") if stream.writable?
        end
      rescue Server::DrainTimeoutError
        # Raised into this worker thread by Server#stop_worker_pool when the
        # drain deadline passes; abandon the request quietly.
        Quicsilver.logger.debug("Request interrupted by drain: stream #{stream.stream_id}")
      rescue Protocol::FrameError => e
        # Frame errors are connection-level: signal via CONNECTION_CLOSE with H3 error code
        Quicsilver.logger.error("Frame error: #{e.message} (0x#{e.error_code.to_s(16)})")
        Quicsilver.connection_shutdown(connection.handle, e.error_code, false) rescue nil
      rescue Protocol::MessageError => e
        # Message errors are stream-level: signal via RESET_STREAM with H3 error code
        Quicsilver.logger.error("Message error: #{e.message} (0x#{e.error_code.to_s(16)})")
        Quicsilver.stream_reset(stream.stream_handle, e.error_code) if stream.writable?
      rescue => e
        # Anything else (app exception included) becomes a 500 if the stream
        # can still be written to.
        Quicsilver.logger.error("Error handling request: #{e.class} - #{e.message}")
        Quicsilver.logger.debug(e.backtrace.first(5).join("\n"))
        connection.send_error(stream, 500, "Internal Server Error") if stream.writable?
      ensure
        # complete is idempotent-by-guard here: only called if the stream is
        # still tracked (error paths above may not have completed it).
        @request_registry.complete(stream.stream_id) if @request_registry.include?(stream.stream_id)
        @cancelled_mutex.synchronize { @cancelled_streams.delete(stream.stream_id) }
      end

      private

      # True if the peer cancelled this stream (reset / stop-sending recorded
      # by Server#handle_stream_event).
      def cancelled_stream?(stream_id)
        @cancelled_mutex.synchronize { @cancelled_streams.include?(stream_id) }
      end
    end
  end
end
|
# frozen_string_literal: true

module Quicsilver
  class Server
    # Thread-safe bookkeeping of in-flight requests, keyed by QUIC stream id.
    # Every public method takes the internal lock, so worker threads and the
    # event loop may call in concurrently.
    class RequestRegistry
      def initialize
        @lock = Mutex.new
        @entries = {}
      end

      # Record a newly dispatched request. Returns the stored entry hash.
      def track(stream_id, connection_handle, path:, method:, started_at: Time.now)
        entry = {
          connection_handle: connection_handle,
          path: path,
          method: method,
          started_at: started_at
        }
        @lock.synchronize { @entries[stream_id] = entry }
      end

      # Forget a finished (or cancelled) request. Returns the removed entry,
      # or nil if the stream was not tracked.
      def complete(stream_id)
        @lock.synchronize { @entries.delete(stream_id) }
      end

      # Number of requests currently in flight.
      def active_count
        @lock.synchronize { @entries.size }
      end

      # Shallow-copied snapshot of all in-flight requests.
      def active_requests
        @lock.synchronize { @entries.dup }
      end

      # In-flight requests whose start time is more than +seconds+ ago.
      def requests_older_than(seconds)
        threshold = Time.now - seconds
        @lock.synchronize do
          @entries.select { |_, entry| entry[:started_at] < threshold }
        end
      end

      # True when nothing is in flight.
      def empty?
        @lock.synchronize { @entries.empty? }
      end

      # True when +stream_id+ is currently tracked.
      def include?(stream_id)
        @lock.synchronize { @entries.key?(stream_id) }
      end
    end
  end
end
|
# frozen_string_literal: true

module Quicsilver
  # HTTP/3 server built on the MSQUIC C extension. Owns the listener, the
  # per-connection state, a fixed-size worker pool fed by a bounded Queue,
  # and the registry of in-flight requests. The C extension delivers stream
  # events through the Server.handle_stream class-level callback.
  class Server
    attr_reader :address, :port, :server_configuration, :running, :connections, :request_registry, :shutting_down, :max_queue_size, :max_connections

    # Event names as passed in by the C extension.
    STREAM_EVENT_RECEIVE = "RECEIVE"
    STREAM_EVENT_RECEIVE_FIN = "RECEIVE_FIN"
    STREAM_EVENT_CONNECTION_ESTABLISHED = "CONNECTION_ESTABLISHED"
    STREAM_EVENT_SEND_COMPLETE = "SEND_COMPLETE"
    STREAM_EVENT_CONNECTION_CLOSED = "CONNECTION_CLOSED"
    STREAM_EVENT_STREAM_RESET = "STREAM_RESET"
    STREAM_EVENT_STOP_SENDING = "STOP_SENDING"

    ServerStopError = Class.new(StandardError)
    # Raised *into* stuck worker threads by stop_worker_pool; rescued in
    # RequestHandler#call.
    DrainTimeoutError = Class.new(StandardError)

    class << self
      # The most recently constructed Server (set in #initialize). The C
      # extension can only call a class-level entry point, so events are
      # routed through this singleton-style reference.
      attr_accessor :instance

      # Callback from C extension - delegates to server instance
      def handle_stream(connection_data, stream_id, event, data, early_data)
        instance&.handle_stream_event(connection_data, stream_id, event, data, early_data)
      end
    end

    DEFAULT_THREAD_POOL_SIZE = 5
    DEFAULT_QUEUE_MULTIPLIER = 4
    DEFAULT_MAX_CONNECTIONS = 100

    # port/address:        where the QUIC listener binds.
    # app:                 Rack app; defaults to a built-in hello handler.
    # server_configuration: Transport::Configuration (TLS, flow control, limits).
    # threads:             worker pool size.
    # max_queue_size:      request backlog bound; defaults to threads * 4.
    # max_connections:     connection cap; excess connections are rejected
    #                      with H3_EXCESSIVE_LOAD.
    def initialize(port = 4433, address: "0.0.0.0", app: nil, server_configuration: nil, threads: DEFAULT_THREAD_POOL_SIZE, max_queue_size: nil, max_connections: DEFAULT_MAX_CONNECTIONS)
      @port = port
      @address = address
      @app = app || default_rack_app
      @server_configuration = server_configuration || Transport::Configuration.new
      @running = false
      @shutting_down = false
      @listener_data = nil
      @config_handle = nil
      # NOTE(review): @connections is a plain Hash mutated from the C event
      # callback and read in #shutdown without a lock — appears to rely on
      # event delivery being serialized; confirm against the C extension.
      @connections = {}
      @request_registry = RequestRegistry.new
      @handler_threads = []
      @handler_mutex = Mutex.new
      @thread_pool_size = threads
      @max_queue_size = max_queue_size || threads * DEFAULT_QUEUE_MULTIPLIER
      @work_queue = Queue.new
      @max_connections = max_connections
      @cancelled_streams = Set.new
      @cancelled_mutex = Mutex.new

      @request_handler = RequestHandler.new(
        app: @app,
        configuration: @server_configuration,
        request_registry: @request_registry,
        cancelled_streams: @cancelled_streams,
        cancelled_mutex: @cancelled_mutex
      )

      self.class.instance = self
    end

    # Create configuration + listener via the C extension, start the worker
    # pool, and block on the event loop until shutdown. Raises ServerError
    # (or the more specific configuration/listener errors) on failure; any
    # partially-created native resources are cleaned up first.
    def start
      raise ServerIsRunningError, "Server is already running" if @running

      Quicsilver.open_connection
      @config_handle = Quicsilver.create_server_configuration(@server_configuration.to_h)
      raise ServerConfigurationError, "Failed to create server configuration" unless @config_handle

      result = Quicsilver.create_listener(@config_handle)
      @listener_data = ListenerData.new(result[0], result[1])
      # NOTE(review): ListenerData.new never returns nil, so this guard can't
      # fire; if create_listener returned nil, result[0] above would raise
      # first (caught by the generic rescue below). Confirm intended check.
      raise ServerListenerError, "Failed to create listener #{@address}:#{@port}" unless @listener_data

      unless Quicsilver.start_listener(@listener_data.listener_handle, @address, @port, @server_configuration.alpn)
        Quicsilver.close_configuration(@config_handle)
        @config_handle = nil
        cleanup_failed_server
        raise ServerListenerError, "Failed to start listener on #{@address}:#{@port}"
      end

      @running = true

      setup_signal_handlers
      start_worker_pool
      Quicsilver.event_loop.start
      Quicsilver.event_loop.join # Block until shutdown
    rescue ServerConfigurationError, ServerListenerError => e
      cleanup_failed_server
      @running = false
      raise e
    rescue => e
      cleanup_failed_server
      @running = false

      # Translate well-known MSQUIC status codes embedded in the message
      # into actionable hints.
      error_msg = case e.message
                  when /0x16/
                    "Invalid parameter error - check certificate files and network configuration"
                  when /0x30/
                    "Address already in use - port #{@port} may be occupied"
                  else
                    e.message
                  end

      raise ServerError, "Server start failed: #{error_msg}"
    end

    # Hard stop: drain workers, tear down listener/configuration handles and
    # stop the event loop. Raises ServerStopError if native teardown fails.
    def stop
      return unless @running

      drain

      if @listener_data && @listener_data.listener_handle
        Quicsilver.stop_listener(@listener_data.listener_handle)
        Quicsilver.close_listener([@listener_data.listener_handle, @listener_data.context_handle])
      end

      if @config_handle
        Quicsilver.close_configuration(@config_handle)
        @config_handle = nil
      end

      Quicsilver.event_loop.stop
      @running = false
      @listener_data = nil
    rescue => e
      @listener_data = nil
      @running = false
      raise ServerStopError, "Failed to stop server: #{e.message}"
    end

    def running?
      @running
    end

    # True if the peer cancelled the given stream (reset / stop-sending).
    def cancelled_stream?(stream_id)
      @cancelled_mutex.synchronize { @cancelled_streams.include?(stream_id) }
    end

    # Wait for work queue to drain, then shut down the pool
    def drain(timeout: 5)
      Quicsilver.logger.debug("Draining work queue (#{@work_queue.size} pending)")

      deadline = Time.now + timeout

      # Wait for work queue to empty
      while @work_queue.size > 0 && Time.now < deadline
        sleep 0.05
      end

      # Signal workers to exit
      stop_worker_pool
    end

    # Graceful shutdown: send GOAWAY, drain requests, then stop
    def shutdown(timeout: 30)
      return unless @running
      return if @shutting_down

      @shutting_down = true
      Quicsilver.logger.info("Initiating graceful shutdown (timeout: #{timeout}s)")

      # Phase 1: Send GOAWAY - tell clients to stop sending new requests
      @connections.each_value { |c| c.send_goaway(Protocol::MAX_STREAM_ID) }

      # Phase 2: Drain in-flight requests
      drain(timeout: timeout)

      # Grace period: let pending responses reach clients
      sleep 0.5

      # Log any requests that didn't complete
      unless @request_registry.empty?
        @request_registry.active_requests.each do |stream_id, req|
          elapsed = Time.now - req[:started_at]
          Quicsilver.logger.warn("Force-closing request: #{req[:method]} #{req[:path]} (stream: #{stream_id}, elapsed: #{elapsed.round(2)}s)")
        end
      end

      # Phase 3: Shutdown connections
      @connections.each_value(&:shutdown)
      sleep 0.1

      # Phase 4: Hard stop
      stop
      @shutting_down = false

      Quicsilver.logger.info("Graceful shutdown complete")
    end

    # Dispatch a single event from the C extension. connection_data is an
    # array whose first element is the connection handle; data/early_data
    # vary per event type.
    def handle_stream_event(connection_data, stream_id, event, data, early_data) # :nodoc:
      connection_handle = connection_data[0]

      case event
      when STREAM_EVENT_CONNECTION_ESTABLISHED
        # Enforce the connection cap before allocating any Ruby-side state.
        if @connections.size >= @max_connections
          Quicsilver.logger.warn("Connection limit reached (#{@max_connections}), rejecting connection")
          Quicsilver.connection_shutdown(connection_handle, Protocol::H3_EXCESSIVE_LOAD, false)
          return
        end

        connection = Transport::Connection.new(connection_handle, connection_data)
        @connections[connection_handle] = connection
        connection.setup_http3_streams

      when STREAM_EVENT_CONNECTION_CLOSED
        @connections.delete(connection_handle)&.streams&.clear
        Quicsilver.close_server_connection(connection_handle)

      when STREAM_EVENT_SEND_COMPLETE
        # Buffer cleanup handled in C extension

      when STREAM_EVENT_RECEIVE
        return unless (connection = @connections[connection_handle])

        # Unidirectional streams (control, QPACK) must be processed incrementally —
        # they never send FIN, so waiting for RECEIVE_FIN would mean never parsing.
        if (stream_id & 0x02) != 0 # unidirectional
          begin
            connection.receive_unidirectional_data(stream_id, data)
          rescue Protocol::FrameError => e
            Quicsilver.logger.error("Control stream error: #{e.message} (0x#{e.error_code.to_s(16)})")
            Quicsilver.connection_shutdown(connection_handle, e.error_code, false) rescue nil
          end
        else
          connection.buffer_data(stream_id, data)
        end

      when STREAM_EVENT_RECEIVE_FIN
        return unless (connection = @connections[connection_handle])

        event = Transport::StreamEvent.new(data, "RECEIVE_FIN")

        # Stitch the final chunk onto previously buffered data for this stream.
        full_data = connection.complete_stream(stream_id, event.data)
        stream = Transport::InboundStream.new(stream_id)
        stream.stream_handle = event.handle
        stream.append_data(full_data)

        if stream.bidirectional?
          connection.track_client_stream(stream_id)
          dispatch_request(connection, stream, early_data: early_data)
        else
          begin
            connection.handle_unidirectional_stream(stream)
          rescue Protocol::FrameError => e
            Quicsilver.logger.error("Control stream error: #{e.message} (0x#{e.error_code.to_s(16)})")
            Quicsilver.connection_shutdown(connection_handle, e.error_code, false) rescue nil
          end
        end

      when STREAM_EVENT_STREAM_RESET
        return unless (connection = @connections[connection_handle])
        event = Transport::StreamEvent.new(data, "STREAM_RESET")
        Quicsilver.logger.debug("Stream #{stream_id} reset by peer with error code: 0x#{event.error_code.to_s(16)}")

        # Closing a critical unidirectional stream is a connection error (RFC 9114 §6.2.1)
        if connection.critical_stream?(stream_id)
          Quicsilver.logger.error("Critical stream #{stream_id} reset by peer")
          Quicsilver.connection_shutdown(connection_handle, Protocol::H3_CLOSED_CRITICAL_STREAM, false) rescue nil
        else
          @cancelled_mutex.synchronize { @cancelled_streams.add(stream_id) }
          @request_registry.complete(stream_id)
        end

      when STREAM_EVENT_STOP_SENDING
        return unless @connections[connection_handle]
        event = Transport::StreamEvent.new(data, "STOP_SENDING")
        Quicsilver.logger.debug("Stream #{stream_id} stop sending requested with error code: 0x#{event.error_code.to_s(16)}")
        @cancelled_mutex.synchronize { @cancelled_streams.add(stream_id) }
        Quicsilver.stream_reset(event.handle, Protocol::H3_REQUEST_CANCELLED)
        @request_registry.complete(stream_id)
      end
    end

    private

    # INT/TERM trigger graceful shutdown. Runs on a fresh thread because
    # trap context forbids blocking operations such as Mutex#synchronize.
    def setup_signal_handlers
      %w[INT TERM].each do |signal|
        trap(signal) { Thread.new { shutdown } }
      end
    end

    # Fallback Rack app used when none is supplied.
    def default_rack_app
      ->(env) {
        [200,
         {"Content-Type" => "text/plain"},
         ["Hello from Quicsilver!\nMethod: #{env['REQUEST_METHOD']}\nPath: #{env['PATH_INFO']}\n"]]
      }
    end

    # Best-effort teardown of a partially-created listener; never raises.
    def cleanup_failed_server
      if @listener_data
        begin
          Quicsilver.stop_listener(@listener_data.listener_handle) if @listener_data.listener_handle
          Quicsilver.close_listener([@listener_data.listener_handle, @listener_data.context_handle]) if @listener_data.listener_handle
        rescue
          # Ignore cleanup errors
        ensure
          @listener_data = nil
        end
      end
    end

    attr_reader :work_queue

    # Enqueue a completed request stream for the worker pool, or reject with
    # 503 when the backlog bound is hit.
    def dispatch_request(connection, stream, early_data: false)
      if @work_queue.size >= @max_queue_size
        Quicsilver.logger.warn("Work queue full (#{@max_queue_size}), rejecting request")
        connection.send_error(stream, 503, "Service Unavailable") if stream.writable?
      else
        @work_queue.push([connection, stream, early_data])
      end
    end

    # Spawn @thread_pool_size workers; each pops until it sees :shutdown.
    def start_worker_pool
      @thread_pool_size.times do
        thread = Thread.new do
          while (work = @work_queue.pop)
            break if work == :shutdown
            connection, stream, early_data = work
            @request_handler.call(connection, stream, early_data: early_data)
          end
        end
        @handler_mutex.synchronize { @handler_threads << thread }
      end
    end

    # Push one :shutdown token per worker, give each 2s to exit, then raise
    # DrainTimeoutError into any that are still alive.
    def stop_worker_pool
      @thread_pool_size.times { @work_queue.push(:shutdown) }
      @handler_mutex.synchronize do
        @handler_threads.each { |t| t.join(2) }
        # Raise into any stuck workers
        @handler_threads.each { |t| t.raise(DrainTimeoutError, "drain timeout") if t.alive? }
        @handler_threads.clear
      end
    end
  end
end
|
# frozen_string_literal: true

module Quicsilver
  module Transport
    # QUIC/HTTP3 server configuration: TLS material, MSQUIC transport tuning
    # knobs, HTTP/3 parser limits, and the 0-RTT early-data policy. #to_h
    # produces the hash handed to the C extension (parser limits and the
    # early-data policy are Ruby-side only and deliberately excluded).
    #
    # Raises ServerConfigurationError from #initialize when the certificate
    # or key file is missing, or when early_data_policy is invalid.
    class Configuration
      attr_reader :cert_file, :key_file, :idle_timeout_ms, :server_resumption_level, :max_concurrent_requests,
                  :max_unidirectional_streams, :stream_receive_window, :stream_receive_buffer, :connection_flow_control_window,
                  :pacing_enabled, :send_buffering_enabled, :initial_rtt_ms, :initial_window_packets, :max_ack_delay_ms,
                  :keep_alive_interval_ms, :congestion_control_algorithm, :migration_enabled,
                  :disconnect_timeout_ms, :handshake_idle_timeout_ms,
                  :max_body_size, :max_header_size, :max_header_count, :max_frame_payload_size,
                  :early_data_policy

      # ALPN string negotiated with clients. Common HTTP/3 ALPN values:
      #   "h3"    - HTTP/3 (most common)
      #   "h3-29" - HTTP/3 draft version 29
      attr_reader :alpn

      # MSQUIC server resumption levels.
      QUIC_SERVER_RESUME_AND_ZERORTT = 1
      QUIC_SERVER_RESUME_ONLY = 2
      QUIC_SERVER_RESUME_AND_REUSE = 3
      QUIC_SERVER_RESUME_AND_REUSE_ZERORTT = 4

      # Congestion control algorithms
      CONGESTION_CONTROL_CUBIC = 0
      CONGESTION_CONTROL_BBR = 1

      DEFAULT_CERT_FILE = "certificates/server.crt"
      DEFAULT_KEY_FILE = "certificates/server.key"
      DEFAULT_ALPN = "h3"
      DEFAULT_IDLE_TIMEOUT_MS = 10_000

      # Flow control defaults — cross-referenced with quiche, quic-go, lsquic, RFC 9000
      # See: https://github.com/microsoft/msquic/blob/main/docs/Settings.md
      DEFAULT_STREAM_RECEIVE_WINDOW = 262_144 # 256KB (quiche/quic-go use 1MB, MsQuic default 64KB)
      DEFAULT_STREAM_RECEIVE_BUFFER = 32_768 # 32KB (MsQuic default 4KB — too small for typical responses)
      DEFAULT_CONNECTION_FLOW_CONTROL_WINDOW = 16_777_216 # 16MB - connection-wide flow control

      # Throughput defaults
      DEFAULT_PACING_ENABLED = true # RFC 9002: MUST pace or limit bursts
      DEFAULT_SEND_BUFFERING_ENABLED = true # MsQuic recommended — coalesces small writes
      DEFAULT_INITIAL_RTT_MS = 100 # MsQuic default 333ms is satellite-grade; 100ms matches Chromium
      DEFAULT_INITIAL_WINDOW_PACKETS = 10 # Matches RFC 9002 recommendation
      DEFAULT_MAX_ACK_DELAY_MS = 25 # Matches RFC 9000 default

      # Connection management defaults
      DEFAULT_KEEP_ALIVE_INTERVAL_MS = 0 # 0 = disabled. Set to 20000 for NAT traversal
      DEFAULT_CONGESTION_CONTROL_ALGORITHM = CONGESTION_CONTROL_CUBIC # CUBIC (0) or BBR (1)
      DEFAULT_MIGRATION_ENABLED = true # Client IP migration. Disable behind load balancers
      DEFAULT_DISCONNECT_TIMEOUT_MS = 16_000 # How long to wait for ACK before path declared dead
      DEFAULT_HANDSHAKE_IDLE_TIMEOUT_MS = 10_000 # Handshake timeout (separate from connection idle)

      # cert_file/key_file: paths to the TLS certificate and private key
      # (default to the bundled development certificates). options: any of
      # the keys below; unrecognized keys are ignored.
      def initialize(cert_file = nil, key_file = nil, options = {})
        @idle_timeout_ms = options.fetch(:idle_timeout_ms, DEFAULT_IDLE_TIMEOUT_MS)
        @server_resumption_level = options.fetch(:server_resumption_level, QUIC_SERVER_RESUME_AND_ZERORTT)
        @max_concurrent_requests = options.fetch(:max_concurrent_requests, 100)
        @max_unidirectional_streams = options.fetch(:max_unidirectional_streams, 10)
        @alpn = options.fetch(:alpn, DEFAULT_ALPN)

        # Flow control
        @stream_receive_window = options.fetch(:stream_receive_window, DEFAULT_STREAM_RECEIVE_WINDOW)
        @stream_receive_buffer = options.fetch(:stream_receive_buffer, DEFAULT_STREAM_RECEIVE_BUFFER)
        @connection_flow_control_window = options.fetch(:connection_flow_control_window, DEFAULT_CONNECTION_FLOW_CONTROL_WINDOW)

        # Throughput
        @pacing_enabled = options.fetch(:pacing_enabled, DEFAULT_PACING_ENABLED)
        @send_buffering_enabled = options.fetch(:send_buffering_enabled, DEFAULT_SEND_BUFFERING_ENABLED)
        @initial_rtt_ms = options.fetch(:initial_rtt_ms, DEFAULT_INITIAL_RTT_MS)
        @initial_window_packets = options.fetch(:initial_window_packets, DEFAULT_INITIAL_WINDOW_PACKETS)
        @max_ack_delay_ms = options.fetch(:max_ack_delay_ms, DEFAULT_MAX_ACK_DELAY_MS)

        # Connection management
        @keep_alive_interval_ms = options.fetch(:keep_alive_interval_ms, DEFAULT_KEEP_ALIVE_INTERVAL_MS)
        @congestion_control_algorithm = options.fetch(:congestion_control_algorithm, DEFAULT_CONGESTION_CONTROL_ALGORITHM)
        @migration_enabled = options.fetch(:migration_enabled, DEFAULT_MIGRATION_ENABLED)
        @disconnect_timeout_ms = options.fetch(:disconnect_timeout_ms, DEFAULT_DISCONNECT_TIMEOUT_MS)
        @handshake_idle_timeout_ms = options.fetch(:handshake_idle_timeout_ms, DEFAULT_HANDSHAKE_IDLE_TIMEOUT_MS)

        # HTTP/3 parser limits (nil = unlimited)
        @max_body_size = options[:max_body_size]
        @max_header_size = options[:max_header_size]
        @max_header_count = options[:max_header_count]
        @max_frame_payload_size = options[:max_frame_payload_size]

        # 0-RTT early data policy (RFC 8470)
        # :reject (default) — send 425 Too Early for unsafe methods on 0-RTT
        # :allow — pass all 0-RTT requests to the Rack app with env["quicsilver.early_data"]
        @early_data_policy = options.fetch(:early_data_policy, :reject)
        unless %i[reject allow].include?(@early_data_policy)
          raise ServerConfigurationError, "Invalid early_data_policy: #{@early_data_policy.inspect} (must be :reject or :allow)"
        end

        @cert_file = cert_file.nil? ? DEFAULT_CERT_FILE : cert_file
        @key_file = key_file.nil? ? DEFAULT_KEY_FILE : key_file

        # Fail fast: a missing cert/key would otherwise only surface as an
        # opaque MSQUIC status at listener start.
        unless File.exist?(@cert_file)
          raise ServerConfigurationError, "Certificate file not found: #{@cert_file}"
        end

        unless File.exist?(@key_file)
          raise ServerConfigurationError, "Key file not found: #{@key_file}"
        end
      end

      # Settings hash consumed by the C extension. Booleans are converted to
      # 0/1 integers for the C side.
      def to_h
        {
          cert_file: @cert_file,
          key_file: @key_file,
          idle_timeout_ms: @idle_timeout_ms,
          server_resumption_level: @server_resumption_level,
          max_concurrent_requests: @max_concurrent_requests,
          max_unidirectional_streams: @max_unidirectional_streams,
          alpn: alpn,
          stream_receive_window: @stream_receive_window,
          stream_receive_buffer: @stream_receive_buffer,
          connection_flow_control_window: @connection_flow_control_window,
          pacing_enabled: @pacing_enabled ? 1 : 0,
          send_buffering_enabled: @send_buffering_enabled ? 1 : 0,
          initial_rtt_ms: @initial_rtt_ms,
          initial_window_packets: @initial_window_packets,
          max_ack_delay_ms: @max_ack_delay_ms,
          keep_alive_interval_ms: @keep_alive_interval_ms,
          congestion_control_algorithm: @congestion_control_algorithm,
          migration_enabled: @migration_enabled ? 1 : 0,
          disconnect_timeout_ms: @disconnect_timeout_ms,
          handshake_idle_timeout_ms: @handshake_idle_timeout_ms
        }
      end
    end
  end
end