quicsilver 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +4 -5
- data/.github/workflows/cibuildgem.yaml +93 -0
- data/.gitignore +3 -1
- data/CHANGELOG.md +81 -0
- data/Gemfile.lock +26 -4
- data/README.md +95 -31
- data/Rakefile +95 -3
- data/benchmarks/components.rb +191 -0
- data/benchmarks/concurrent.rb +110 -0
- data/benchmarks/helpers.rb +88 -0
- data/benchmarks/quicsilver_server.rb +1 -1
- data/benchmarks/rails.rb +170 -0
- data/benchmarks/throughput.rb +113 -0
- data/examples/README.md +44 -91
- data/examples/benchmark.rb +111 -0
- data/examples/connection_pool_demo.rb +47 -0
- data/examples/example_helper.rb +18 -0
- data/examples/falcon_middleware.rb +44 -0
- data/examples/feature_demo.rb +125 -0
- data/examples/grpc_style.rb +97 -0
- data/examples/minimal_http3_server.rb +6 -18
- data/examples/priorities.rb +60 -0
- data/examples/protocol_http_server.rb +31 -0
- data/examples/rack_http3_server.rb +8 -20
- data/examples/rails_feature_test.rb +260 -0
- data/examples/simple_client_test.rb +2 -2
- data/examples/streaming_sse.rb +33 -0
- data/examples/trailers.rb +69 -0
- data/ext/quicsilver/extconf.rb +14 -0
- data/ext/quicsilver/quicsilver.c +568 -181
- data/lib/quicsilver/client/client.rb +349 -0
- data/lib/quicsilver/client/connection_pool.rb +106 -0
- data/lib/quicsilver/client/request.rb +98 -0
- data/lib/quicsilver/libmsquic.2.dylib +0 -0
- data/lib/quicsilver/protocol/adapter.rb +176 -0
- data/lib/quicsilver/protocol/control_stream_parser.rb +106 -0
- data/lib/quicsilver/protocol/frame_parser.rb +142 -0
- data/lib/quicsilver/protocol/frame_reader.rb +55 -0
- data/lib/quicsilver/{http3.rb → protocol/frames.rb} +146 -30
- data/lib/quicsilver/protocol/priority.rb +56 -0
- data/lib/quicsilver/protocol/qpack/decoder.rb +165 -0
- data/lib/quicsilver/protocol/qpack/encoder.rb +227 -0
- data/lib/quicsilver/protocol/qpack/header_block_decoder.rb +140 -0
- data/lib/quicsilver/protocol/qpack/huffman.rb +459 -0
- data/lib/quicsilver/protocol/request_encoder.rb +47 -0
- data/lib/quicsilver/protocol/request_parser.rb +275 -0
- data/lib/quicsilver/protocol/response_encoder.rb +97 -0
- data/lib/quicsilver/protocol/response_parser.rb +141 -0
- data/lib/quicsilver/protocol/stream_input.rb +98 -0
- data/lib/quicsilver/protocol/stream_output.rb +59 -0
- data/lib/quicsilver/quicsilver.bundle +0 -0
- data/lib/quicsilver/server/listener_data.rb +14 -0
- data/lib/quicsilver/server/request_handler.rb +138 -0
- data/lib/quicsilver/server/request_registry.rb +50 -0
- data/lib/quicsilver/server/server.rb +610 -0
- data/lib/quicsilver/transport/configuration.rb +141 -0
- data/lib/quicsilver/transport/connection.rb +379 -0
- data/lib/quicsilver/transport/event_loop.rb +38 -0
- data/lib/quicsilver/transport/inbound_stream.rb +33 -0
- data/lib/quicsilver/transport/stream.rb +28 -0
- data/lib/quicsilver/transport/stream_event.rb +26 -0
- data/lib/quicsilver/version.rb +1 -1
- data/lib/quicsilver.rb +55 -14
- data/lib/rackup/handler/quicsilver.rb +1 -2
- data/quicsilver.gemspec +13 -3
- metadata +125 -21
- data/benchmarks/benchmark.rb +0 -68
- data/examples/setup_certs.sh +0 -57
- data/lib/quicsilver/client.rb +0 -261
- data/lib/quicsilver/connection.rb +0 -42
- data/lib/quicsilver/event_loop.rb +0 -38
- data/lib/quicsilver/http3/request_encoder.rb +0 -133
- data/lib/quicsilver/http3/request_parser.rb +0 -176
- data/lib/quicsilver/http3/response_encoder.rb +0 -186
- data/lib/quicsilver/http3/response_parser.rb +0 -160
- data/lib/quicsilver/listener_data.rb +0 -29
- data/lib/quicsilver/quic_stream.rb +0 -36
- data/lib/quicsilver/request_registry.rb +0 -48
- data/lib/quicsilver/server.rb +0 -355
- data/lib/quicsilver/server_configuration.rb +0 -78
|
@@ -0,0 +1,610 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Quicsilver
|
|
4
|
+
class Server
|
|
5
|
+
attr_reader :address, :port, :server_configuration, :running, :connections, :request_registry, :shutting_down, :max_queue_size, :max_connections

# Event names delivered by the C extension callback; dispatched in
# #handle_stream_event.
STREAM_EVENT_RECEIVE = "RECEIVE"
STREAM_EVENT_RECEIVE_FIN = "RECEIVE_FIN"
STREAM_EVENT_CONNECTION_ESTABLISHED = "CONNECTION_ESTABLISHED"
STREAM_EVENT_SEND_COMPLETE = "SEND_COMPLETE"
STREAM_EVENT_CONNECTION_CLOSED = "CONNECTION_CLOSED"
STREAM_EVENT_STREAM_RESET = "STREAM_RESET"
STREAM_EVENT_STOP_SENDING = "STOP_SENDING"

# Raised by #stop when teardown fails.
ServerStopError = Class.new(StandardError)
# Raised into worker threads that are still alive after the drain window
# (see #stop_worker_pool).
DrainTimeoutError = Class.new(StandardError)
|
|
17
|
+
|
|
18
|
+
# Tracks an in-flight streaming request between RECEIVE and RECEIVE_FIN.
# The native stream handle only arrives with RECEIVE_FIN; the worker
# thread parks on +handle_ready+ until #complete provides it.
PendingStream = Struct.new(:connection, :body, :request, :stream_id, :stream_handle, :handle_ready, :frame_buffer, :priority, keyword_init: true) do
  def initialize(**)
    super
    self.handle_ready = Queue.new
    # Binary buffer for partially received HTTP/3 frames.
    self.frame_buffer = String.new(encoding: Encoding::BINARY)
  end

  # Invoked by the RECEIVE_FIN handler: stores the native handle and
  # wakes the waiting worker.
  def complete(handle)
    self.stream_handle = handle
    handle_ready << true
  end

  # Invoked by the worker thread: blocks until the handle is available
  # (or the timeout elapses), then returns it (nil on timeout).
  def wait_for_handle(timeout: 30)
    handle_ready.pop(timeout: timeout)
    stream_handle
  end
end
|
|
39
|
+
|
|
40
|
+
class << self
  # The most recently constructed Server (assigned in #initialize); the
  # C extension callback is routed through it.
  attr_accessor :instance

  # Entry point invoked from the C extension for every stream event.
  # Silently drops events when no server instance exists yet.
  def handle_stream(connection_data, stream_id, event, data, early_data)
    server = instance
    server.handle_stream_event(connection_data, stream_id, event, data, early_data) if server
  end
end
|
|
48
|
+
|
|
49
|
+
# Worker threads handling requests when the caller does not specify.
DEFAULT_THREAD_POOL_SIZE = 5
# Default work-queue capacity is threads * this multiplier.
DEFAULT_QUEUE_MULTIPLIER = 4
# Connections accepted before new ones are rejected (see handle_stream_event).
DEFAULT_MAX_CONNECTIONS = 100
|
|
52
|
+
|
|
53
|
+
# Builds a server bound to +address+:+port+ serving +app+.
#
# port:: UDP port to listen on (default 4433)
# address:: bind address (default all interfaces)
# app:: Rack app (or protocol-http app in :falcon mode); a demo app is
#       used when nil
# server_configuration:: Transport::Configuration; defaults constructed here
# threads:: worker pool size
# max_queue_size:: work-queue capacity; defaults to threads * DEFAULT_QUEUE_MULTIPLIER
# max_connections:: connection-accept limit
#
# Registers itself as Server.instance so the C extension callback can
# reach this instance.
def initialize(port = 4433, address: "0.0.0.0", app: nil, server_configuration: nil, threads: DEFAULT_THREAD_POOL_SIZE, max_queue_size: nil, max_connections: DEFAULT_MAX_CONNECTIONS)
  @port = port
  @address = address
  @app = app || default_rack_app
  @server_configuration = server_configuration || Transport::Configuration.new
  @running = false
  @shutting_down = false
  @listener_data = nil
  @config_handle = nil
  # connection_handle => Transport::Connection
  @connections = {}
  @request_registry = RequestRegistry.new
  @handler_threads = []
  @handler_mutex = Mutex.new
  @thread_pool_size = threads
  @max_queue_size = max_queue_size || threads * DEFAULT_QUEUE_MULTIPLIER
  @work_queue = Queue.new
  @max_connections = max_connections
  # Streams reset/cancelled by the peer; consulted before sending responses.
  @cancelled_streams = Set.new
  @cancelled_mutex = Mutex.new
  @pending_streams = {} # stream_id => PendingStream (for streaming dispatch)
  @pending_mutex = Mutex.new

  # Wrap for the configured mode (Rack adapter vs. native protocol-http).
  protocol_app = wrap_app(@app, @server_configuration.mode)

  @request_handler = RequestHandler.new(
    app: protocol_app,
    configuration: @server_configuration,
    request_registry: @request_registry,
    cancelled_streams: @cancelled_streams,
    cancelled_mutex: @cancelled_mutex
  )

  # Route C extension callbacks (Server.handle_stream) to this instance.
  self.class.instance = self
end
|
|
87
|
+
|
|
88
|
+
# Boots the QUIC listener and blocks on the shared event loop until the
# server is stopped.
#
# Raises ServerIsRunningError when already started,
# ServerConfigurationError/ServerListenerError when native setup fails,
# and wraps any other failure in ServerError with a hint decoded from
# known status codes found in the message.
def start
  raise ServerIsRunningError, "Server is already running" if @running

  Quicsilver.open_connection
  @config_handle = Quicsilver.create_server_configuration(@server_configuration.to_h)
  raise ServerConfigurationError, "Failed to create server configuration" unless @config_handle

  result = Quicsilver.create_listener(@config_handle)
  @listener_data = ListenerData.new(result[0], result[1])
  # NOTE(review): ListenerData.new always returns a truthy object, so
  # this guard can never fire — failure detection presumably belongs on
  # `result` instead; confirm against the C extension's return contract.
  raise ServerListenerError, "Failed to create listener #{@address}:#{@port}" unless @listener_data

  unless Quicsilver.start_listener(@listener_data.listener_handle, @address, @port, @server_configuration.alpn)
    Quicsilver.close_configuration(@config_handle)
    @config_handle = nil
    cleanup_failed_server
    raise ServerListenerError, "Failed to start listener on #{@address}:#{@port}"
  end

  @running = true

  setup_signal_handlers
  start_worker_pool
  Quicsilver.event_loop.start
  Quicsilver.event_loop.join # Block until shutdown
rescue ServerConfigurationError, ServerListenerError => e
  # cleanup_failed_server is idempotent, so re-running it after the
  # unless-branch above already ran it is safe.
  cleanup_failed_server
  @running = false
  raise e
rescue => e
  cleanup_failed_server
  @running = false

  # Map known native status codes embedded in the message to friendlier hints.
  error_msg = case e.message
  when /0x16/
    "Invalid parameter error - check certificate files and network configuration"
  when /0x30/
    "Address already in use - port #{@port} may be occupied"
  else
    e.message
  end

  raise ServerError, "Server start failed: #{error_msg}"
end
|
|
131
|
+
|
|
132
|
+
# Stops the listener, closes the server configuration, and halts the
# shared event loop. No-op when the server is not running.
#
# Fix: the rescue path previously reset @listener_data/@running but
# left @config_handle dangling, leaking the native configuration handle
# when teardown raised. It is now closed (best-effort) and cleared on
# the failure path too.
#
# Raises ServerStopError wrapping any teardown failure; state is reset
# either way so the server can be restarted.
def stop
  return unless @running

  # Finish queued work and shut down the worker pool before tearing
  # down the transport.
  drain

  if @listener_data&.listener_handle
    Quicsilver.stop_listener(@listener_data.listener_handle)
    Quicsilver.close_listener([@listener_data.listener_handle, @listener_data.context_handle])
  end

  if @config_handle
    Quicsilver.close_configuration(@config_handle)
    @config_handle = nil
  end

  Quicsilver.event_loop.stop
  @running = false
  @listener_data = nil
rescue => e
  # Best-effort release of the configuration handle so it cannot leak.
  begin
    Quicsilver.close_configuration(@config_handle) if @config_handle
  rescue
    # Ignore secondary cleanup errors
  ensure
    @config_handle = nil
  end
  @listener_data = nil
  @running = false
  raise ServerStopError, "Failed to stop server: #{e.message}"
end
|
|
155
|
+
|
|
156
|
+
# True while the listener is accepting connections.
def running? = @running
|
|
159
|
+
|
|
160
|
+
# Thread-safe check: has the peer reset/cancelled the given stream?
def cancelled_stream?(stream_id)
  @cancelled_mutex.synchronize do
    @cancelled_streams.member?(stream_id)
  end
end
|
|
163
|
+
|
|
164
|
+
# Block until queued work finishes (or the deadline passes), then tear
# down the worker pool.
def drain(timeout: 5)
  Quicsilver.logger.debug("Draining work queue (#{@work_queue.size} pending)")

  deadline = Time.now + timeout
  # Poll until the queue empties or we run out of time.
  sleep 0.05 until @work_queue.size.zero? || Time.now >= deadline

  # Signal workers to exit.
  stop_worker_pool
end
|
|
178
|
+
|
|
179
|
+
# Graceful shutdown: send GOAWAY, drain in-flight requests, then stop.
#
# Fixes: (1) the phase-1 GOAWAY loop had no rescue (unlike phase 2b),
# so a single failing connection aborted the whole shutdown; it now
# logs and continues. (2) @shutting_down is reset in an ensure block so
# a failure in #stop no longer leaves the flag stuck true, which would
# have made every later shutdown call a no-op.
#
# timeout bounds the drain phase; the grace sleeps are scaled fractions
# of it. Safe to call from a trap-spawned thread.
def shutdown(timeout: 30)
  return unless @running
  return if @shutting_down

  @shutting_down = true
  Quicsilver.logger.info("Initiating graceful shutdown (timeout: #{timeout}s)")

  # Phase 1: Send GOAWAY - tell clients to stop sending new requests
  @connections.each_value do |c|
    c.send_goaway(Protocol::MAX_STREAM_ID)
  rescue => e
    Quicsilver.logger.debug("GOAWAY failed: #{e.message}")
  end

  # Phase 2: Drain in-flight requests
  drain(timeout: timeout)

  # Phase 2b: Send final GOAWAY with actual last processed stream ID (RFC 9114 §5.2)
  @connections.each_value do |c|
    c.send_goaway
  rescue => e
    Quicsilver.logger.debug("Second GOAWAY failed: #{e.message}")
  end

  # Grace period: let pending responses reach clients
  sleep [0.5, timeout * 0.1].min

  # Log any requests that didn't complete
  unless @request_registry.empty?
    @request_registry.active_requests.each do |stream_id, req|
      elapsed = Time.now - req[:started_at]
      Quicsilver.logger.warn("Force-closing request: #{req[:method]} #{req[:path]} (stream: #{stream_id}, elapsed: #{elapsed.round(2)}s)")
    end
  end

  # Phase 3: Shutdown connections
  @connections.each_value(&:shutdown)
  sleep [0.1, timeout * 0.05].min

  # Phase 4: Hard stop
  stop

  Quicsilver.logger.info("Graceful shutdown complete")
ensure
  @shutting_down = false
end
|
|
221
|
+
|
|
222
|
+
# Dispatches one event from the C extension callback.
#
# connection_data:: array whose first element is the native connection handle
# stream_id:: QUIC stream id the event applies to
# event:: one of the STREAM_EVENT_* strings
# data:: event payload (raw bytes or event struct, depending on event)
# early_data:: whether the bytes arrived as 0-RTT data
#
# Fix: the branch-local StreamEvent objects previously reassigned the
# +event+ parameter, shadowing the event-name string being dispatched
# on; they are now distinct locals.
def handle_stream_event(connection_data, stream_id, event, data, early_data) # :nodoc:
  connection_handle = connection_data[0]

  case event
  when STREAM_EVENT_CONNECTION_ESTABLISHED
    # Enforce the connection limit before allocating any state.
    if @connections.size >= @max_connections
      Quicsilver.logger.warn("Connection limit reached (#{@max_connections}), rejecting connection")
      Quicsilver.connection_shutdown(connection_handle, Protocol::H3_EXCESSIVE_LOAD, false)
      return
    end

    connection = Transport::Connection.new(connection_handle, connection_data,
                                           max_header_size: @server_configuration.max_header_size)
    @connections[connection_handle] = connection
    connection.setup_http3_streams

  when STREAM_EVENT_CONNECTION_CLOSED
    @connections.delete(connection_handle)&.streams&.clear
    Quicsilver.close_server_connection(connection_handle)

  when STREAM_EVENT_SEND_COMPLETE
    # Buffer cleanup handled in C extension
  when STREAM_EVENT_RECEIVE
    return unless (connection = @connections[connection_handle])
    handle_receive(connection, connection_handle, stream_id, data, early_data: early_data)
  when STREAM_EVENT_RECEIVE_FIN
    return unless (connection = @connections[connection_handle])
    handle_receive_fin(connection, connection_handle, stream_id, data, early_data: early_data)
  when STREAM_EVENT_STREAM_RESET
    return unless (connection = @connections[connection_handle])
    reset_event = Transport::StreamEvent.new(data, "STREAM_RESET")
    Quicsilver.logger.debug("Stream #{stream_id} reset by peer with error code: 0x#{reset_event.error_code.to_s(16)}")

    # Closing a critical unidirectional stream is a connection error (RFC 9114 §6.2.1)
    if connection.critical_stream?(stream_id)
      Quicsilver.logger.error("Critical stream #{stream_id} reset by peer")
      Quicsilver.connection_shutdown(connection_handle, Protocol::H3_CLOSED_CRITICAL_STREAM, false) rescue nil
    else
      @cancelled_mutex.synchronize { @cancelled_streams.add(stream_id) }
      # Unblock any worker waiting on this stream's body.
      pending = @pending_mutex.synchronize { @pending_streams.delete(stream_id) }
      pending&.body&.close(RuntimeError.new("Stream #{stream_id} reset by peer"))
      @request_registry.complete(stream_id)
    end
  when STREAM_EVENT_STOP_SENDING
    return unless @connections[connection_handle]
    stop_event = Transport::StreamEvent.new(data, "STOP_SENDING")
    Quicsilver.logger.debug("Stream #{stream_id} stop sending requested with error code: 0x#{stop_event.error_code.to_s(16)}")
    @cancelled_mutex.synchronize { @cancelled_streams.add(stream_id) }
    Quicsilver.stream_reset(stop_event.handle, Protocol::H3_REQUEST_CANCELLED)
    @request_registry.complete(stream_id)
  end
end
|
|
274
|
+
|
|
275
|
+
private
|
|
276
|
+
|
|
277
|
+
# Wrap the user's app for the configured mode.
# Falcon mode: the app is already a native protocol-http app — pass through.
# Any other mode (Rack): inject rack.early_hints support, then wrap with
# protocol-rack.
def wrap_app(app, mode)
  return app if mode == :falcon

  ::Protocol::Rack::Adapter.new(with_early_hints(app))
end
|
|
286
|
+
|
|
287
|
+
# Bridges protocol-http's interim_response to Rack's rack.early_hints.
# In a Rails controller: send_early_hints("link" => '</style.css>; rel=preload')
def with_early_hints(app)
  lambda do |env|
    upstream = env["protocol.http.request"]
    if upstream && upstream.respond_to?(:interim_response) && upstream.interim_response
      env["rack.early_hints"] = lambda do |headers|
        upstream.send_interim_response(103, ::Protocol::HTTP::Headers[headers.map { |k, v| [k, v] }])
      end
    end
    app.call(env)
  end
end
|
|
300
|
+
|
|
301
|
+
# Install INT/TERM handlers that trigger graceful shutdown. The work is
# pushed onto a fresh thread because trap context forbids blocking
# operations (mutexes, sleeps) used inside #shutdown.
def setup_signal_handlers
  ["INT", "TERM"].each do |sig|
    trap(sig) { Thread.new { shutdown } }
  end
end
|
|
306
|
+
|
|
307
|
+
# Fallback Rack app used when the caller supplies none: a plain-text
# echo of the request method and path.
def default_rack_app
  lambda do |env|
    text = "Hello from Quicsilver!\nMethod: #{env['REQUEST_METHOD']}\nPath: #{env['PATH_INFO']}\n"
    [200, {"Content-Type" => "text/plain"}, [text]]
  end
end
|
|
314
|
+
|
|
315
|
+
# Best-effort teardown after a failed start: stop/close the listener if
# one exists, swallow any errors, and always reset listener state.
def cleanup_failed_server
  return unless @listener_data

  handle = @listener_data.listener_handle
  if handle
    begin
      Quicsilver.stop_listener(handle)
      Quicsilver.close_listener([handle, @listener_data.context_handle])
    rescue
      # Ignore cleanup errors
    end
  end
  @listener_data = nil
end
|
|
327
|
+
|
|
328
|
+
# Internal accessor for the shared work queue (worker pool + dispatch paths).
attr_reader :work_queue
|
|
329
|
+
|
|
330
|
+
# Route a RECEIVE event by stream direction.
def handle_receive(connection, connection_handle, stream_id, data, early_data: false)
  if (stream_id & 0x02).zero?
    # Bidirectional request stream.
    handle_bidi_receive(connection, connection_handle, stream_id, data, early_data: early_data)
  else
    # Unidirectional streams (control, QPACK) never send FIN, so they
    # must be parsed incrementally as bytes arrive.
    begin
      connection.receive_unidirectional_data(stream_id, data)
    rescue Protocol::FrameError => e
      Quicsilver.logger.error("Control stream error: #{e.message} (0x#{e.error_code.to_s(16)})")
      Quicsilver.connection_shutdown(connection_handle, e.error_code, false) rescue nil
    end
  end
end
|
|
344
|
+
|
|
345
|
+
# Handle a RECEIVE on a bidirectional (request) stream.
def handle_bidi_receive(connection, connection_handle, stream_id, data, early_data: false)
  in_flight = @pending_mutex.synchronize { @pending_streams[stream_id] }

  if in_flight
    # Later RECEIVE for an already-dispatched stream: accumulate bytes
    # and extract complete DATA payloads (the transport splits data at
    # arbitrary boundaries, so frames may span callbacks).
    in_flight.frame_buffer << data
    drain_data_frames(in_flight)
    return
  end

  if contains_headers_frame?(data)
    # First chunk carries HEADERS — dispatch immediately for streaming.
    dispatch_streaming(connection, connection_handle, stream_id, data, early_data: early_data)
  else
    # No headers yet: buffer until RECEIVE_FIN.
    connection.buffer_data(stream_id, data)
  end
end
|
|
358
|
+
|
|
359
|
+
# Handle RECEIVE_FIN: finish either a streaming request (already
# dispatched) or a fully buffered one.
def handle_receive_fin(connection, connection_handle, stream_id, data, early_data: false)
  fin_event = Transport::StreamEvent.new(data, "RECEIVE_FIN")
  in_flight = @pending_mutex.synchronize { @pending_streams[stream_id] }

  return complete_streaming_request(in_flight, fin_event) if in_flight

  complete_buffered_request(connection, connection_handle, stream_id, fin_event, early_data: early_data)
end
|
|
369
|
+
|
|
370
|
+
# Finalize a streaming request at FIN: flush any trailing bytes into the
# body, close the write side, and hand the stream handle to the waiting
# worker.
def complete_streaming_request(pending, event)
  tail = event.data
  unless tail.nil? || tail.empty?
    pending.frame_buffer << tail
    drain_data_frames(pending)
  end
  pending.body.close_write
  pending.complete(event.handle)
end
|
|
378
|
+
|
|
379
|
+
# Finalize a fully buffered stream at FIN: assemble all buffered bytes,
# then either dispatch it as a request (bidirectional) or feed it to the
# connection's unidirectional-stream handling.
def complete_buffered_request(connection, connection_handle, stream_id, event, early_data: false)
  # Previously buffered bytes plus whatever arrived with FIN.
  full_data = connection.complete_stream(stream_id, event.data)
  stream = Transport::InboundStream.new(stream_id)
  stream.stream_handle = event.handle
  stream.append_data(full_data)

  if stream.bidirectional?
    connection.track_client_stream(stream_id)
    dispatch_request(connection, stream, early_data: early_data)
  else
    begin
      connection.handle_unidirectional_stream(stream)
    rescue Protocol::FrameError => e
      # Malformed control/QPACK stream is a connection-level error.
      Quicsilver.logger.error("Control stream error: #{e.message} (0x#{e.error_code.to_s(16)})")
      Quicsilver.connection_shutdown(connection_handle, e.error_code, false) rescue nil
    end
  end
end
|
|
397
|
+
|
|
398
|
+
# Enqueue a buffered request for the worker pool, or reject with 503
# when the queue is saturated.
def dispatch_request(connection, stream, early_data: false)
  if @work_queue.size < @max_queue_size
    @work_queue.push([connection, stream, early_data])
    return
  end

  Quicsilver.logger.warn("Work queue full (#{@max_queue_size}), rejecting request")
  connection.send_error(stream, 503, "Service Unavailable") if stream.writable?
end
|
|
406
|
+
|
|
407
|
+
# Spin up the fixed-size worker pool. Each worker loops on the shared
# queue until it pops the :shutdown sentinel; streaming jobs are tagged
# [:streaming, pending], buffered jobs are [connection, stream, early_data].
def start_worker_pool
  workers = Array.new(@thread_pool_size) do
    Thread.new do
      while (job = @work_queue.pop)
        break if job == :shutdown

        if job.is_a?(Array) && job.first == :streaming
          handle_streaming_request(job[1])
        else
          connection, stream, early_data = job
          @request_handler.call(connection, stream, early_data: early_data)
        end
      end
    end
  end
  @handler_mutex.synchronize { @handler_threads.concat(workers) }
end
|
|
424
|
+
|
|
425
|
+
# Streaming dispatch: parse headers from first RECEIVE, dispatch immediately.
# Body data arrives via subsequent RECEIVE events into StreamInput.
#
# Frame errors shut down the connection with the frame's error code;
# message errors and anything else are logged and the request is dropped.
def dispatch_streaming(connection, connection_handle, stream_id, data, early_data: false)
  parser = Protocol::RequestParser.new(
    data,
    max_header_size: @server_configuration.max_header_size,
    max_header_count: @server_configuration.max_header_count,
    max_frame_payload_size: @server_configuration.max_frame_payload_size
  )
  parser.parse
  parser.validate_headers!

  headers = parser.headers
  return if headers.empty?

  method = headers[":method"]

  # 0-RTT data for unsafe methods is dropped outright under :reject —
  # there is no stream handle yet, so a 425 cannot be sent here.
  if @server_configuration.early_data_policy == :reject &&
     early_data && !RequestHandler::SAFE_METHODS.include?(method)
    Quicsilver.logger.debug("Rejected 0-RTT #{method} on stream #{stream_id} (no stream handle to send 425)")
    return
  end

  request, body = @request_handler.adapter.build_request(headers)
  request.headers.add("quicsilver-early-data", early_data.to_s)

  # Feed body data from the first RECEIVE.
  # The parser consumed complete frames (HEADERS + any complete DATA frames).
  if body
    # Complete DATA frames the parser extracted
    if parser.body && parser.body.size > 0
      parser.body.rewind
      body_data = parser.body.read
      body.write(body_data) unless body_data.empty?
    end
  end

  pending = PendingStream.new(
    connection: connection,
    body: body,
    request: request,
    stream_id: stream_id,
    priority: parser.priority
  )

  # Unconsumed bytes go into the frame buffer for incremental parsing
  remainder = data.byteslice(parser.bytes_consumed..-1)
  if remainder && remainder.bytesize > 0
    pending.frame_buffer << remainder
    drain_data_frames(pending)
  end
  @pending_mutex.synchronize { @pending_streams[stream_id] = pending }

  connection.track_client_stream(stream_id)
  @request_registry.track(stream_id, connection_handle,
                          path: headers[":path"] || "/", method: method || "GET")

  if @work_queue.size >= @max_queue_size
    Quicsilver.logger.warn("Work queue full (#{@max_queue_size}), rejecting request")
    body&.close
    @pending_mutex.synchronize { @pending_streams.delete(stream_id) }
    # NOTE(review): the registry entry created by track above is not
    # completed on this rejection path — confirm whether it leaks until
    # RECEIVE_FIN/RESET cleanup.
  else
    @work_queue.push([:streaming, pending])
  end
rescue Protocol::FrameError => e
  Quicsilver.logger.error("Frame error: #{e.message}")
  Quicsilver.connection_shutdown(connection_handle, e.error_code, false) rescue nil
rescue Protocol::MessageError => e
  Quicsilver.logger.error("Message error on stream #{stream_id}: #{e.message}")
rescue => e
  Quicsilver.logger.error("Error in streaming dispatch: #{e.class} - #{e.message}")
end
|
|
497
|
+
|
|
498
|
+
# Worker-thread half of streaming dispatch: run the app (which may read
# the body incrementally), wait for the stream handle delivered at
# RECEIVE_FIN, then send the response. Always clears pending/registry/
# cancellation state for the stream in the ensure block.
def handle_streaming_request(pending)
  response = @request_handler.adapter.call(pending.request)

  # Wait for RECEIVE_FIN to provide the stream handle
  stream_handle = pending.wait_for_handle(timeout: 30)
  unless stream_handle
    Quicsilver.logger.error("Timed out waiting for stream handle on stream #{pending.stream_id}")
    return
  end

  # Peer may have reset the stream while the app was running.
  return if cancelled_stream?(pending.stream_id)

  headers = response.headers

  # Collect declared trailers (sent after the body), if any.
  trailers = if headers.respond_to?(:trailer?) && headers.trailer?
    trailer_hash = {}
    headers.trailer.each { |name, value| trailer_hash[name] = value }
    trailer_hash
  end

  # Flatten protocol-http Headers (or a plain enumerable) into a Hash.
  response_headers = {}
  if headers.respond_to?(:header)
    headers.header.each { |name, value| response_headers[name] = value }
  else
    headers&.each { |name, value| response_headers[name] = value }
  end

  # Supply content-length when the body knows its size and the app didn't.
  if !response_headers.key?("content-length") && response.body&.length
    response_headers["content-length"] = response.body.length.to_s
  end

  stream = Transport::InboundStream.new(pending.stream_id)
  stream.stream_handle = stream_handle

  pending.connection.apply_stream_priority(stream, pending.priority)
  pending.connection.send_response(stream, response.status, response_headers, response.body,
                                   head_request: pending.request.method == "HEAD", trailers: trailers)
  @request_registry.complete(pending.stream_id)
rescue => e
  Quicsilver.logger.error("Streaming request error: #{e.class} - #{e.message}")
  # Only attempt a 500 if FIN already delivered a handle to write to.
  if pending.stream_handle
    stream = Transport::InboundStream.new(pending.stream_id)
    stream.stream_handle = pending.stream_handle
    pending.connection.send_error(stream, 500, "Internal Server Error") if stream.writable?
  end
ensure
  @pending_mutex.synchronize { @pending_streams.delete(pending.stream_id) }
  @request_registry.complete(pending.stream_id) if @request_registry.include?(pending.stream_id)
  @cancelled_mutex.synchronize { @cancelled_streams.delete(pending.stream_id) }
end
|
|
548
|
+
|
|
549
|
+
# Incrementally extract complete DATA frame payloads from the frame buffer.
# Handles the transport splitting frames across RECEIVE callbacks — partial
# frames remain in the buffer until the next callback completes them.
#
# Frame layout: varint type, varint length, payload. One-byte varints
# (< 0x40) are read directly; longer ones via Protocol.decode_varint_str,
# which returns a consumed-length of 0 when the varint itself is
# incomplete (hence the breaks).
#
# NOTE(review): re-slicing the buffer per frame makes this O(n^2) for
# many small frames in one chunk — consider an offset cursor if that
# shows up in profiles.
def drain_data_frames(pending)
  buf = pending.frame_buffer

  # Need at least a type byte and a length byte to make progress.
  while buf.bytesize >= 2
    type_byte = buf.getbyte(0)
    if type_byte < 0x40
      type = type_byte
      type_len = 1
    else
      type, type_len = Protocol.decode_varint_str(buf, 0)
      break if type_len == 0
    end

    len_byte = buf.getbyte(type_len)
    break unless len_byte
    if len_byte < 0x40
      length = len_byte
      length_len = 1
    else
      length, length_len = Protocol.decode_varint_str(buf, type_len)
      break if length_len == 0
    end

    header_len = type_len + length_len
    total = header_len + length

    # Incomplete frame — wait for more data
    break if buf.bytesize < total

    if type == Protocol::FRAME_DATA
      pending.body.write(buf.byteslice(header_len, length))
    end
    # Skip non-DATA frames (e.g. unknown extension frames)

    buf = buf.byteslice(total..-1) || "".b
  end

  # Whatever is left is a partial frame awaiting the next RECEIVE.
  pending.frame_buffer = buf
end
|
|
591
|
+
|
|
592
|
+
# Heuristic: check if raw data starts with an HTTP/3 HEADERS frame (type 0x01).
# QUIC typically delivers complete frames, but if this misidentifies data,
# the parser will fail safely in dispatch_streaming's rescue handlers.
def contains_headers_frame?(data)
  return false unless data && data.bytesize >= 2

  data.getbyte(0) == Protocol::FRAME_HEADERS
end
|
|
599
|
+
|
|
600
|
+
# Push one :shutdown sentinel per worker, give each two seconds to exit,
# then raise DrainTimeoutError into any that are still stuck before
# clearing the thread list.
def stop_worker_pool
  @thread_pool_size.times { @work_queue << :shutdown }
  @handler_mutex.synchronize do
    @handler_threads.each { |worker| worker.join(2) }
    @handler_threads.select(&:alive?).each do |worker|
      worker.raise(DrainTimeoutError, "drain timeout")
    end
    @handler_threads.clear
  end
end
|
|
609
|
+
end
|
|
610
|
+
end
|