quicsilver 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +3 -4
- data/CHANGELOG.md +49 -0
- data/Gemfile.lock +8 -4
- data/README.md +7 -6
- data/Rakefile +29 -2
- data/benchmarks/components.rb +191 -0
- data/benchmarks/concurrent.rb +110 -0
- data/benchmarks/helpers.rb +88 -0
- data/benchmarks/quicsilver_server.rb +1 -1
- data/benchmarks/rails.rb +170 -0
- data/benchmarks/throughput.rb +113 -0
- data/ext/quicsilver/quicsilver.c +529 -181
- data/lib/quicsilver/client/client.rb +250 -0
- data/lib/quicsilver/client/request.rb +98 -0
- data/lib/quicsilver/{http3.rb → protocol/frames.rb} +133 -28
- data/lib/quicsilver/protocol/qpack/decoder.rb +165 -0
- data/lib/quicsilver/protocol/qpack/encoder.rb +189 -0
- data/lib/quicsilver/protocol/qpack/header_block_decoder.rb +125 -0
- data/lib/quicsilver/protocol/qpack/huffman.rb +459 -0
- data/lib/quicsilver/protocol/request_encoder.rb +47 -0
- data/lib/quicsilver/protocol/request_parser.rb +387 -0
- data/lib/quicsilver/protocol/response_encoder.rb +72 -0
- data/lib/quicsilver/protocol/response_parser.rb +249 -0
- data/lib/quicsilver/server/listener_data.rb +14 -0
- data/lib/quicsilver/server/request_handler.rb +86 -0
- data/lib/quicsilver/server/request_registry.rb +50 -0
- data/lib/quicsilver/server/server.rb +336 -0
- data/lib/quicsilver/transport/configuration.rb +132 -0
- data/lib/quicsilver/transport/connection.rb +350 -0
- data/lib/quicsilver/transport/event_loop.rb +38 -0
- data/lib/quicsilver/transport/inbound_stream.rb +33 -0
- data/lib/quicsilver/transport/stream.rb +28 -0
- data/lib/quicsilver/transport/stream_event.rb +26 -0
- data/lib/quicsilver/version.rb +1 -1
- data/lib/quicsilver.rb +31 -13
- data/lib/rackup/handler/quicsilver.rb +1 -2
- data/quicsilver.gemspec +3 -1
- metadata +58 -18
- data/benchmarks/benchmark.rb +0 -68
- data/lib/quicsilver/client.rb +0 -261
- data/lib/quicsilver/connection.rb +0 -42
- data/lib/quicsilver/event_loop.rb +0 -38
- data/lib/quicsilver/http3/request_encoder.rb +0 -133
- data/lib/quicsilver/http3/request_parser.rb +0 -176
- data/lib/quicsilver/http3/response_encoder.rb +0 -186
- data/lib/quicsilver/http3/response_parser.rb +0 -160
- data/lib/quicsilver/listener_data.rb +0 -29
- data/lib/quicsilver/quic_stream.rb +0 -36
- data/lib/quicsilver/request_registry.rb +0 -48
- data/lib/quicsilver/server.rb +0 -355
- data/lib/quicsilver/server_configuration.rb +0 -78
data/benchmarks/rails.rb
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
#!/usr/bin/env ruby
# Rails benchmark: concurrent POST, GET, DELETE against a Rails app.
# Multiplexes requests within each connection (HTTP/3 streams),
# capped by CONCURRENCY to stay within the server's stream limit.
#
# Start blogz first:
#   cd ../blogz && bundle exec rackup -s quicsilver -p 4433
#
# Usage:
#   CONNECTIONS=5 ITERATIONS=100 ruby benchmarks/rails.rb
#   CONCURRENCY=8 CONNECTIONS=3 ITERATIONS=200 ruby benchmarks/rails.rb

require "bundler/setup"
require "quicsilver"
require "json"
require "benchmark"

require_relative "helpers"

# Benchmark tunables, all overridable from the environment.
HOST = ENV.fetch("HOST", "127.0.0.1")
PORT = ENV.fetch("PORT", "4433").to_i
CONNECTIONS = ENV.fetch("CONNECTIONS", "5").to_i
ITERATIONS = ENV.fetch("ITERATIONS", "100").to_i
CONCURRENCY = ENV.fetch("CONCURRENCY", "8").to_i # max in-flight per connection

# Per-phase request count; each of the three phases fires roughly this many.
total_requests = CONNECTIONS * ITERATIONS

Benchmarks::Helpers.print_header(
  "Rails Concurrent Benchmark (multiplexed)",
  target: "#{HOST}:#{PORT}",
  connections: CONNECTIONS,
  "reqs/conn": ITERATIONS,
  concurrency: "#{CONCURRENCY} streams/conn",
  total: "#{total_requests * 3} (POST + GET + DELETE)"
)

# Shared mutable state for all phases; `mutex` guards both collections.
mutex = Mutex.new
results = { post: [], get: [], delete: [] } # per-phase latency samples (seconds)
all_created_ids = [] # ids created by phase 1, deleted again by phase 3
|
|
41
|
+
# Fire N requests with at most `concurrency` in-flight at a time on a shared connection.
#
# All `count` indices are pre-loaded into a queue; `concurrency` worker
# threads drain it, yielding each index to the caller's block. Returns when
# every job has been yielded and all workers have exited.
#
# @param count [Integer] number of jobs to run
# @param concurrency [Integer] maximum number of simultaneous workers
# @yieldparam i [Integer] job index in 0...count
# @return [Array<Thread>] the (already joined) worker threads
def multiplex(count, concurrency:)
  queue = Queue.new
  count.times { |i| queue << i }

  threads = concurrency.times.map do
    Thread.new do
      loop do
        begin
          # Non-blocking pop: an empty queue raises ThreadError, which is the
          # "all work handed out" signal. Rescuing only ThreadError (instead of
          # the old blanket inline `rescue nil`) keeps real errors visible.
          i = queue.pop(true)
        rescue ThreadError
          break
        end
        yield i
      end
    end
  end
  threads.each(&:join)
end
|
|
55
|
+
|
|
56
|
+
# Phase 1: Concurrent multiplexed POSTs.
# Each connection thread opens one QUIC connection and pushes ITERATIONS
# creates through it, CONCURRENCY at a time. Created record ids are collected
# so phase 3 can delete them.
puts "\nPhase 1: POST /posts.json (#{CONNECTIONS} conns x #{ITERATIONS}, #{CONCURRENCY} in-flight)..."
post_elapsed = Benchmark.realtime do
  conn_threads = CONNECTIONS.times.map do |conn_id|
    Thread.new do
      client = Quicsilver::Client.new(HOST, PORT, unsecure: true)
      client.connect

      local_times = []
      local_ids = []

      multiplex(ITERATIONS, concurrency: CONCURRENCY) do |i|
        start = Time.now
        response = client.post(
          "/posts.json",
          headers: { "content-type" => "application/json" },
          body: { post: { name: "Author #{conn_id}-#{i}", title: "Post #{conn_id}-#{i}" } }.to_json
        )
        elapsed = Time.now - start

        if response && response[:status] == 201
          # Tolerate malformed/absent bodies without aborting the run, but only
          # swallow parse failures -- not every StandardError like the old
          # inline `rescue {}` did.
          body = begin
            JSON.parse(response[:body])
          rescue JSON::ParserError, TypeError
            {}
          end
          # local_times/local_ids are shared by this connection's CONCURRENCY
          # workers, so appends go through the global mutex.
          mutex.synchronize do
            local_times << elapsed
            local_ids << body["id"] if body["id"]
          end
        end
      end

      client.disconnect
      mutex.synchronize do
        results[:post].concat(local_times)
        all_created_ids.concat(local_ids)
      end
    end
  end
  conn_threads.each(&:join)
end
puts " #{results[:post].size} created in #{post_elapsed.round(2)}s (#{(results[:post].size / post_elapsed).round(1)} req/s)"
|
|
95
|
+
|
|
96
|
+
# Phase 2: Concurrent multiplexed GETs.
# Same shape as phase 1, but read-only: every connection fetches the index
# ITERATIONS times with up to CONCURRENCY requests in flight.
puts "\nPhase 2: GET /posts.json (#{CONNECTIONS} conns x #{ITERATIONS}, #{CONCURRENCY} in-flight)..."
get_elapsed = Benchmark.realtime do
  fetchers = Array.new(CONNECTIONS) do
    Thread.new do
      client = Quicsilver::Client.new(HOST, PORT, unsecure: true)
      client.connect

      timings = []

      multiplex(ITERATIONS, concurrency: CONCURRENCY) do |_i|
        started_at = Time.now
        response = client.get("/posts.json")
        took = Time.now - started_at

        # Only successful fetches count; the shared array is guarded by the
        # global mutex because CONCURRENCY workers append to it.
        mutex.synchronize { timings << took } if response && response[:status] == 200
      end

      client.disconnect
      mutex.synchronize { results[:get].concat(timings) }
    end
  end
  fetchers.each(&:join)
end
puts " #{results[:get].size} fetched in #{get_elapsed.round(2)}s (#{(results[:get].size / get_elapsed).round(1)} req/s)"
|
|
123
|
+
|
|
124
|
+
# Phase 3: Concurrent multiplexed DELETEs.
# The ids collected in phase 1 are split into one chunk per connection and
# deleted with up to CONCURRENCY requests in flight per connection.
delete_count = all_created_ids.size
puts "\nPhase 3: DELETE /posts/:id (#{delete_count} across #{CONNECTIONS} conns, #{CONCURRENCY} in-flight)..."
delete_elapsed = Benchmark.realtime do
  # Clamp the slice size to >= 1: each_slice raises ArgumentError for sizes
  # below 1, which the old code hit whenever phase 1 created no records.
  slice_size = [(all_created_ids.size.to_f / CONNECTIONS).ceil, 1].max
  id_chunks = all_created_ids.each_slice(slice_size).to_a

  conn_threads = id_chunks.map do |ids|
    Thread.new do
      next if ids.empty?

      client = Quicsilver::Client.new(HOST, PORT, unsecure: true)
      client.connect

      local_times = []

      multiplex(ids.size, concurrency: CONCURRENCY) do |i|
        start = Time.now
        response = client.delete("/posts/#{ids[i]}.json")
        elapsed = Time.now - start

        # 204 No Content is the expected Rails destroy response.
        if response && response[:status] == 204
          mutex.synchronize { local_times << elapsed }
        end
      end

      client.disconnect
      mutex.synchronize { results[:delete].concat(local_times) }
    end
  end
  conn_threads.each(&:join)
end
puts " #{results[:delete].size} deleted in #{delete_elapsed.round(2)}s (#{(results[:delete].size / delete_elapsed).round(1)} req/s)"
|
|
156
|
+
|
|
157
|
+
# Summary: aggregate timing across the three phases and print per-endpoint
# latency stats via the shared helpers.
total_elapsed = post_elapsed + get_elapsed + delete_elapsed
total_completed = results.sum { |_phase, times| times.size }

banner = "=" * 70
puts
puts banner
puts "RESULTS"
puts banner
Benchmarks::Helpers.print_stats("POST /posts.json", results[:post])
Benchmarks::Helpers.print_stats("GET /posts.json", results[:get])
Benchmarks::Helpers.print_stats("DELETE /posts/:id ", results[:delete])
puts "-" * 70
puts " Total: #{total_completed} requests in #{total_elapsed.round(2)}s (#{(total_completed / total_elapsed).round(2)} req/s)"
puts banner
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
#!/usr/bin/env ruby
# Throughput benchmark: measures req/sec and latency percentiles.
# Tests both sequential and concurrent (multiplexed) modes.
#
# Self-contained (boots inline server with trivial Rack app):
#   ruby benchmarks/throughput.rb
#
# External server:
#   HOST=127.0.0.1 PORT=4433 ruby benchmarks/throughput.rb

require_relative "helpers"
require "benchmark"

# Tunables, all overridable from the environment.
REQUESTS = ENV.fetch("REQUESTS", "500").to_i # total requests per mode
CONNECTIONS = ENV.fetch("CONNECTIONS", "5").to_i # parallel QUIC connections
CONCURRENCY = ENV.fetch("CONCURRENCY", "8").to_i # worker streams per connection
HOST = ENV["HOST"] # HOST and PORT both set => benchmark an external server
PORT = ENV["PORT"]&.to_i # nil when unset; selects the inline-server path below
+
|
|
20
|
+
# Runs the two-part benchmark against a server at host:port.
#
# Part 1 (sequential): CONNECTIONS connections, each issuing one request at
# a time. Part 2 (concurrent): the same number of connections, each pumping
# requests through CONCURRENCY worker threads.
#
# @param host [String] server host
# @param port [Integer] server port
def run_benchmark(host, port)
  # --- Sequential: 1 request at a time per connection ---
  puts "\n--- Sequential (1 stream/conn, #{CONNECTIONS} conns) ---"
  seq_times = []
  mutex = Mutex.new

  seq_elapsed = Benchmark.realtime do
    per_conn = REQUESTS / CONNECTIONS

    threads = CONNECTIONS.times.map do
      Thread.new do
        client = Quicsilver::Client.new(host, port, connection_timeout: 5000, request_timeout: 10)
        client.connect

        local = []
        per_conn.times do
          start = Time.now
          response = client.get("/")
          local << (Time.now - start) if response && response[:status] == 200
        end

        client.disconnect
        mutex.synchronize { seq_times.concat(local) }
      end
    end
    threads.each(&:join)
  end

  # Integer division means only per_conn * CONNECTIONS requests are actually
  # dispatched. Counting failures against REQUESTS (as before) over-reports
  # whenever REQUESTS is not a multiple of CONNECTIONS.
  dispatched = (REQUESTS / CONNECTIONS) * CONNECTIONS
  Benchmarks::Helpers.print_results(
    total_time: seq_elapsed, total_requests: dispatched,
    times: seq_times, failed: dispatched - seq_times.size, latency: true
  )

  # --- Concurrent: CONCURRENCY streams per connection ---
  puts "\n--- Concurrent (#{CONCURRENCY} streams/conn, #{CONNECTIONS} conns) ---"
  con_times = []
  con_failed = 0

  con_elapsed = Benchmark.realtime do
    per_conn = REQUESTS / CONNECTIONS

    threads = CONNECTIONS.times.map do
      Thread.new do
        client = Quicsilver::Client.new(host, port, connection_timeout: 5000, request_timeout: 10)
        client.connect

        local = []
        queue = Queue.new
        per_conn.times { |i| queue << i }

        workers = CONCURRENCY.times.map do
          Thread.new do
            loop do
              begin
                # Non-blocking pop; ThreadError means the queue is drained.
                # Rescuing only ThreadError replaces the old blanket
                # inline `rescue nil`.
                queue.pop(true)
              rescue ThreadError
                break
              end
              start = Time.now
              # NOTE(review): the sequential part hits "/" while this hits
              # "/bench" -- confirm both routes exist on the target app.
              response = client.get("/bench")
              dur = Time.now - start
              if response && response[:status] == 200
                # `local` is shared by all CONCURRENCY workers on this
                # connection, so appends must be synchronized (the old code
                # only protected con_failed).
                mutex.synchronize { local << dur }
              else
                mutex.synchronize { con_failed += 1 }
              end
            end
          end
        end
        workers.each(&:join)
        client.disconnect
        mutex.synchronize { con_times.concat(local) }
      end
    end
    threads.each(&:join)
  end

  Benchmarks::Helpers.print_results(
    total_time: con_elapsed, total_requests: dispatched,
    times: con_times, failed: con_failed, latency: true
  )
end
|
|
97
|
+
|
|
98
|
+
Benchmarks::Helpers.print_header(
  "Quicsilver Throughput (trivial Rack app, no DB)",
  connections: CONNECTIONS,
  "reqs/conn": REQUESTS / CONNECTIONS,
  concurrency: "#{CONCURRENCY} streams/conn",
  total: REQUESTS
)

# Target selection: use an external server when HOST and PORT are both set,
# otherwise boot the helpers' inline server and benchmark against it.
if HOST && PORT
  run_benchmark(HOST, PORT)
else
  puts "Booting inline server..."
  Benchmarks::Helpers.with_server(Benchmarks::Helpers.benchmark_app) do |port|
    run_benchmark("localhost", port)
  end
end
|