puma 0.8.0
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of puma might be problematic. Click here for more details.
- data/.gemtest +0 -0
- data/COPYING +55 -0
- data/History.txt +69 -0
- data/LICENSE +26 -0
- data/Manifest.txt +57 -0
- data/README.md +60 -0
- data/Rakefile +10 -0
- data/TODO +5 -0
- data/bin/puma +15 -0
- data/examples/builder.rb +29 -0
- data/examples/camping/README +3 -0
- data/examples/camping/blog.rb +294 -0
- data/examples/camping/tepee.rb +149 -0
- data/examples/httpd.conf +474 -0
- data/examples/mime.yaml +3 -0
- data/examples/mongrel.conf +9 -0
- data/examples/monitrc +57 -0
- data/examples/random_thrash.rb +19 -0
- data/examples/simpletest.rb +52 -0
- data/examples/webrick_compare.rb +20 -0
- data/ext/puma_http11/Http11Service.java +13 -0
- data/ext/puma_http11/ext_help.h +15 -0
- data/ext/puma_http11/extconf.rb +5 -0
- data/ext/puma_http11/http11_parser.c +1225 -0
- data/ext/puma_http11/http11_parser.h +63 -0
- data/ext/puma_http11/http11_parser.java.rl +159 -0
- data/ext/puma_http11/http11_parser.rl +146 -0
- data/ext/puma_http11/http11_parser_common.rl +54 -0
- data/ext/puma_http11/org/jruby/mongrel/Http11.java +241 -0
- data/ext/puma_http11/org/jruby/mongrel/Http11Parser.java +486 -0
- data/ext/puma_http11/puma_http11.c +482 -0
- data/lib/puma.rb +18 -0
- data/lib/puma/cli.rb +131 -0
- data/lib/puma/const.rb +132 -0
- data/lib/puma/events.rb +36 -0
- data/lib/puma/gems.rb +20 -0
- data/lib/puma/mime_types.yml +616 -0
- data/lib/puma/server.rb +419 -0
- data/lib/puma/thread_pool.rb +95 -0
- data/lib/puma/utils.rb +44 -0
- data/lib/rack/handler/puma.rb +33 -0
- data/puma.gemspec +37 -0
- data/tasks/gem.rake +22 -0
- data/tasks/java.rake +12 -0
- data/tasks/native.rake +25 -0
- data/tasks/ragel.rake +20 -0
- data/test/lobster.ru +4 -0
- data/test/mime.yaml +3 -0
- data/test/test_http10.rb +27 -0
- data/test/test_http11.rb +151 -0
- data/test/test_persistent.rb +159 -0
- data/test/test_rack_handler.rb +10 -0
- data/test/test_rack_server.rb +107 -0
- data/test/test_thread_pool.rb +102 -0
- data/test/test_unix_socket.rb +34 -0
- data/test/test_ws.rb +97 -0
- data/test/testhelp.rb +41 -0
- data/tools/trickletest.rb +45 -0
- metadata +165 -0
data/lib/puma/server.rb
ADDED
@@ -0,0 +1,419 @@
|
|
1
|
+
require 'rubygems'
|
2
|
+
require 'rack'
|
3
|
+
require 'stringio'
|
4
|
+
|
5
|
+
require 'puma/thread_pool'
|
6
|
+
require 'puma/const'
|
7
|
+
require 'puma/events'
|
8
|
+
|
9
|
+
require 'puma_http11'
|
10
|
+
|
11
|
+
require 'socket'
|
12
|
+
|
13
|
+
module Puma
  # A single-process, multi-threaded HTTP server: one acceptor thread
  # (see #run) feeds accepted sockets into a Puma::ThreadPool bounded by
  # min_threads/max_threads.
  class Server

    include Puma::Const

    # The acceptor thread created by #run (nil until then).
    attr_reader :thread
    # Events sink used for error reporting (see Puma::Events).
    attr_reader :events
    # The Rack application called for each request.
    attr_accessor :app

    # Lower/upper bounds for the worker thread pool.
    attr_accessor :min_threads
    attr_accessor :max_threads
    # Seconds to wait on an idle keep-alive connection before closing it.
    attr_accessor :persistent_timeout
|
26
|
+
# Creates a server that will run the given Rack +app+.
#
# +events+ receives error callbacks (parse_error / unknown_error);
# defaults to Events::DEFAULT.
#
# Listeners are added separately via #add_tcp_listener /
# #add_unix_listener; start the server with #run, which returns the
# acceptor thread so callers can join it.
def initialize(app, events=Events::DEFAULT)
  @app = app
  @events = events

  # Self-pipe: #stop writes a command byte to @notify, which wakes the
  # IO.select in the accept loop via @check.
  @check, @notify = IO.pipe
  @ios = [@check]

  @running = false

  @min_threads = 0
  @max_threads = 16

  @thread = nil
  @thread_pool = nil

  @persistent_timeout = PERSISTENT_TIMEOUT

  # Baseline Rack environment; dup'd for every request.
  # NOTE(review): "rack.run_once" => true looks unusual for a long-lived
  # server (Rack convention is false) — confirm intended.
  @proto_env = {
    "rack.version".freeze => Rack::VERSION,
    "rack.errors".freeze => events.stderr,
    "rack.multithread".freeze => true,
    "rack.multiprocess".freeze => false,
    "rack.run_once".freeze => true,
    "SCRIPT_NAME".freeze => "",
    "CONTENT_TYPE".freeze => "",
    "QUERY_STRING".freeze => "",
    SERVER_PROTOCOL => HTTP_11,
    SERVER_SOFTWARE => PUMA_VERSION,
    GATEWAY_INTERFACE => CGI_VER
  }
end
|
63
|
+
|
64
|
+
# Opens a TCP listener on host:port and registers it with the accept loop.
def add_tcp_listener(host, port)
  server = TCPServer.new(host, port)
  @ios.push(server)
end
|
67
|
+
|
68
|
+
# Opens a UNIX-domain listener at +path+ and registers it with the
# accept loop.
def add_unix_listener(path)
  server = UNIXServer.new(path)
  @ios.push(server)
end
|
71
|
+
|
72
|
+
# Runs the server. Returns the acceptor thread so callers can join it
# (also available later via #thread).
#
# The acceptor thread selects over all listeners plus the internal
# control pipe; accepted sockets are handed to the thread pool, which
# calls #process_client on a worker thread.
def run
  BasicSocket.do_not_reverse_lookup = true

  @running = true

  @thread_pool = ThreadPool.new(@min_threads, @max_threads) do |client|
    process_client(client)
  end

  @thread = Thread.new do
    begin
      check = @check
      sockets = @ios
      pool = @thread_pool

      while @running
        begin
          ios = IO.select sockets
          ios.first.each do |sock|
            if sock == check
              # A control byte arrived; handle_check returns true on STOP.
              break if handle_check
            else
              pool << sock.accept
            end
          end
        rescue Errno::ECONNABORTED
          # Client disconnected between select and accept; there is no
          # accepted socket to clean up, just keep looping.
          # (Previously this referenced an undefined local `client`.)
        rescue Object => e
          # No request env exists in the listen loop, so report nil
          # (previously this referenced an undefined `env`, and the
          # resulting NameError killed the accept loop).
          @events.unknown_error self, nil, e, "Listen loop"
        end
      end
      graceful_shutdown
    ensure
      @ios.each { |i| i.close }
    end
  end

  return @thread
end
|
115
|
+
|
116
|
+
# Reads one control byte from the internal pipe. Returns true when the
# accept loop should stop (a STOP_COMMAND byte was received), false for
# anything else.
def handle_check
  command = @check.read(1)

  if command == STOP_COMMAND
    @running = false
    true
  else
    false
  end
end
|
127
|
+
|
128
|
+
# Services requests on +client+ until the connection closes, errors, or
# the keep-alive timeout expires. Each outer-loop iteration reads and
# parses one request (plus any pipelined followers already buffered).
# Always closes the socket before returning.
def process_client(client)
  begin
    while true
      parser = HttpParser.new
      env = @proto_env.dup
      data = client.readpartial(CHUNK_SIZE)
      nparsed = 0

      # Assumption: nparsed will always be less since data will get filled
      # with more after each parsing. If it doesn't get more then there was
      # a problem with the read operation on the client socket.
      # Effect is to stop processing when the socket can't fill the buffer
      # for further parsing.
      while nparsed < data.length
        nparsed = parser.execute(env, data, nparsed)

        if parser.finished?
          # handle_request returns false when the connection must not be
          # kept alive; stop servicing this client in that case.
          return unless handle_request env, client, parser.body

          if data.size > nparsed
            # Pipelined request: drop the consumed bytes and parse the
            # remainder with a fresh parser and env.
            data.slice!(0, nparsed)
            parser = HttpParser.new
            env = @proto_env.dup
            nparsed = 0
          else
            # Keep-alive: wait up to @persistent_timeout for more data,
            # then the outer loop issues the next readpartial.
            unless IO.select([client], nil, nil, @persistent_timeout)
              raise EOFError, "Timed out persistent connection"
            end
          end
        else
          # Parser is not done, queue up more data to read and continue parsing
          chunk = client.readpartial(CHUNK_SIZE)
          return if !chunk or chunk.length == 0 # read failed, stop processing

          data << chunk
          if data.length >= MAX_HEADER
            raise HttpParserError,
              "HEADER is longer than allowed, aborting client early."
          end
        end
      end
    end
  rescue EOFError, SystemCallError
    # Ordinary disconnects: close quietly, no error reporting.
    client.close rescue nil

  rescue HttpParserError => e
    @events.parse_error self, env, e

  rescue StandardError => e
    @events.unknown_error self, env, e, "Read"

  ensure
    begin
      client.close
    rescue IOError, SystemCallError
      # Already closed
    rescue StandardError => e
      @events.unknown_error self, env, e, "Client"
    end
  end
end
|
189
|
+
|
190
|
+
# Fills in the server-derived Rack env entries before dispatch:
# SERVER_NAME / SERVER_PORT from the Host header, REQUEST_PATH /
# PATH_INFO, and REMOTE_ADDR from the socket peer address.
def normalize_env(env, client)
  if host = env[HTTP_HOST]
    if colon = host.index(":")
      env[SERVER_NAME] = host[0, colon]
      env[SERVER_PORT] = host[colon+1, host.size]
    else
      env[SERVER_NAME] = host
      env[SERVER_PORT] = PORT_80
    end
  end

  unless env[REQUEST_PATH]
    # The request line may carry an absolute URI; extract its path part.
    # NOTE(review): URI is never required in this file — presumably
    # loaded by rack/rubygems; confirm 'uri' is required somewhere.
    uri = URI.parse(env[REQUEST_URI])
    env[REQUEST_PATH] = uri.path

    raise "No REQUEST PATH" unless env[REQUEST_PATH]
  end

  env[PATH_INFO] = env[REQUEST_PATH]

  # From http://www.ietf.org/rfc/rfc3875 :
  # "Script authors should be aware that the REMOTE_ADDR and
  # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
  # may not identify the ultimate source of the request.
  # They identify the client for the immediate request to the
  # server; that client may be a proxy, gateway, or other
  # intermediary acting on behalf of the actual source client."
  #
  env[REMOTE_ADDR] = client.peeraddr.last
end
|
221
|
+
|
222
|
+
# Dispatches one parsed request to the Rack app and writes the response
# to +client+. Returns true when the connection may be kept alive for
# another request, false when it must be closed.
#
# +body+ is the portion of the request body the parser already buffered;
# read_body completes it. App exceptions are mapped to lowlevel_error.
def handle_request(env, client, body)
  normalize_env env, client

  body = read_body env, client, body

  # read_body returns nil when the socket closed mid-body.
  return false unless body

  env["rack.input"] = body
  env["rack.url_scheme"] = env["HTTPS"] ? "https" : "http"

  allow_chunked = false

  # HTTP/1.1 defaults to keep-alive and permits chunked responses;
  # HTTP/1.0 requires an explicit "Keep-Alive" and never chunks.
  if env['HTTP_VERSION'] == 'HTTP/1.1'
    allow_chunked = true
    http_version = "HTTP/1.1 "
    keep_alive = env["HTTP_CONNECTION"] != "close"
  else
    http_version = "HTTP/1.0 "
    keep_alive = env["HTTP_CONNECTION"] == "Keep-Alive"
  end

  chunked = false

  # Callables the app registers here run after the response is written.
  after_reply = env['rack.after_reply'] = []

  begin
    begin
      status, headers, res_body = @app.call(env)
    rescue => e
      status, headers, res_body = lowlevel_error(e)
    end

    content_length = nil

    # Single-element body arrays get an automatic Content-Length.
    if res_body.kind_of? Array and res_body.size == 1
      content_length = res_body[0].size
    end

    client.write http_version
    client.write status.to_s
    client.write " "
    client.write HTTP_STATUS_CODES[status]
    client.write "\r\n"

    colon = ": "
    line_ending = "\r\n"

    headers.each do |k, vs|
      case k
      when "Content-Length"
        # Deferred so it is emitted exactly once, below.
        content_length = vs
        next
      when "Transfer-Encoding"
        # App supplied its own transfer encoding; don't chunk on top.
        allow_chunked = false
        content_length = nil
      end

      # Rack allows multi-value headers as newline-joined strings.
      vs.split("\n").each do |v|
        client.write k
        client.write colon
        client.write v
        client.write line_ending
      end
    end

    client.write "Connection: close\r\n" unless keep_alive

    if content_length
      client.write "Content-Length: #{content_length}\r\n"
    elsif allow_chunked
      client.write "Transfer-Encoding: chunked\r\n"
      chunked = true
    end

    client.write line_ending

    res_body.each do |part|
      if chunked
        # Chunk framing: hex size, CRLF, data, CRLF.
        client.write part.size.to_s(16)
        client.write line_ending
        client.write part
        client.write line_ending
      else
        client.write part
      end

      client.flush
    end

    if chunked
      # Terminating zero-length chunk.
      client.write "0"
      client.write line_ending
      client.write line_ending
      client.flush
    end

  ensure
    body.close
    res_body.close if res_body.respond_to? :close

    after_reply.each { |o| o.call }
  end

  return keep_alive
end
|
327
|
+
|
328
|
+
# Completes reading the request body from +client+. +body+ is whatever
# the parser already buffered past the headers. Returns a rewound
# IO-like stream (StringIO or Tempfile) containing the full body, or
# nil if the socket yielded no data.
def read_body(env, client, body)
  content_length = env[CONTENT_LENGTH].to_i

  remain = content_length - body.size

  # Whole body already buffered? Serve it straight from memory.
  return StringIO.new(body) if remain <= 0

  # Use a Tempfile if there is a lot of data left
  # NOTE(review): Tempfile is never required in this file — presumably
  # loaded elsewhere; confirm 'tempfile' is required somewhere.
  if remain > MAX_BODY
    stream = Tempfile.new(Const::PUMA_TMP_BASE)
    stream.binmode
  else
    stream = StringIO.new
  end

  stream.write body

  # Read an odd sized chunk so we can read even sized ones
  # after this
  chunk = client.readpartial(remain % CHUNK_SIZE)

  # No chunk means a closed socket
  # NOTE(review): IO#readpartial raises EOFError rather than returning
  # nil, so this guard likely never fires (EOFError is handled by the
  # caller) — confirm.
  unless chunk
    stream.close
    return nil
  end

  remain -= stream.write(chunk)

  # Read the rest of the chunks
  while remain > 0
    chunk = client.readpartial(CHUNK_SIZE)
    unless chunk
      stream.close
      return nil
    end

    remain -= stream.write(chunk)
  end

  stream.rewind

  return stream
end
|
372
|
+
|
373
|
+
# Fallback Rack response returned when the app raises. +e+ is the
# exception (currently unused in the message).
def lowlevel_error(e)
  message = ["No application configured"]
  [500, {}, message]
end
|
376
|
+
|
377
|
+
# Wait for all outstanding requests to finish by shutting the thread
# pool down (no-op when the pool was never started).
def graceful_shutdown
  pool = @thread_pool
  pool.shutdown if pool
end
|
381
|
+
|
382
|
+
# Stops the acceptor by writing STOP_COMMAND to the control pipe; the
# workers then drain the request queue before exiting. When +sync+ is
# true, blocks until the acceptor thread has finished.
def stop(sync=false)
  @notify << STOP_COMMAND

  if sync && @thread
    @thread.join
  end
end
|
389
|
+
|
390
|
+
# Announces each TCP listener over Bonjour/DNSSD as "puma - <name>".
# Returns false when the optional 'dnssd' gem is unavailable, otherwise
# true if at least one TCP listener was announced.
def attempt_bonjour(name)
  begin
    require 'dnssd'
  rescue LoadError
    # dnssd gem not installed; bonjour support silently unavailable.
    return false
  end

  @bonjour_registered = false
  announced = false

  @ios.each do |io|
    if io.kind_of? TCPServer
      # Dots are replaced since they delimit DNSSD name components.
      fixed_name = name.gsub(/\./, "-")

      DNSSD.announce io, "puma - #{fixed_name}", "http" do |r|
        # Callback fires asynchronously once registration completes.
        @bonjour_registered = true
      end

      announced = true
    end
  end

  return announced
end
|
414
|
+
|
415
|
+
# True when a DNSSD announcement has completed (see #attempt_bonjour).
# Initializes the flag to false on first call.
def bonjour_registered?
  @bonjour_registered = false unless @bonjour_registered
  @bonjour_registered
end
|
418
|
+
end
|
419
|
+
end
|
@@ -0,0 +1,95 @@
|
|
1
|
+
require 'thread'
|
2
|
+
|
3
|
+
module Puma
  # A dynamically-sized pool of worker threads consuming work items from
  # an internal Queue. The pool grows on demand up to +max+ threads and
  # can be trimmed back down toward +min+ via #trim.
  class ThreadPool
    # +blk+ is invoked on a pool thread for every item pushed via #<<.
    # +min+ threads are spawned immediately.
    def initialize(min, max, &blk)
      @todo = Queue.new
      @mutex = Mutex.new

      @spawned = 0
      @min = min
      @max = max
      @block = blk

      # Count of Trim sentinels in flight (see #trim) so we never trim
      # below +min+.
      @trim_requested = 0

      @workers = []

      min.times { spawn_thread }
    end

    # Current number of live worker threads.
    attr_reader :spawned

    # Number of queued work items not yet picked up by a worker.
    def backlog
      @todo.size
    end

    # Sentinel objects pushed onto the queue to make one worker exit.
    Stop = Object.new
    Trim = Object.new

    # Starts one worker thread and returns it. The worker loops popping
    # work until it receives a Stop or Trim sentinel, then removes
    # itself from the pool bookkeeping.
    def spawn_thread
      @mutex.synchronize do
        @spawned += 1
      end

      th = Thread.new do
        todo = @todo
        block = @block

        while true
          work = todo.pop

          case work
          when Stop
            break
          when Trim
            @mutex.synchronize do
              @trim_requested -= 1
            end

            break
          else
            block.call work
          end
        end

        # Worker is exiting: drop it from the bookkeeping.
        @mutex.synchronize do
          @spawned -= 1
          @workers.delete th
        end
      end

      @mutex.synchronize { @workers << th }

      th
    end

    # Queues +work+, spawning a new worker when no thread is idle and
    # the pool is below +max+.
    # NOTE(review): @todo.num_waiting and @spawned are read here without
    # holding @mutex — racy under concurrent producers (worst case an
    # extra or missing spawn); confirm acceptable.
    def <<(work)
      if @todo.num_waiting == 0 and @spawned < @max
        spawn_thread
      end

      @todo << work
    end

    # Asks one surplus worker (above +min+) to exit after it finishes
    # its current item, by queueing a Trim sentinel.
    def trim
      @mutex.synchronize do
        if @spawned - @trim_requested > @min
          @trim_requested += 1
          @todo << Trim
        end
      end
    end

    # Tells every worker to stop and waits for them all to finish the
    # queued work, then resets the pool bookkeeping.
    def shutdown
      @spawned.times do
        @todo << Stop
      end

      @workers.each { |w| w.join }

      @spawned = 0
      @workers = []
    end
  end
end
|