puma 5.0.2-java → 5.2.0-java
This diff represents the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/History.md +667 -567
- data/README.md +51 -21
- data/bin/puma-wild +3 -9
- data/docs/compile_options.md +19 -0
- data/docs/deployment.md +6 -7
- data/docs/fork_worker.md +2 -0
- data/docs/jungle/README.md +0 -4
- data/docs/jungle/rc.d/puma +2 -2
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +1 -1
- data/docs/plugins.md +1 -1
- data/docs/restart.md +46 -23
- data/docs/stats.md +142 -0
- data/docs/systemd.md +25 -3
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +18 -5
- data/ext/puma_http11/http11_parser.c +45 -47
- data/ext/puma_http11/http11_parser.java.rl +1 -1
- data/ext/puma_http11/http11_parser.rl +1 -1
- data/ext/puma_http11/mini_ssl.c +199 -119
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +5 -7
- data/ext/puma_http11/puma_http11.c +25 -12
- data/lib/puma.rb +2 -2
- data/lib/puma/app/status.rb +44 -46
- data/lib/puma/binder.rb +69 -25
- data/lib/puma/cli.rb +4 -0
- data/lib/puma/client.rb +26 -79
- data/lib/puma/cluster.rb +37 -202
- data/lib/puma/cluster/worker.rb +176 -0
- data/lib/puma/cluster/worker_handle.rb +86 -0
- data/lib/puma/configuration.rb +21 -8
- data/lib/puma/const.rb +11 -3
- data/lib/puma/control_cli.rb +73 -70
- data/lib/puma/dsl.rb +100 -22
- data/lib/puma/error_logger.rb +10 -3
- data/lib/puma/events.rb +18 -3
- data/lib/puma/json.rb +96 -0
- data/lib/puma/launcher.rb +57 -15
- data/lib/puma/minissl.rb +47 -16
- data/lib/puma/minissl/context_builder.rb +6 -0
- data/lib/puma/null_io.rb +4 -0
- data/lib/puma/puma_http11.jar +0 -0
- data/lib/puma/queue_close.rb +26 -0
- data/lib/puma/reactor.rb +85 -363
- data/lib/puma/request.rb +451 -0
- data/lib/puma/runner.rb +17 -23
- data/lib/puma/server.rb +164 -553
- data/lib/puma/single.rb +2 -2
- data/lib/puma/state_file.rb +5 -3
- data/lib/puma/systemd.rb +46 -0
- data/lib/puma/util.rb +11 -0
- metadata +11 -6
- data/docs/jungle/upstart/README.md +0 -61
- data/docs/jungle/upstart/puma-manager.conf +0 -31
- data/docs/jungle/upstart/puma.conf +0 -69
- data/lib/puma/accept_nonblock.rb +0 -29
data/lib/puma/minissl.rb
CHANGED
@@ -73,7 +73,6 @@ module Puma
 
       def engine_read_all
         output = @engine.read
-        raise SSLError.exception "HTTP connection?" if bad_tlsv1_3?
         while output and additional_output = @engine.read
           output << additional_output
         end
@@ -100,6 +99,7 @@ module Puma
           # ourselves.
           raise IO::EAGAINWaitReadable
         elsif data.nil?
+          raise SSLError.exception "HTTP connection?" if bad_tlsv1_3?
           return nil
         end
 
@@ -117,20 +117,21 @@ module Puma
       def write(data)
         return 0 if data.empty?
 
-        need = data.bytesize
+        data_size = data.bytesize
+        need = data_size
 
         while true
           wrote = @engine.write data
-          enc = @engine.extract
 
-          while enc
-            @socket.write enc
-            enc = @engine.extract
+          enc_wr = ''.dup
+          while (enc = @engine.extract)
+            enc_wr << enc
           end
+          @socket.write enc_wr unless enc_wr.empty?
 
           need -= wrote
 
-          return data.bytesize if need == 0
+          return data_size if need == 0
 
           data = data[wrote..-1]
         end
@@ -245,6 +246,7 @@ module Puma
     attr_reader :cert
     attr_reader :ca
     attr_accessor :ssl_cipher_filter
+    attr_accessor :verification_flags
 
     def key=(key)
       raise ArgumentError, "No such key file '#{key}'" unless File.exist? key
@@ -287,33 +289,58 @@ module Puma
     VERIFY_PEER = 1
     VERIFY_FAIL_IF_NO_PEER_CERT = 2
 
+    # https://github.com/openssl/openssl/blob/master/include/openssl/x509_vfy.h.in
+    # /* Certificate verify flags */
+    VERIFICATION_FLAGS = {
+      "USE_CHECK_TIME" => 0x2,
+      "CRL_CHECK" => 0x4,
+      "CRL_CHECK_ALL" => 0x8,
+      "IGNORE_CRITICAL" => 0x10,
+      "X509_STRICT" => 0x20,
+      "ALLOW_PROXY_CERTS" => 0x40,
+      "POLICY_CHECK" => 0x80,
+      "EXPLICIT_POLICY" => 0x100,
+      "INHIBIT_ANY" => 0x200,
+      "INHIBIT_MAP" => 0x400,
+      "NOTIFY_POLICY" => 0x800,
+      "EXTENDED_CRL_SUPPORT" => 0x1000,
+      "USE_DELTAS" => 0x2000,
+      "CHECK_SS_SIGNATURE" => 0x4000,
+      "TRUSTED_FIRST" => 0x8000,
+      "SUITEB_128_LOS_ONLY" => 0x10000,
+      "SUITEB_192_LOS" => 0x20000,
+      "SUITEB_128_LOS" => 0x30000,
+      "PARTIAL_CHAIN" => 0x80000,
+      "NO_ALT_CHAINS" => 0x100000,
+      "NO_CHECK_TIME" => 0x200000
+    }.freeze
+
     class Server
       def initialize(socket, ctx)
         @socket = socket
         @ctx = ctx
-      end
-
-      # @!attribute [r] to_io
-      def to_io
-        @socket
+        @eng_ctx = IS_JRUBY ? @ctx : SSLContext.new(ctx)
       end
 
       def accept
         @ctx.check
         io = @socket.accept
-        engine = Engine.server @ctx
-
+        engine = Engine.server @eng_ctx
         Socket.new io, engine
       end
 
      def accept_nonblock
        @ctx.check
        io = @socket.accept_nonblock
-        engine = Engine.server @ctx
-
+        engine = Engine.server @eng_ctx
        Socket.new io, engine
      end
 
+      # @!attribute [r] to_io
+      def to_io
+        @socket
+      end
+
       # @!attribute [r] addr
       # @version 5.0.0
       def addr
@@ -323,6 +350,10 @@ module Puma
       def close
         @socket.close unless @socket.closed? # closed? call is for Windows
       end
+
+      def closed?
+        @socket.closed?
+      end
     end
   end
 end
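
Note: the rewritten `Socket#write` above now drains every pending TLS record from the engine into a single buffer and issues one `@socket.write` call, rather than one write per record. A minimal sketch of that buffering pattern, with a duck-typed `engine` standing in for `MiniSSL::Engine` (the `flush_engine` name is illustrative, not from Puma):

    # Drain all pending chunks from an engine-like object, then write once.
    # `engine` only needs an #extract method returning the next chunk or nil.
    def flush_engine(engine, socket)
      buffer = ''.dup                           # dup: stays mutable under frozen_string_literal
      while (chunk = engine.extract)            # drain every pending record
        buffer << chunk
      end
      socket.write buffer unless buffer.empty?  # single write call instead of one per record
    end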
data/lib/puma/minissl/context_builder.rb
CHANGED
@@ -62,6 +62,12 @@ module Puma
          end
        end
 
+        if params['verification_flags']
+          ctx.verification_flags = params['verification_flags'].split(',').
+            map { |flag| MiniSSL::VERIFICATION_FLAGS.fetch(flag) }.
+            inject { |sum, flag| sum ? sum | flag : flag }
+        end
+
        ctx
      end
 
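
For context, `verification_flags` arrives as a comma-separated string of the constant names defined in `MiniSSL::VERIFICATION_FLAGS`, and the builder ORs the looked-up values into one bitmask. A hypothetical `config/puma.rb` snippet exercising it (host, port, and file paths are placeholders, and the hash-style `ssl_bind` params are an assumption about the DSL):

    # Illustrative only -- adjust paths and host for a real deployment.
    ssl_bind '127.0.0.1', '9292',
      cert: 'config/cert.pem',
      key: 'config/key.pem',
      verify_mode: 'peer',
      verification_flags: 'PARTIAL_CHAIN,NO_CHECK_TIME'
    # The context builder computes 0x80000 | 0x200000 => 0x280000.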
data/lib/puma/null_io.rb
CHANGED
data/lib/puma/puma_http11.jar
CHANGED
Binary file
data/lib/puma/queue_close.rb
ADDED
@@ -0,0 +1,26 @@
+class ClosedQueueError < StandardError; end
+module Puma
+
+  # Queue#close was added in Ruby 2.3.
+  # Add a simple implementation for earlier Ruby versions.
+  #
+  module QueueClose
+    def initialize
+      @closed = false
+      super
+    end
+    def close
+      @closed = true
+    end
+    def closed?
+      @closed
+    end
+    def push(object)
+      @closed ||= false
+      raise ClosedQueueError if @closed
+      super
+    end
+    alias << push
+  end
+  ::Queue.prepend QueueClose
+end
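
The shim above back-ports the Ruby 2.3+ `Queue#close` semantics that the new reactor depends on: once closed, `closed?` returns true and further pushes raise. A quick behavioral sketch, which works the same against the native implementation on Ruby >= 2.3:

    q = Queue.new
    q << :job            # accepted while open
    q.close
    q.closed?            # => true
    begin
      q << :late_job     # pushing to a closed queue raises
    rescue ClosedQueueError
      # Reactor#add rescues this and returns false
    end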
data/lib/puma/reactor.rb
CHANGED
@@ -1,394 +1,116 @@
 # frozen_string_literal: true
 
-require 'puma/util'
-require 'puma/minissl' if ::Puma::HAS_SSL
-
-require 'nio'
+require 'puma/queue_close' unless ::Queue.instance_methods.include? :close
 
 module Puma
-
-
-  #
-  #
-  # If read buffering is not done, and no other read buffering is performed (such as by an application server
-  # such as nginx) then the application would be subject to a slow client attack.
-  #
-  # Each Puma "worker" process has its own Reactor. For example if you start puma with `$ puma -w 5` then
-  # it will have 5 workers and each worker will have it's own reactor.
-  #
-  # For a graphical representation of how the reactor works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
-  #
-  # ## Reactor Flow
-  #
-  # A connection comes into a `Puma::Server` instance, it is then passed to a `Puma::Reactor` instance,
-  # which stores it in an array and waits for any of the connections to be ready for reading.
+  class UnsupportedBackend < StandardError; end
+
+  # Monitors a collection of IO objects, calling a block whenever
+  # any monitored object either receives data or times out, or when the Reactor shuts down.
   #
-  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
-  # just plain IO#select). The call to `NIO::Selector#select` will
-  #
-  # then loops through each of these request objects, and sees if they're complete. If they
-  # have a full header and body then the reactor passes the request to a thread pool.
-  # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
+  # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will
+  # 'wakeup' any IO object that receives data.
   #
-  #
-  #
+  # This class additionally tracks a timeout for every added object,
+  # and wakes up any object when its timeout elapses.
   #
-  #
-  # of this logic lives.
+  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
   class Reactor
-
-
-    #
-
-
-
-
-
-
-    # it will be passed to the `app_pool`.
-    def initialize(server, app_pool)
-      @server = server
-      @events = server.events
-      @app_pool = app_pool
-
-      @selector = NIO::Selector.new
-
-      @mutex = Mutex.new
-
-      # Read / Write pipes to wake up internal while loop
-      @ready, @trigger = Puma::Util.pipe
-      @input = []
-      @sleep_for = DefaultSleepFor
+    # Create a new Reactor to monitor IO objects added by #add.
+    # The provided block will be invoked when an IO has data available to read,
+    # its timeout elapses, or when the Reactor shuts down.
+    def initialize(backend, &block)
+      require 'nio'
+      unless backend == :auto || NIO::Selector.backends.include?(backend)
+        raise "unsupported IO selector backend: #{backend} (available backends: #{NIO::Selector.backends.join(', ')})"
+      end
+      @selector = backend == :auto ? NIO::Selector.new : NIO::Selector.new(backend)
+      @input = Queue.new
       @timeouts = []
-
-      mon = @selector.register(@ready, :r)
-      mon.value = @ready
-
-      @monitors = [mon]
+      @block = block
     end
 
-
-
-
-
-
-
-    # will break on `NIO::Selector#select` and return an array.
-    #
-    # ## When a request is added:
-    #
-    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
-    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
-    #
-    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
-    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
-    # When `@trigger` is written-to, the loop "wakes" and the `ready`
-    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
-    # first IO object is the `@ready` object. This first array `[#<IO:fd 10>]`
-    # is saved as a `reads` variable.
-    #
-    # The `reads` variable is iterated through. In the case that the object
-    # is the same as the `@ready` input pipe, then we know that there was a `trigger` event.
-    #
-    # If there was a trigger event, then one byte of `@ready` is read into memory. In the case of the first request,
-    # the reactor sees that it's a `"*"` value and the reactor adds the contents of `@input` into the `sockets` array.
-    # The while then loop continues to iterate again, but now the `sockets` array contains a `Puma::Client` instance in addition
-    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
-    #
-    # Since the `Puma::Client` in this example has data that has not been read yet,
-    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
-    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
-    #
-    # Each element in the first entry is iterated over. The `Puma::Client` object is not
-    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
-    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
-    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
-    # can pick up the request and begin to execute application logic. This is done
-    # via `@app_pool << c`. The `Puma::Client` is then removed from the `sockets` array.
-    #
-    # If the request body is not present then nothing will happen, and the loop will iterate
-    # again. When the client sends more data to the socket the `Puma::Client` object will
-    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
-    # passed to the thread pool.
-    #
-    # ## Time Out Case
-    #
-    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
-    # periodically "time out" of the sleep. One of the functions of this is to check for
-    # any requests that have "timed out". At the end of the loop it's checked to see if
-    # the first element in the `@timeout` array has exceed its allowed time. If so,
-    # the client object is removed from the timeout array, a 408 response is written.
-    # Then its connection is closed, and the object is removed from the `sockets` array
-    # that watches for new data.
-    #
-    # This behavior loops until all the objects that have timed out have been removed.
-    #
-    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
-    # will be set to be equal to the amount of time it will take for the next timeout to occur.
-    # This calculation happens in `calculate_sleep`.
-    def run_internal
-      monitors = @monitors
-      selector = @selector
-
-      while true
-        begin
-          ready = selector.select @sleep_for
-        rescue IOError => e
-          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-          if monitors.any? { |mon| mon.value.closed? }
-            STDERR.puts "Error in select: #{e.message} (#{e.class})"
-            STDERR.puts e.backtrace
-
-            monitors.reject! do |mon|
-              if mon.value.closed?
-                selector.deregister mon.value
-                true
-              end
-            end
-
-            retry
-          else
-            raise
-          end
-        end
-
-        if ready
-          ready.each do |mon|
-            if mon.value == @ready
-              @mutex.synchronize do
-                case @ready.read(1)
-                when "*"
-                  @input.each do |c|
-                    mon = nil
-                    begin
-                      begin
-                        mon = selector.register(c, :r)
-                      rescue ArgumentError
-                        # There is a bug where we seem to be registering an already registered
-                        # client. This code deals with this situation but I wish we didn't have to.
-                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
-                        selector.deregister(c)
-                        mon = selector.register(c, :r)
-                      end
-                    rescue IOError
-                      # Means that the io is closed, so we should ignore this request
-                      # entirely
-                    else
-                      mon.value = c
-                      @timeouts << mon if c.timeout_at
-                      monitors << mon
-                    end
-                  end
-                  @input.clear
-
-                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
-                  calculate_sleep
-                when "c"
-                  monitors.reject! do |submon|
-                    if submon.value == @ready
-                      false
-                    else
-                      if submon.value.can_close?
-                        submon.value.close
-                      else
-                        # Pass remaining open client connections to the thread pool.
-                        @app_pool << submon.value
-                      end
-                      begin
-                        selector.deregister submon.value
-                      rescue IOError
-                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
-                        # we need to swallow it.
-                      end
-                      true
-                    end
-                  end
-                when "!"
-                  return
-                end
-              end
-            else
-              c = mon.value
-
-              # We have to be sure to remove it from the timeout
-              # list or we'll accidentally close the socket when
-              # it's in use!
-              if c.timeout_at
-                @mutex.synchronize do
-                  @timeouts.delete mon
-                end
-              end
-
-              begin
-                if c.try_to_finish
-                  @app_pool << c
-                  clear_monitor mon
-                end
-
-              # Don't report these to the lowlevel_error handler, otherwise
-              # will be flooding them with errors when persistent connections
-              # are closed.
-              rescue ConnectionError
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-
-              # SSL handshake failure
-              rescue MiniSSL::SSLError => e
-                @server.lowlevel_error e, c.env
-                @events.ssl_error e, c.io
-
-                c.close
-                clear_monitor mon
-
-              # The client doesn't know HTTP well
-              rescue HttpParserError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(400)
-                c.close
-
-                clear_monitor mon
-
-                @events.parse_error e, c
-              rescue StandardError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-              end
-            end
-          end
-        end
-
-        unless @timeouts.empty?
-          @mutex.synchronize do
-            now = Time.now
-
-            while @timeouts.first.value.timeout_at < now
-              mon = @timeouts.shift
-              c = mon.value
-              c.write_error(408) if c.in_data_phase
-              c.close
-
-              clear_monitor mon
-
-              break if @timeouts.empty?
-            end
-
-            calculate_sleep
-          end
+    # Run the internal select loop, using a background thread by default.
+    def run(background=true)
+      if background
+        @thread = Thread.new do
+          Puma.set_thread_name "reactor"
+          select_loop
         end
+      else
+        select_loop
       end
     end
 
-
-
-
-
-
-
-
-
-
-    ensure
-      @trigger.close
-      @ready.close
+    # Add a new client to monitor.
+    # The object must respond to #timeout and #timeout_at.
+    # Returns false if the reactor is already shut down.
+    def add(client)
+      @input << client
+      @selector.wakeup
+      true
+    rescue ClosedQueueError
+      false
     end
 
-
-
-
-
-
-
-      STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
-      STDERR.puts e.backtrace
-      retry
-    ensure
-      @trigger.close
-      @ready.close
-    end
+    # Shutdown the reactor, blocking until the background thread is finished.
+    def shutdown
+      @input.close
+      begin
+        @selector.wakeup
+      rescue IOError # Ignore if selector is already closed
       end
+      @thread.join if @thread
     end
 
-
-    # sleep for in the main reactor loop when no sockets are being written to.
-    #
-    # The values kept in `@timeouts` are sorted so that the first timeout
-    # comes first in the array. When there are no timeouts the default timeout is used.
-    #
-    # Otherwise a sleep value is set that is the same as the amount of time it
-    # would take for the first element to time out.
-    #
-    # If that value is in the past, then a sleep value of zero is used.
-    def calculate_sleep
-      if @timeouts.empty?
-        @sleep_for = DefaultSleepFor
-      else
-        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f
+    private
 
-        if diff < 0.0
-          @sleep_for = 0
-        else
-          @sleep_for = diff
+    def select_loop
+      begin
+        until @input.closed? && @input.empty?
+          # Wakeup any registered object that receives incoming data.
+          # Block until the earliest timeout or Selector#wakeup is called.
+          timeout = (earliest = @timeouts.first) && earliest.timeout
+          @selector.select(timeout) {|mon| wakeup!(mon.value)}
+
+          # Wakeup all objects that timed out.
+          timed_out = @timeouts.take_while {|t| t.timeout == 0}
+          timed_out.each(&method(:wakeup!))
+
+          unless @input.empty?
+            until @input.empty?
+              client = @input.pop
+              register(client) if client.io_ok?
+            end
+            @timeouts.sort_by!(&:timeout_at)
+          end
         end
+      rescue StandardError => e
+        STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
+        STDERR.puts e.backtrace
+        retry
       end
+      # Wakeup all remaining objects on shutdown.
+      @timeouts.each(&@block)
+      @selector.close
     end
 
-    #
-
-
-
-
-
-    # The main body of the reactor loop is in `run_internal` and it
-    # will sleep on `NIO::Selector#select`. When a new connection is added to the
-    # reactor it cannot be added directly to the `sockets` array, because
-    # the `NIO::Selector#select` will not be watching for it yet.
-    #
-    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
-    # the contents of `@input` added to the `sockets` array, and then
-    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
-    # object can be read immediately, it does not block, but instead returns
-    # right away.
-    #
-    # This behavior is accomplished by writing to `@trigger` which wakes up
-    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
-    # pull the contents from `@input` and add them to the sockets array.
-    #
-    # If the object passed in has a timeout value in `timeout_at` then
-    # it is added to a `@timeouts` array. This array is then re-arranged
-    # so that the first element to timeout will be at the front of the
-    # array. Then a value to sleep for is derived in the call to `calculate_sleep`
-    def add(c)
-      @mutex.synchronize do
-        @input << c
-        @trigger << "*"
-      end
-    end
-
-    # Close all watched sockets and clear them from being watched
-    def clear!
-      begin
-        @trigger << "c"
-      rescue IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+    # Start monitoring the object.
+    def register(client)
+      @selector.register(client.to_io, :r).value = client
+      @timeouts << client
+    rescue ArgumentError
+      # unreadable clients raise error when processed by NIO
     end
 
-
-
-
-
-
+    # 'Wake up' a monitored object by calling the provided block.
+    # Stop monitoring the object if the block returns `true`.
+    def wakeup!(client)
+      if @block.call client
+        @selector.deregister client.to_io
+        @timeouts.delete client
       end
-
-      @thread.join
     end
   end
 end