puma 5.0.0-java → 5.1.0-java
Potentially problematic release.
- checksums.yaml +4 -4
- data/History.md +1190 -574
- data/README.md +28 -20
- data/bin/puma-wild +3 -9
- data/docs/compile_options.md +19 -0
- data/docs/deployment.md +5 -6
- data/docs/fork_worker.md +2 -0
- data/docs/jungle/README.md +0 -4
- data/docs/jungle/rc.d/puma +2 -2
- data/docs/nginx.md +1 -1
- data/docs/restart.md +46 -23
- data/docs/systemd.md +25 -3
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +4 -5
- data/ext/puma_http11/http11_parser.c +64 -64
- data/ext/puma_http11/mini_ssl.c +39 -37
- data/ext/puma_http11/puma_http11.c +25 -12
- data/lib/puma.rb +7 -4
- data/lib/puma/app/status.rb +44 -46
- data/lib/puma/binder.rb +48 -1
- data/lib/puma/cli.rb +4 -0
- data/lib/puma/client.rb +31 -80
- data/lib/puma/cluster.rb +39 -202
- data/lib/puma/cluster/worker.rb +176 -0
- data/lib/puma/cluster/worker_handle.rb +86 -0
- data/lib/puma/configuration.rb +20 -8
- data/lib/puma/const.rb +11 -3
- data/lib/puma/control_cli.rb +71 -70
- data/lib/puma/dsl.rb +67 -19
- data/lib/puma/error_logger.rb +2 -2
- data/lib/puma/events.rb +21 -3
- data/lib/puma/json.rb +96 -0
- data/lib/puma/launcher.rb +61 -12
- data/lib/puma/minissl.rb +8 -0
- data/lib/puma/puma_http11.jar +0 -0
- data/lib/puma/queue_close.rb +26 -0
- data/lib/puma/reactor.rb +79 -373
- data/lib/puma/request.rb +451 -0
- data/lib/puma/runner.rb +15 -21
- data/lib/puma/server.rb +193 -508
- data/lib/puma/single.rb +3 -2
- data/lib/puma/state_file.rb +5 -3
- data/lib/puma/systemd.rb +46 -0
- data/lib/puma/thread_pool.rb +22 -2
- data/lib/puma/util.rb +12 -0
- metadata +9 -6
- data/docs/jungle/upstart/README.md +0 -61
- data/docs/jungle/upstart/puma-manager.conf +0 -31
- data/docs/jungle/upstart/puma.conf +0 -69
- data/lib/puma/accept_nonblock.rb +0 -29
data/lib/puma/minissl.rb
CHANGED
@@ -24,6 +24,7 @@ module Puma
         @peercert = nil
       end
 
+      # @!attribute [r] to_io
       def to_io
        @socket
       end
@@ -38,6 +39,7 @@ module Puma
       #
       # Used for dropping tcp connections to ssl.
       # See OpenSSL ssl/ssl_stat.c SSL_state_string for info
+      # @!attribute [r] ssl_version_state
       # @version 5.0.0
       #
       def ssl_version_state
@@ -188,10 +190,12 @@ module Puma
         end
       end
 
+      # @!attribute [r] peeraddr
       def peeraddr
         @socket.peeraddr
       end
 
+      # @!attribute [r] peercert
       def peercert
         return @peercert if @peercert
 
@@ -264,12 +268,14 @@ module Puma
       end
 
       # disables TLSv1
+      # @!attribute [w] no_tlsv1=
       def no_tlsv1=(tlsv1)
         raise ArgumentError, "Invalid value of no_tlsv1=" unless ['true', 'false', true, false].include?(tlsv1)
         @no_tlsv1 = tlsv1
       end
 
       # disables TLSv1 and TLSv1.1. Overrides `#no_tlsv1=`
+      # @!attribute [w] no_tlsv1_1=
       def no_tlsv1_1=(tlsv1_1)
         raise ArgumentError, "Invalid value of no_tlsv1_1=" unless ['true', 'false', true, false].include?(tlsv1_1)
         @no_tlsv1_1 = tlsv1_1
@@ -287,6 +293,7 @@ module Puma
         @ctx = ctx
       end
 
+      # @!attribute [r] to_io
       def to_io
         @socket
       end
@@ -307,6 +314,7 @@ module Puma
         Socket.new io, engine
       end
 
+      # @!attribute [r] addr
       # @version 5.0.0
       def addr
         @socket.addr
data/lib/puma/puma_http11.jar
CHANGED
Binary file
data/lib/puma/queue_close.rb
ADDED
@@ -0,0 +1,26 @@
+class ClosedQueueError < StandardError; end
+module Puma
+
+  # Queue#close was added in Ruby 2.3.
+  # Add a simple implementation for earlier Ruby versions.
+  #
+  module QueueClose
+    def initialize
+      @closed = false
+      super
+    end
+    def close
+      @closed = true
+    end
+    def closed?
+      @closed
+    end
+    def push(object)
+      @closed ||= false
+      raise ClosedQueueError if @closed
+      super
+    end
+    alias << push
+  end
+  ::Queue.prepend QueueClose
+end
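puma only loads this shim when the running Ruby's Queue lacks #close (pre-2.3); on newer Rubies the native methods behave the same way for the calls puma makes. A minimal usage sketch, assuming either the shim or a modern Ruby is loaded:

require 'puma/queue_close' unless ::Queue.instance_methods.include? :close

q = Queue.new
q << :job        # pushing to an open queue works as usual
q.close          # mark the queue closed
q.closed?        # => true
begin
  q << :late     # raises once the queue is closed, as on Ruby >= 2.3
rescue ClosedQueueError
  # reject new work after shutdown
end

Note the shim covers only what the new Reactor below needs (close, closed?, and push raising ClosedQueueError); unlike the native implementation it does not wake threads blocked in Queue#pop.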
data/lib/puma/reactor.rb
CHANGED
@@ -1,405 +1,111 @@
 # frozen_string_literal: true
 
-require 'puma/util'
-require 'puma/minissl' if ::Puma::HAS_SSL
-
-require 'nio'
+require 'puma/queue_close' unless ::Queue.instance_methods.include? :close
 
 module Puma
-  # Internal Docs for A simple thread pool management object.
-  #
-  # The Reactor object is responsible for ensuring that a request has been
-  # completely received before it starts to be processed. This may be known as read buffering.
-  # If read buffering is not done, and no other read buffering is performed (such as by an application server
-  # such as nginx) then the application would be subject to a slow client attack.
-  #
-  # Each Puma "worker" process has its own Reactor. For example if you start puma with `$ puma -w 5` then
-  # it will have 5 workers and each worker will have it's own reactor.
-  #
-  # For a graphical representation of how the reactor works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
-  #
-  # ## Reactor Flow
+  # Monitors a collection of IO objects, calling a block whenever
+  # any monitored object either receives data or times out, or when the Reactor shuts down.
   #
-  # A connection comes into a `Puma::Server` instance, it is then passed to a `Puma::Reactor` instance,
-  # which stores it in an array and waits for any of the connections to be ready for reading.
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
+  # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will
+  # 'wakeup' any IO object that receives data.
   #
-  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev, Java NIO or
-  # just plain IO#select). The call to `NIO::Selector#select` will "wake up" and
-  # return the references to any objects that caused it to "wake". The reactor
-  # then loops through each of these request objects, and sees if they're complete. If they
-  # have a full header and body then the reactor passes the request to a thread pool.
-  # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
+  # This class additionally tracks a timeout for every added object,
+  # and wakes up any object when its timeout elapses.
   #
-  # If the request is not complete, then it stays in the array, and the next time any
-  # data is written to that socket reference, then the loop is woken up and it is checked for completeness again.
-  #
-  # A detailed example is given in the docs for `run_internal` which is where the bulk
-  # of this logic lives.
+  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
   class Reactor
-    DefaultSleepFor = 5
-
-    # Creates an instance of Puma::Reactor
-    #
-    # The `server` argument is an instance of `Puma::Server`
-    # that is used to write a response for "low level errors"
-    # when there is an exception inside of the reactor.
-    #
-    # The `app_pool` is an instance of `Puma::ThreadPool`.
-    # Once a request is fully formed (header and body are received)
-    # it will be passed to the `app_pool`.
-    def initialize(server, app_pool)
-      @server = server
-      @events = server.events
-      @app_pool = app_pool
-
+    # Create a new Reactor to monitor IO objects added by #add.
+    # The provided block will be invoked when an IO has data available to read,
+    # its timeout elapses, or when the Reactor shuts down.
+    def initialize(&block)
+      require 'nio'
       @selector = NIO::Selector.new
-
-      @mutex = Mutex.new
-
-      # Read / Write pipes to wake up internal while loop
-      @ready, @trigger = Puma::Util.pipe
-      @input = []
-      @sleep_for = DefaultSleepFor
+      @input = Queue.new
       @timeouts = []
-
-      mon = @selector.register(@ready, :r)
-      mon.value = @ready
-
-      @monitors = [mon]
+      @block = block
     end
 
-    # Until a request is added via the `add` method this method will internally
-    # loop, waiting on the `sockets` array objects. The only object in this
-    # array at first is the `@ready` IO object, which will be written to when a new
-    # connection is added.
-    #
-    # When a request is added the `@ready` pipe is "woken" and the loop
-    # will break on `NIO::Selector#select` and return an array.
-    #
-    # ## When a request is added:
-    #
-    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
-    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
-    #
-    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
-    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
-    # When `@trigger` is written-to, the loop "wakes" and the `ready`
-    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
-    # first IO object is the `@ready` object. This first array `[#<IO:fd 10>]`
-    # is saved as a `reads` variable.
-    #
-    # The `reads` variable is iterated through. In the case that the object
-    # is the same as the `@ready` input pipe, then we know that there was a `trigger` event.
-    #
-    # If there was a trigger event, then one byte of `@ready` is read into memory. In the case of the first request,
-    # the reactor sees that it's a `"*"` value and the reactor adds the contents of `@input` into the `sockets` array.
-    # The while then loop continues to iterate again, but now the `sockets` array contains a `Puma::Client` instance in addition
-    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
-    #
-    # Since the `Puma::Client` in this example has data that has not been read yet,
-    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
-    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
-    #
-    # Each element in the first entry is iterated over. The `Puma::Client` object is not
-    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
-    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
-    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
-    # can pick up the request and begin to execute application logic. This is done
-    # via `@app_pool << c`. The `Puma::Client` is then removed from the `sockets` array.
-    #
-    # If the request body is not present then nothing will happen, and the loop will iterate
-    # again. When the client sends more data to the socket the `Puma::Client` object will
-    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
-    # passed to the thread pool.
-    #
-    # ## Time Out Case
-    #
-    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
-    # periodically "time out" of the sleep. One of the functions of this is to check for
-    # any requests that have "timed out". At the end of the loop it's checked to see if
-    # the first element in the `@timeout` array has exceed its allowed time. If so,
-    # the client object is removed from the timeout array, a 408 response is written.
-    # Then its connection is closed, and the object is removed from the `sockets` array
-    # that watches for new data.
-    #
-    # This behavior loops until all the objects that have timed out have been removed.
-    #
-    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
-    # will be set to be equal to the amount of time it will take for the next timeout to occur.
-    # This calculation happens in `calculate_sleep`.
-    def run_internal
-      monitors = @monitors
-      selector = @selector
-
-      while true
-        begin
-          ready = selector.select @sleep_for
-        rescue IOError => e
-          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-          if monitors.any? { |mon| mon.value.closed? }
-            STDERR.puts "Error in select: #{e.message} (#{e.class})"
-            STDERR.puts e.backtrace
-
-            monitors.reject! do |mon|
-              if mon.value.closed?
-                selector.deregister mon.value
-                true
-              end
-            end
-
-            retry
-          else
-            raise
-          end
-        end
-
-        if ready
-          ready.each do |mon|
-            if mon.value == @ready
-              @mutex.synchronize do
-                case @ready.read(1)
-                when "*"
-                  @input.each do |c|
-                    mon = nil
-                    begin
-                      begin
-                        mon = selector.register(c, :r)
-                      rescue ArgumentError
-                        # There is a bug where we seem to be registering an already registered
-                        # client. This code deals with this situation but I wish we didn't have to.
-                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
-                        selector.deregister(c)
-                        mon = selector.register(c, :r)
-                      end
-                    rescue IOError
-                      # Means that the io is closed, so we should ignore this request
-                      # entirely
-                    else
-                      mon.value = c
-                      @timeouts << mon if c.timeout_at
-                      monitors << mon
-                    end
-                  end
-                  @input.clear
-
-                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
-                  calculate_sleep
-                when "c"
-                  monitors.reject! do |submon|
-                    if submon.value == @ready
-                      false
-                    else
-                      if submon.value.can_close?
-                        submon.value.close
-                      else
-                        # Pass remaining open client connections to the thread pool.
-                        @app_pool << submon.value
-                      end
-                      begin
-                        selector.deregister submon.value
-                      rescue IOError
-                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
-                        # we need to swallow it.
-                      end
-                      true
-                    end
-                  end
-                when "!"
-                  return
-                end
-              end
-            else
-              c = mon.value
-
-              # We have to be sure to remove it from the timeout
-              # list or we'll accidentally close the socket when
-              # it's in use!
-              if c.timeout_at
-                @mutex.synchronize do
-                  @timeouts.delete mon
-                end
-              end
-
-              begin
-                if c.try_to_finish
-                  @app_pool << c
-                  clear_monitor mon
-                end
-
-              # Don't report these to the lowlevel_error handler, otherwise
-              # will be flooding them with errors when persistent connections
-              # are closed.
-              rescue ConnectionError
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-
-              # SSL handshake failure
-              rescue MiniSSL::SSLError => e
-                @server.lowlevel_error(e, c.env)
-
-                ssl_socket = c.io
-                begin
-                  addr = ssl_socket.peeraddr.last
-                # EINVAL can happen when browser closes socket w/security exception
-                rescue IOError, Errno::EINVAL
-                  addr = "<unknown>"
-                end
-
-                cert = ssl_socket.peercert
-
-                c.close
-                clear_monitor mon
-
-                @events.ssl_error e, addr, cert
-
-              # The client doesn't know HTTP well
-              rescue HttpParserError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(400)
-                c.close
-
-                clear_monitor mon
-
-                @events.parse_error e, c
-              rescue StandardError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-              end
-            end
-          end
-        end
-
-        unless @timeouts.empty?
-          @mutex.synchronize do
-            now = Time.now
-
-            while @timeouts.first.value.timeout_at < now
-              mon = @timeouts.shift
-              c = mon.value
-              c.write_error(408) if c.in_data_phase
-              c.close
-
-              clear_monitor mon
-
-              break if @timeouts.empty?
-            end
-
-            calculate_sleep
-          end
+    # Run the internal select loop, using a background thread by default.
+    def run(background=true)
+      if background
+        @thread = Thread.new do
+          Puma.set_thread_name "reactor"
+          select_loop
         end
+      else
+        select_loop
       end
     end
 
-
-
-
-
-
-
-
-    def run
-      run_internal
-    ensure
-      @trigger.close
-      @ready.close
+    # Add a new client to monitor.
+    # The object must respond to #timeout and #timeout_at.
+    # Returns false if the reactor is already shut down.
+    def add(client)
+      @input << client
+      @selector.wakeup
+      true
+    rescue ClosedQueueError
+      false
     end
 
-    def run_in_thread
-      @thread = Thread.new do
-        Puma.set_thread_name "reactor"
-        begin
-          run_internal
-        rescue StandardError => e
-          STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
-          STDERR.puts e.backtrace
-          retry
-        ensure
-          @trigger.close
-          @ready.close
-        end
+    # Shutdown the reactor, blocking until the background thread is finished.
+    def shutdown
+      @input.close
+      begin
+        @selector.wakeup
+      rescue IOError # Ignore if selector is already closed
       end
+      @thread.join if @thread
     end
 
-    # The `calculate_sleep` sets the value that the `NIO::Selector#select` will
-    # sleep for in the main reactor loop when no sockets are being written to.
-    #
-    # The values kept in `@timeouts` are sorted so that the first timeout
-    # comes first in the array. When there are no timeouts the default timeout is used.
-    #
-    # Otherwise a sleep value is set that is the same as the amount of time it
-    # would take for the first element to time out.
-    #
-    # If that value is in the past, then a sleep value of zero is used.
-    def calculate_sleep
-      if @timeouts.empty?
-        @sleep_for = DefaultSleepFor
-      else
-        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f
+    private
 
-        if diff < 0.0
-          @sleep_for = 0
-        else
-          @sleep_for = diff
+    def select_loop
+      begin
+        until @input.closed? && @input.empty?
+          # Wakeup any registered object that receives incoming data.
+          # Block until the earliest timeout or Selector#wakeup is called.
+          timeout = (earliest = @timeouts.first) && earliest.timeout
+          @selector.select(timeout) {|mon| wakeup!(mon.value)}
+
+          # Wakeup all objects that timed out.
+          timed_out = @timeouts.take_while {|t| t.timeout == 0}
+          timed_out.each(&method(:wakeup!))
+
+          unless @input.empty?
+            until @input.empty?
+              client = @input.pop
+              register(client) if client.io_ok?
+            end
+            @timeouts.sort_by!(&:timeout_at)
+          end
         end
+      rescue StandardError => e
+        STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
+        STDERR.puts e.backtrace
+        retry
       end
+      # Wakeup all remaining objects on shutdown.
+      @timeouts.each(&@block)
+      @selector.close
     end
 
-    #
-
-
-
-
-
-    # The main body of the reactor loop is in `run_internal` and it
-    # will sleep on `NIO::Selector#select`. When a new connection is added to the
-    # reactor it cannot be added directly to the `sockets` array, because
-    # the `NIO::Selector#select` will not be watching for it yet.
-    #
-    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
-    # the contents of `@input` added to the `sockets` array, and then
-    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
-    # object can be read immediately, it does not block, but instead returns
-    # right away.
-    #
-    # This behavior is accomplished by writing to `@trigger` which wakes up
-    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
-    # pull the contents from `@input` and add them to the sockets array.
-    #
-    # If the object passed in has a timeout value in `timeout_at` then
-    # it is added to a `@timeouts` array. This array is then re-arranged
-    # so that the first element to timeout will be at the front of the
-    # array. Then a value to sleep for is derived in the call to `calculate_sleep`
-    def add(c)
-      @mutex.synchronize do
-        @input << c
-        @trigger << "*"
-      end
-    end
-
-    # Close all watched sockets and clear them from being watched
-    def clear!
-      begin
-        @trigger << "c"
-      rescue IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+    # Start monitoring the object.
+    def register(client)
+      @selector.register(client.to_io, :r).value = client
+      @timeouts << client
+    rescue ArgumentError
+      # unreadable clients raise error when processed by NIO
    end
 
-    def shutdown
-      begin
-        @trigger << "!"
-      rescue IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
+    # 'Wake up' a monitored object by calling the provided block.
+    # Stop monitoring the object if the block returns `true`.
+    def wakeup!(client)
+      if @block.call client
+        @selector.deregister client.to_io
+        @timeouts.delete client
      end
-
-      @thread.join
     end
   end
 end