puma 4.3.12 → 5.6.9
- checksums.yaml +4 -4
- data/History.md +1526 -524
- data/LICENSE +23 -20
- data/README.md +120 -36
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +63 -26
- data/docs/compile_options.md +21 -0
- data/docs/deployment.md +60 -69
- data/docs/fork_worker.md +33 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/{tools → docs}/jungle/rc.d/README.md +1 -1
- data/{tools → docs}/jungle/rc.d/puma +2 -2
- data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +1 -1
- data/docs/plugins.md +15 -15
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +46 -23
- data/docs/signals.md +13 -11
- data/docs/stats.md +142 -0
- data/docs/systemd.md +85 -128
- data/ext/puma_http11/PumaHttp11Service.java +2 -4
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +44 -10
- data/ext/puma_http11/http11_parser.c +45 -47
- data/ext/puma_http11/http11_parser.h +1 -1
- data/ext/puma_http11/http11_parser.java.rl +1 -1
- data/ext/puma_http11/http11_parser.rl +1 -1
- data/ext/puma_http11/http11_parser_common.rl +0 -0
- data/ext/puma_http11/mini_ssl.c +225 -89
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +5 -3
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +3 -5
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +109 -67
- data/ext/puma_http11/puma_http11.c +32 -51
- data/lib/puma/app/status.rb +50 -36
- data/lib/puma/binder.rb +225 -106
- data/lib/puma/cli.rb +24 -18
- data/lib/puma/client.rb +146 -84
- data/lib/puma/cluster/worker.rb +173 -0
- data/lib/puma/cluster/worker_handle.rb +94 -0
- data/lib/puma/cluster.rb +212 -220
- data/lib/puma/commonlogger.rb +2 -2
- data/lib/puma/configuration.rb +58 -49
- data/lib/puma/const.rb +22 -7
- data/lib/puma/control_cli.rb +99 -76
- data/lib/puma/detect.rb +29 -2
- data/lib/puma/dsl.rb +368 -96
- data/lib/puma/error_logger.rb +104 -0
- data/lib/puma/events.rb +55 -34
- data/lib/puma/io_buffer.rb +9 -2
- data/lib/puma/jruby_restart.rb +0 -58
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher.rb +128 -46
- data/lib/puma/minissl/context_builder.rb +14 -9
- data/lib/puma/minissl.rb +137 -50
- data/lib/puma/null_io.rb +18 -1
- data/lib/puma/plugin/tmp_restart.rb +0 -0
- data/lib/puma/plugin.rb +3 -12
- data/lib/puma/queue_close.rb +26 -0
- data/lib/puma/rack/builder.rb +1 -5
- data/lib/puma/rack/urlmap.rb +0 -0
- data/lib/puma/rack_default.rb +0 -0
- data/lib/puma/reactor.rb +85 -369
- data/lib/puma/request.rb +489 -0
- data/lib/puma/runner.rb +46 -61
- data/lib/puma/server.rb +292 -763
- data/lib/puma/single.rb +9 -65
- data/lib/puma/state_file.rb +48 -8
- data/lib/puma/systemd.rb +46 -0
- data/lib/puma/thread_pool.rb +125 -57
- data/lib/puma/util.rb +32 -4
- data/lib/puma.rb +48 -0
- data/lib/rack/handler/puma.rb +2 -3
- data/lib/rack/version_restriction.rb +15 -0
- data/tools/{docker/Dockerfile → Dockerfile} +1 -1
- data/tools/trickletest.rb +0 -0
- metadata +29 -24
- data/docs/tcp_mode.md +0 -96
- data/ext/puma_http11/io_buffer.c +0 -155
- data/ext/puma_http11/org/jruby/puma/IOBuffer.java +0 -72
- data/lib/puma/accept_nonblock.rb +0 -29
- data/lib/puma/tcp_logger.rb +0 -41
- data/tools/jungle/README.md +0 -19
- data/tools/jungle/init.d/README.md +0 -61
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/reactor.rb
CHANGED
@@ -1,400 +1,116 @@
 # frozen_string_literal: true
 
-require 'puma/util'
-require 'puma/minissl'
-
-require 'nio'
+require 'puma/queue_close' unless ::Queue.instance_methods.include? :close
 
 module Puma
-
-
-  #
-  #
-  # If read buffering is not done, and no other read buffering is performed (such as by an application server
-  # such as nginx) then the application would be subject to a slow client attack.
-  #
-  # Each Puma "worker" process has its own Reactor. For example if you start puma with `$ puma -w 5` then
-  # it will have 5 workers and each worker will have it's own reactor.
-  #
-  # For a graphical representation of how the reactor works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
-  #
-  # ## Reactor Flow
-  #
-  # A connection comes into a `Puma::Server` instance, it is then passed to a `Puma::Reactor` instance,
-  # which stores it in an array and waits for any of the connections to be ready for reading.
+  class UnsupportedBackend < StandardError; end
+
+  # Monitors a collection of IO objects, calling a block whenever
+  # any monitored object either receives data or times out, or when the Reactor shuts down.
   #
-  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
-  # just plain IO#select). The call to `NIO::Selector#select` will
-  #
-  # then loops through each of these request objects, and sees if they're complete. If they
-  # have a full header and body then the reactor passes the request to a thread pool.
-  # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
+  # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will
+  # 'wakeup' any IO object that receives data.
   #
-  #
-  #
+  # This class additionally tracks a timeout for every added object,
+  # and wakes up any object when its timeout elapses.
   #
-  #
-  # of this logic lives.
+  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
   class Reactor
-
-
-    #
-
-
-
-
-
-
-
-    # it will be passed to the `app_pool`.
-    def initialize(server, app_pool)
-      @server = server
-      @events = server.events
-      @app_pool = app_pool
-
-      @selector = NIO::Selector.new
-
-      @mutex = Mutex.new
-
-      # Read / Write pipes to wake up internal while loop
-      @ready, @trigger = Puma::Util.pipe
-      @input = []
-      @sleep_for = DefaultSleepFor
+    # Create a new Reactor to monitor IO objects added by #add.
+    # The provided block will be invoked when an IO has data available to read,
+    # its timeout elapses, or when the Reactor shuts down.
+    def initialize(backend, &block)
+      require 'nio'
+      unless backend == :auto || NIO::Selector.backends.include?(backend)
+        raise "unsupported IO selector backend: #{backend} (available backends: #{NIO::Selector.backends.join(', ')})"
+      end
+      @selector = backend == :auto ? NIO::Selector.new : NIO::Selector.new(backend)
+      @input = Queue.new
       @timeouts = []
-
-      mon = @selector.register(@ready, :r)
-      mon.value = @ready
-
-      @monitors = [mon]
+      @block = block
     end
 
-
-
-
-
-
-
-    # will break on `NIO::Selector#select` and return an array.
-    #
-    # ## When a request is added:
-    #
-    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
-    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
-    #
-    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
-    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
-    # When `@trigger` is written-to, the loop "wakes" and the `ready`
-    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
-    # first IO object is the `@ready` object. This first array `[#<IO:fd 10>]`
-    # is saved as a `reads` variable.
-    #
-    # The `reads` variable is iterated through. In the case that the object
-    # is the same as the `@ready` input pipe, then we know that there was a `trigger` event.
-    #
-    # If there was a trigger event, then one byte of `@ready` is read into memory. In the case of the first request,
-    # the reactor sees that it's a `"*"` value and the reactor adds the contents of `@input` into the `sockets` array.
-    # The while then loop continues to iterate again, but now the `sockets` array contains a `Puma::Client` instance in addition
-    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
-    #
-    # Since the `Puma::Client` in this example has data that has not been read yet,
-    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
-    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
-    #
-    # Each element in the first entry is iterated over. The `Puma::Client` object is not
-    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
-    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
-    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
-    # can pick up the request and begin to execute application logic. This is done
-    # via `@app_pool << c`. The `Puma::Client` is then removed from the `sockets` array.
-    #
-    # If the request body is not present then nothing will happen, and the loop will iterate
-    # again. When the client sends more data to the socket the `Puma::Client` object will
-    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
-    # passed to the thread pool.
-    #
-    # ## Time Out Case
-    #
-    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
-    # periodically "time out" of the sleep. One of the functions of this is to check for
-    # any requests that have "timed out". At the end of the loop it's checked to see if
-    # the first element in the `@timeout` array has exceed its allowed time. If so,
-    # the client object is removed from the timeout array, a 408 response is written.
-    # Then its connection is closed, and the object is removed from the `sockets` array
-    # that watches for new data.
-    #
-    # This behavior loops until all the objects that have timed out have been removed.
-    #
-    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
-    # will be set to be equal to the amount of time it will take for the next timeout to occur.
-    # This calculation happens in `calculate_sleep`.
-    def run_internal
-      monitors = @monitors
-      selector = @selector
-
-      while true
-        begin
-          ready = selector.select @sleep_for
-        rescue IOError => e
-          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-          if monitors.any? { |mon| mon.value.closed? }
-            STDERR.puts "Error in select: #{e.message} (#{e.class})"
-            STDERR.puts e.backtrace
-
-            monitors.reject! do |mon|
-              if mon.value.closed?
-                selector.deregister mon.value
-                true
-              end
-            end
-
-            retry
-          else
-            raise
-          end
-        end
-
-        if ready
-          ready.each do |mon|
-            if mon.value == @ready
-              @mutex.synchronize do
-                case @ready.read(1)
-                when "*"
-                  @input.each do |c|
-                    mon = nil
-                    begin
-                      begin
-                        mon = selector.register(c, :r)
-                      rescue ArgumentError
-                        # There is a bug where we seem to be registering an already registered
-                        # client. This code deals with this situation but I wish we didn't have to.
-                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
-                        selector.deregister(c)
-                        mon = selector.register(c, :r)
-                      end
-                    rescue IOError
-                      # Means that the io is closed, so we should ignore this request
-                      # entirely
-                    else
-                      mon.value = c
-                      @timeouts << mon if c.timeout_at
-                      monitors << mon
-                    end
-                  end
-                  @input.clear
-
-                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
-                  calculate_sleep
-                when "c"
-                  monitors.reject! do |submon|
-                    if submon.value == @ready
-                      false
-                    else
-                      submon.value.close
-                      begin
-                        selector.deregister submon.value
-                      rescue IOError
-                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
-                        # we need to swallow it.
-                      end
-                      true
-                    end
-                  end
-                when "!"
-                  return
-                end
-              end
-            else
-              c = mon.value
-
-              # We have to be sure to remove it from the timeout
-              # list or we'll accidentally close the socket when
-              # it's in use!
-              if c.timeout_at
-                @mutex.synchronize do
-                  @timeouts.delete mon
-                end
-              end
-
-              begin
-                if c.try_to_finish
-                  @app_pool << c
-                  clear_monitor mon
-                end
-
-              # Don't report these to the lowlevel_error handler, otherwise
-              # will be flooding them with errors when persistent connections
-              # are closed.
-              rescue ConnectionError
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-
-              # SSL handshake failure
-              rescue MiniSSL::SSLError => e
-                @server.lowlevel_error(e, c.env)
-
-                ssl_socket = c.io
-                begin
-                  addr = ssl_socket.peeraddr.last
-                # EINVAL can happen when browser closes socket w/security exception
-                rescue IOError, Errno::EINVAL
-                  addr = "<unknown>"
-                end
-
-                cert = ssl_socket.peercert
-
-                c.close
-                clear_monitor mon
-
-                @events.ssl_error @server, addr, cert, e
-
-              # The client doesn't know HTTP well
-              rescue HttpParserError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(400)
-                c.close
-
-                clear_monitor mon
-
-                @events.parse_error @server, c.env, e
-              rescue StandardError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-              end
-            end
-          end
-        end
-
-        unless @timeouts.empty?
-          @mutex.synchronize do
-            now = Time.now
-
-            while @timeouts.first.value.timeout_at < now
-              mon = @timeouts.shift
-              c = mon.value
-              c.write_error(408) if c.in_data_phase
-              c.close
-
-              clear_monitor mon
-
-              break if @timeouts.empty?
-            end
-
-            calculate_sleep
-          end
+    # Run the internal select loop, using a background thread by default.
+    def run(background=true)
+      if background
+        @thread = Thread.new do
+          Puma.set_thread_name "reactor"
+          select_loop
         end
+      else
+        select_loop
       end
     end
 
-
-
-
-
-
-
-
-
-
-    ensure
-      @trigger.close
-      @ready.close
+    # Add a new client to monitor.
+    # The object must respond to #timeout and #timeout_at.
+    # Returns false if the reactor is already shut down.
+    def add(client)
+      @input << client
+      @selector.wakeup
+      true
+    rescue ClosedQueueError
+      false
     end
 
-
-
-
-
-
-
-          STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
-          STDERR.puts e.backtrace
-          retry
-        ensure
-          @trigger.close
-          @ready.close
-        end
+    # Shutdown the reactor, blocking until the background thread is finished.
+    def shutdown
+      @input.close
+      begin
+        @selector.wakeup
+      rescue IOError # Ignore if selector is already closed
       end
+      @thread.join if @thread
     end
 
-
-    # sleep for in the main reactor loop when no sockets are being written to.
-    #
-    # The values kept in `@timeouts` are sorted so that the first timeout
-    # comes first in the array. When there are no timeouts the default timeout is used.
-    #
-    # Otherwise a sleep value is set that is the same as the amount of time it
-    # would take for the first element to time out.
-    #
-    # If that value is in the past, then a sleep value of zero is used.
-    def calculate_sleep
-      if @timeouts.empty?
-        @sleep_for = DefaultSleepFor
-      else
-        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f
+    private
 
-
-
-
-
+    def select_loop
+      begin
+        until @input.closed? && @input.empty?
+          # Wakeup any registered object that receives incoming data.
+          # Block until the earliest timeout or Selector#wakeup is called.
+          timeout = (earliest = @timeouts.first) && earliest.timeout
+          @selector.select(timeout) {|mon| wakeup!(mon.value)}
+
+          # Wakeup all objects that timed out.
+          timed_out = @timeouts.take_while {|t| t.timeout == 0}
+          timed_out.each(&method(:wakeup!))
+
+          unless @input.empty?
+            until @input.empty?
+              client = @input.pop
+              register(client) if client.io_ok?
+            end
+            @timeouts.sort_by!(&:timeout_at)
+          end
         end
+      rescue StandardError => e
+        STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
+        STDERR.puts e.backtrace
+        retry
       end
+      # Wakeup all remaining objects on shutdown.
+      @timeouts.each(&@block)
+      @selector.close
     end
 
-    #
-
-
-
-
-
-    # The main body of the reactor loop is in `run_internal` and it
-    # will sleep on `NIO::Selector#select`. When a new connection is added to the
-    # reactor it cannot be added directly to the `sockets` array, because
-    # the `NIO::Selector#select` will not be watching for it yet.
-    #
-    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
-    # the contents of `@input` added to the `sockets` array, and then
-    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
-    # object can be read immediately, it does not block, but instead returns
-    # right away.
-    #
-    # This behavior is accomplished by writing to `@trigger` which wakes up
-    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
-    # pull the contents from `@input` and add them to the sockets array.
-    #
-    # If the object passed in has a timeout value in `timeout_at` then
-    # it is added to a `@timeouts` array. This array is then re-arranged
-    # so that the first element to timeout will be at the front of the
-    # array. Then a value to sleep for is derived in the call to `calculate_sleep`
-    def add(c)
-      @mutex.synchronize do
-        @input << c
-        @trigger << "*"
-      end
-    end
-
-    # Close all watched sockets and clear them from being watched
-    def clear!
-      begin
-        @trigger << "c"
-      rescue IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+    # Start monitoring the object.
+    def register(client)
+      @selector.register(client.to_io, :r).value = client
+      @timeouts << client
+    rescue ArgumentError
+      # unreadable clients raise error when processed by NIO
     end
 
-
-
-
-
-
+    # 'Wake up' a monitored object by calling the provided block.
+    # Stop monitoring the object if the block returns `true`.
+    def wakeup!(client)
+      if @block.call client
+        @selector.deregister client.to_io
+        @timeouts.delete client
      end
-
-      @thread.join
    end
  end
 end
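
The rewritten Reactor replaces the old pipe-and-trigger `run_internal` loop with a small block-based API: `#add` pushes a client onto a closable Queue, `#run` starts the select loop (in a background thread by default), and `#shutdown` closes the queue, wakes the selector and joins the thread. Below is a minimal sketch of driving that API directly, not how Puma itself wires it up; it assumes puma 5.x (with its nio4r dependency) is installed, and `MonitoredSocket` is a hypothetical stand-in for `Puma::Client` that supplies the `#to_io`, `#timeout_at`, `#timeout` and `#io_ok?` methods the reactor expects.

    require 'io/wait'
    require 'puma'          # defines Puma.set_thread_name, used by the reactor thread
    require 'puma/reactor'

    # Hypothetical stand-in for Puma::Client: the reactor only needs
    # #to_io, #timeout_at, #timeout and #io_ok? (see register/select_loop above).
    class MonitoredSocket
      attr_reader :to_io, :timeout_at

      def initialize(io, seconds)
        @to_io      = io
        @timeout_at = Process.clock_gettime(Process::CLOCK_MONOTONIC) + seconds
      end

      # Seconds left until the deadline; 0 means "already timed out", which is
      # how the select loop's take_while detects expired entries.
      def timeout
        [@timeout_at - Process.clock_gettime(Process::CLOCK_MONOTONIC), 0].max
      end

      def io_ok?
        !@to_io.closed?
      end
    end

    r, w = IO.pipe

    # The block runs when a socket becomes readable, when its timeout elapses,
    # or on shutdown; returning true tells the reactor to stop monitoring it.
    reactor = Puma::Reactor.new(:auto) do |sock|
      if sock.to_io.wait_readable(0)
        puts "readable: #{sock.to_io.read_nonblock(64).inspect}"
        true                                   # done with this socket
      elsif sock.timeout == 0
        puts "timed out"
        true
      else
        false                                  # keep monitoring
      end
    end

    reactor.run                                # background "reactor" thread
    reactor.add(MonitoredSocket.new(r, 5))     # returns false once shut down
    w.write "hello"
    sleep 0.1                                  # give the reactor thread a moment
    reactor.shutdown                           # close the queue and join the thread

In Puma itself the block is supplied by `Puma::Server` (its `reactor_wakeup` method in this release), which hands a fully buffered request back to the thread pool or times out the connection, and `add` returning false after shutdown is how the server learns the reactor is no longer accepting clients.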