puma 3.12.6-java → 4.0.0-java
Potentially problematic release.
- checksums.yaml +4 -4
- data/History.md +24 -20
- data/README.md +29 -9
- data/docs/architecture.md +1 -0
- data/docs/deployment.md +24 -4
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/restart.md +4 -2
- data/docs/systemd.md +27 -9
- data/ext/puma_http11/PumaHttp11Service.java +2 -0
- data/ext/puma_http11/http11_parser.c +1 -3
- data/ext/puma_http11/http11_parser.rl +1 -3
- data/ext/puma_http11/mini_ssl.c +20 -4
- data/ext/puma_http11/org/jruby/puma/IOBuffer.java +72 -0
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +11 -4
- data/lib/puma/app/status.rb +3 -2
- data/lib/puma/binder.rb +9 -1
- data/lib/puma/client.rb +45 -35
- data/lib/puma/cluster.rb +38 -14
- data/lib/puma/configuration.rb +2 -1
- data/lib/puma/const.rb +6 -10
- data/lib/puma/control_cli.rb +10 -0
- data/lib/puma/dsl.rb +45 -3
- data/lib/puma/io_buffer.rb +1 -6
- data/lib/puma/launcher.rb +10 -11
- data/lib/puma/minissl.rb +13 -1
- data/lib/puma/puma_http11.jar +0 -0
- data/lib/puma/reactor.rb +104 -53
- data/lib/puma/runner.rb +1 -1
- data/lib/puma/server.rb +26 -78
- data/lib/puma/single.rb +2 -2
- data/lib/puma/thread_pool.rb +5 -1
- data/lib/puma/util.rb +1 -6
- data/tools/jungle/init.d/puma +5 -5
- metadata +20 -7
- data/lib/puma/compat.rb +0 -14
- data/lib/puma/java_io_buffer.rb +0 -47
- data/lib/puma/rack/backports/uri/common_193.rb +0 -33
data/lib/puma/control_cli.rb
CHANGED
@@ -206,6 +206,16 @@ module Puma
       when "phased-restart"
         Process.kill "SIGUSR1", @pid

+      when "status"
+        begin
+          Process.kill 0, @pid
+          puts "Puma is started"
+        rescue Errno::ESRCH
+          raise "Puma is not running"
+        end
+
+        return
+
       else
         return
       end
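The new `status` branch relies on `Process.kill 0`, which performs the existence and permission checks for a PID without delivering any signal. A minimal standalone sketch of that probe (plain Ruby, not part of puma; the helper name is made up):

```ruby
# Probe whether a process is alive without actually signalling it.
def process_alive?(pid)
  Process.kill(0, pid)   # signal 0 = "check only", no signal is sent
  true
rescue Errno::ESRCH      # no such process
  false
rescue Errno::EPERM      # process exists but is owned by another user
  true
end

puts process_alive?(Process.pid)   # => true
```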
data/lib/puma/dsl.rb
CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+require 'puma/const'
+
 module Puma
   # The methods that are available for use inside the config file.
   # These same methods are used in Puma cli and the rack handler
@@ -103,7 +105,12 @@ module Puma
       end

       if opts[:no_token]
-        auth_token = :none
+        # We need to use 'none' rather than :none because this value will be
+        # passed on to an instance of OptionParser, which doesn't support
+        # symbols as option values.
+        #
+        # See: https://github.com/puma/puma/issues/1193#issuecomment-305995488
+        auth_token = 'none'
       else
         auth_token = opts[:auth_token]
         auth_token ||= Configuration.random_token
@@ -295,12 +302,15 @@ module Puma

     def ssl_bind(host, port, opts)
       verify = opts.fetch(:verify_mode, 'none')
+      no_tlsv1 = opts.fetch(:no_tlsv1, 'false')
+      ca_additions = "&ca=#{opts[:ca]}" if ['peer', 'force_peer'].include?(verify)

       if defined?(JRUBY_VERSION)
         keystore_additions = "keystore=#{opts[:keystore]}&keystore-pass=#{opts[:keystore_pass]}"
-        bind "ssl://#{host}:#{port}?cert=#{opts[:cert]}&key=#{opts[:key]}&#{keystore_additions}&verify_mode=#{verify}"
+        bind "ssl://#{host}:#{port}?cert=#{opts[:cert]}&key=#{opts[:key]}&#{keystore_additions}&verify_mode=#{verify}&no_tlsv1=#{no_tlsv1}#{ca_additions}"
       else
-        bind "ssl://#{host}:#{port}?cert=#{opts[:cert]}&key=#{opts[:key]}&verify_mode=#{verify}"
+        ssl_cipher_filter = "&ssl_cipher_filter=#{opts[:ssl_cipher_filter]}" if opts[:ssl_cipher_filter]
+        bind "ssl://#{host}:#{port}?cert=#{opts[:cert]}&key=#{opts[:key]}#{ssl_cipher_filter}&verify_mode=#{verify}&no_tlsv1=#{no_tlsv1}#{ca_additions}"
       end
     end

@@ -375,6 +385,21 @@ module Puma

     alias_method :after_worker_boot, :after_worker_fork

+    # Code to run out-of-band when the worker is idle.
+    # These hooks run immediately after a request has finished
+    # processing and there are no busy threads on the worker.
+    # The worker doesn't accept new requests until this code finishes.
+    #
+    # This hook is useful for running out-of-band garbage collection
+    # or scheduling asynchronous tasks to execute after a response.
+    #
+    # This can be called multiple times to add hooks.
+    #
+    def out_of_band(&block)
+      @options[:out_of_band] ||= []
+      @options[:out_of_band] << block
+    end
+
     # The directory to operate out of.
     def directory(dir)
       @options[:directory] = dir.to_s
@@ -424,6 +449,16 @@ module Puma
       @options[:prune_bundler] = answer
     end

+    # In environments where SIGTERM is something expected, instructing
+    # puma to shutdown gracefully ( for example in Kubernetes, where
+    # rolling restart is guaranteed usually on infrastructure level )
+    # SignalException should not be raised for SIGTERM
+    #
+    # When set to false, if puma process receives SIGTERM, it won't raise SignalException
+    def raise_exception_on_sigterm(answer=true)
+      @options[:raise_exception_on_sigterm] = answer
+    end
+
     # Additional text to display in process listing
     def tag(string)
       @options[:tag] = string.to_s
@@ -434,6 +469,13 @@ module Puma
     # that have not checked in within the given +timeout+.
     # This mitigates hung processes. Default value is 60 seconds.
     def worker_timeout(timeout)
+      timeout = Integer(timeout)
+      min = Const::WORKER_CHECK_INTERVAL
+
+      if timeout <= min
+        raise "The minimum worker_timeout must be greater than the worker reporting interval (#{min})"
+      end
+
       @options[:worker_timeout] = Integer(timeout)
     end

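Taken together, the DSL additions above can be exercised from an ordinary puma config file. A hedged sketch (file name, paths and values are placeholders; `ssl_cipher_filter` only applies to the non-JRuby branch of `ssl_bind`):

```ruby
# config/puma.rb (illustrative values only)
workers 2
threads 1, 5

# Must stay above Const::WORKER_CHECK_INTERVAL, or worker_timeout now raises.
worker_timeout 60

# New out-of-band hook: runs while the worker is idle after a request.
out_of_band do
  GC.start
end

# New flag: don't raise SignalException on SIGTERM (e.g. under Kubernetes).
raise_exception_on_sigterm false

# ssl_bind now forwards :no_tlsv1 (and :ssl_cipher_filter, plus :ca when
# verify_mode is 'peer'/'force_peer') into the bind URI.
ssl_bind '0.0.0.0', '9292',
  cert: 'config/ssl/cert.pem',
  key: 'config/ssl/key.pem',
  verify_mode: 'none',
  no_tlsv1: true
```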
data/lib/puma/io_buffer.rb
CHANGED
data/lib/puma/launcher.rb
CHANGED
@@ -214,6 +214,15 @@ module Puma
       end
     end

+    def close_binder_listeners
+      @binder.listeners.each do |l, io|
+        io.close
+        uri = URI.parse(l)
+        next unless uri.scheme == 'unix'
+        File.unlink("#{uri.host}#{uri.path}")
+      end
+    end
+
     private

     def reload_worker_directory
@@ -319,16 +328,6 @@ module Puma
       @options[:prune_bundler] && clustered? && !@options[:preload_app]
     end

-    def close_binder_listeners
-      @binder.listeners.each do |l, io|
-        io.close
-        uri = URI.parse(l)
-        next unless uri.scheme == 'unix'
-        File.unlink("#{uri.host}#{uri.path}")
-      end
-    end
-
-
     def generate_restart_data
       if dir = @options[:directory]
         @restart_dir = dir
@@ -397,7 +396,7 @@ module Puma
       Signal.trap "SIGTERM" do
         graceful_stop

-        raise
+        raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
       end
     rescue Exception
       log "*** SIGTERM not implemented, signal based gracefully stopping unavailable!"
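The behaviour of the rewritten trap handler can be sketched outside of puma: the handler always performs the graceful stop, and only re-raises `SignalException` when the option asks for it. A standalone approximation (not puma's actual code; the local `options` hash stands in for `@options`, and the `puts` stands in for `graceful_stop`):

```ruby
options = { raise_exception_on_sigterm: true }   # assumed option hash

Signal.trap("SIGTERM") do
  puts "shutting down gracefully..."             # stands in for graceful_stop
  raise(SignalException, "SIGTERM") if options[:raise_exception_on_sigterm]
end

begin
  Process.kill("SIGTERM", Process.pid)           # send ourselves SIGTERM
  sleep 1                                        # interrupted by the handler
rescue SignalException => e
  puts "main thread saw #{e.message}"            # only when the flag is enabled
end
```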
data/lib/puma/minissl.rb
CHANGED
@@ -2,7 +2,7 @@

 begin
   require 'io/wait'
-
+rescue LoadError
 end

 module Puma
@@ -177,6 +177,11 @@ module Puma

     class Context
       attr_accessor :verify_mode
+      attr_reader :no_tlsv1
+
+      def initialize
+        @no_tlsv1 = false
+      end

       if defined?(JRUBY_VERSION)
         # jruby-specific Context properties: java uses a keystore and password pair rather than a cert/key pair
@@ -215,11 +220,18 @@ module Puma
          @ca = ca
        end

+
        def check
          raise "Key not configured" unless @key
          raise "Cert not configured" unless @cert
        end
      end
+
+      def no_tlsv1=(tlsv1)
+        raise ArgumentError, "Invalid value of no_tlsv1" unless ['true', 'false', true, false].include?(tlsv1)
+        @no_tlsv1 = tlsv1
+      end
+
    end

    VERIFY_NONE = 0
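The new `Context#no_tlsv1=` setter only accepts `true`, `false`, `'true'` or `'false'`. A small illustrative use (assumes the puma gem, including its native or Java extension, is installed and loadable):

```ruby
require 'puma/minissl'

ctx = Puma::MiniSSL::Context.new
ctx.no_tlsv1            # => false, set by the new #initialize
ctx.no_tlsv1 = 'true'   # strings and booleans are both accepted

begin
  ctx.no_tlsv1 = :nope  # anything else is rejected
rescue ArgumentError => e
  puts e.message        # => "Invalid value of no_tlsv1"
end
```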
data/lib/puma/puma_http11.jar
CHANGED
Binary file
data/lib/puma/reactor.rb
CHANGED
@@ -3,6 +3,8 @@
 require 'puma/util'
 require 'puma/minissl'

+require 'nio'
+
 module Puma
   # Internal Docs, Not a public interface.
   #
@@ -18,12 +20,13 @@ module Puma
   #
   # ## Reactor Flow
   #
-  # A
-  #
+  # A connection comes into a `Puma::Server` instance, it is then passed to a `Puma::Reactor` instance,
+  # which stores it in an array and waits for any of the connections to be ready for reading.
   #
-  #
+  # The waiting/wake up is performed with nio4r, which will use the apropriate backend (libev, Java NIO or
+  # just plain IO#select). The call to `NIO::Selector#select` will "wake up" and
   # return the references to any objects that caused it to "wake". The reactor
-  # then loops through each of these request objects, and sees if they're
+  # then loops through each of these request objects, and sees if they're complete. If they
   # have a full header and body then the reactor passes the request to a thread pool.
   # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
   #
@@ -38,7 +41,7 @@ module Puma
     # Creates an instance of Puma::Reactor
     #
     # The `server` argument is an instance of `Puma::Server`
-    #
+    # that is used to write a response for "low level errors"
     # when there is an exception inside of the reactor.
    #
    # The `app_pool` is an instance of `Puma::ThreadPool`.
@@ -49,6 +52,8 @@ module Puma
      @events = server.events
      @app_pool = app_pool

+      @selector = NIO::Selector.new
+
      @mutex = Mutex.new

      # Read / Write pipes to wake up internal while loop
@@ -57,24 +62,26 @@ module Puma
      @sleep_for = DefaultSleepFor
      @timeouts = []

-
+      mon = @selector.register(@ready, :r)
+      mon.value = @ready
+
+      @monitors = [mon]
    end

    private

-
    # Until a request is added via the `add` method this method will internally
    # loop, waiting on the `sockets` array objects. The only object in this
    # array at first is the `@ready` IO object, which is the read end of a pipe
    # connected to `@trigger` object. When `@trigger` is written to, then the loop
-    # will break on `
+    # will break on `NIO::Selector#select` and return an array.
    #
    # ## When a request is added:
    #
    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
    #
-    # When that happens, the internal loop stops blocking at `
+    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
    # When `@trigger` is written-to, the loop "wakes" and the `ready`
    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
@@ -90,11 +97,11 @@ module Puma
    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
    #
    # Since the `Puma::Client` in this example has data that has not been read yet,
-    # the `
+    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
    #
    # Each element in the first entry is iterated over. The `Puma::Client` object is not
-    # the `@ready` pipe, so the reactor checks to see if it has the
+    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
    # can pick up the request and begin to execute application logic. This is done
@@ -102,56 +109,93 @@ module Puma
    #
    # If the request body is not present then nothing will happen, and the loop will iterate
    # again. When the client sends more data to the socket the `Puma::Client` object will
-    # wake up the `
+    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
    # passed to the thread pool.
    #
    # ## Time Out Case
    #
-    # In addition to being woken via a write to one of the sockets the `
+    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
    # periodically "time out" of the sleep. One of the functions of this is to check for
    # any requests that have "timed out". At the end of the loop it's checked to see if
-    # the first element in the `@timeout` array has exceed
-    # the client object is removed from the timeout
-    # Then
+    # the first element in the `@timeout` array has exceed its allowed time. If so,
+    # the client object is removed from the timeout array, a 408 response is written.
+    # Then its connection is closed, and the object is removed from the `sockets` array
    # that watches for new data.
    #
    # This behavior loops until all the objects that have timed out have been removed.
    #
-    # Once all the timeouts have been processed, the next duration of the `
+    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
    # will be set to be equal to the amount of time it will take for the next timeout to occur.
    # This calculation happens in `calculate_sleep`.
    def run_internal
-
+      monitors = @monitors
+      selector = @selector

      while true
        begin
-          ready =
+          ready = selector.select @sleep_for
        rescue IOError => e
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-          if
+          if monitors.any? { |mon| mon.value.closed? }
            STDERR.puts "Error in select: #{e.message} (#{e.class})"
            STDERR.puts e.backtrace
-
+
+            monitors.reject! do |mon|
+              if mon.value.closed?
+                selector.deregister mon.value
+                true
+              end
+            end
+
            retry
          else
            raise
          end
        end

-        if ready
-
-          if
+        if ready
+          ready.each do |mon|
+            if mon.value == @ready
              @mutex.synchronize do
                case @ready.read(1)
                when "*"
-
+                  @input.each do |c|
+                    mon = nil
+                    begin
+                      begin
+                        mon = selector.register(c, :r)
+                      rescue ArgumentError
+                        # There is a bug where we seem to be registering an already registered
+                        # client. This code deals with this situation but I wish we didn't have to.
+                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
+                        selector.deregister(c)
+                        mon = selector.register(c, :r)
+                      end
+                    rescue IOError
+                      # Means that the io is closed, so we should ignore this request
+                      # entirely
+                    else
+                      mon.value = c
+                      @timeouts << mon if c.timeout_at
+                      monitors << mon
+                    end
+                  end
                  @input.clear
+
+                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
+                  calculate_sleep
                when "c"
-
-                  if
+                  monitors.reject! do |submon|
+                    if submon.value == @ready
                      false
                    else
-
+                      submon.value.close
+                      begin
+                        selector.deregister submon.value
+                      rescue IOError
+                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
+                        # we need to swallow it.
+                      end
                      true
                    end
                  end
@@ -160,19 +204,21 @@ module Puma
                end
              end
            else
+              c = mon.value
+
              # We have to be sure to remove it from the timeout
              # list or we'll accidentally close the socket when
              # it's in use!
              if c.timeout_at
                @mutex.synchronize do
-                  @timeouts.delete
+                  @timeouts.delete mon
                end
              end

              begin
                if c.try_to_finish
                  @app_pool << c
-
+                  clear_monitor mon
                end

              # Don't report these to the lowlevel_error handler, otherwise
@@ -182,18 +228,23 @@ module Puma
                c.write_500
                c.close

-
+                clear_monitor mon

              # SSL handshake failure
              rescue MiniSSL::SSLError => e
                @server.lowlevel_error(e, c.env)

                ssl_socket = c.io
-
+                begin
+                  addr = ssl_socket.peeraddr.last
+                rescue IOError
+                  addr = "<unknown>"
+                end
+
                cert = ssl_socket.peercert

                c.close
-
+                clear_monitor mon

                @events.ssl_error @server, addr, cert, e

@@ -204,7 +255,7 @@ module Puma
                c.write_400
                c.close

-
+                clear_monitor mon

                @events.parse_error @server, c.env, e
              rescue StandardError => e
@@ -213,7 +264,7 @@ module Puma
                c.write_500
                c.close

-
+                clear_monitor mon
              end
            end
          end
@@ -223,11 +274,13 @@ module Puma
      @mutex.synchronize do
        now = Time.now

-        while @timeouts.first.timeout_at < now
-
+        while @timeouts.first.value.timeout_at < now
+          mon = @timeouts.shift
+          c = mon.value
          c.write_408 if c.in_data_phase
          c.close
-
+
+          clear_monitor mon

          break if @timeouts.empty?
        end
@@ -238,6 +291,11 @@ module Puma
      end
    end

+    def clear_monitor(mon)
+      @selector.deregister mon.value
+      @monitors.delete mon
+    end
+
    public

    def run
@@ -262,7 +320,7 @@ module Puma
      end
    end

-    # The `calculate_sleep` sets the value that the `
+    # The `calculate_sleep` sets the value that the `NIO::Selector#select` will
    # sleep for in the main reactor loop when no sockets are being written to.
    #
    # The values kept in `@timeouts` are sorted so that the first timeout
@@ -276,7 +334,7 @@ module Puma
      if @timeouts.empty?
        @sleep_for = DefaultSleepFor
      else
-        diff = @timeouts.first.timeout_at.to_f - Time.now.to_f
+        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f

        if diff < 0.0
          @sleep_for = 0
@@ -293,18 +351,18 @@ module Puma
    # object.
    #
    # The main body of the reactor loop is in `run_internal` and it
-    # will sleep on `
-    # reactor it cannot be added directly to the `sockets`
-    # the `
+    # will sleep on `NIO::Selector#select`. When a new connection is added to the
+    # reactor it cannot be added directly to the `sockets` array, because
+    # the `NIO::Selector#select` will not be watching for it yet.
    #
-    # Instead what needs to happen is that `
+    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
    # the contents of `@input` added to the `sockets` array, and then
-    # another call to `
+    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
    # object can be read immediately, it does not block, but instead returns
    # right away.
    #
    # This behavior is accomplished by writing to `@trigger` which wakes up
-    # the `
+    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
    # pull the contents from `@input` and add them to the sockets array.
    #
    # If the object passed in has a timeout value in `timeout_at` then
@@ -315,13 +373,6 @@ module Puma
      @mutex.synchronize do
        @input << c
        @trigger << "*"
-
-        if c.timeout_at
-          @timeouts << c
-          @timeouts.sort! { |a,b| a.timeout_at <=> b.timeout_at }
-
-          calculate_sleep
-        end
      end
    end

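The reactor's switch from `IO.select` to nio4r revolves around three calls: `NIO::Selector#register` (which returns an `NIO::Monitor` whose `value` carries the `Puma::Client`), `#select` with a timeout, and `#deregister`. A standalone sketch of that pattern (requires the nio4r gem; the `:fake_client` payload is just a placeholder for what puma stores in `mon.value`):

```ruby
require 'nio'

selector = NIO::Selector.new
reader, writer = IO.pipe

mon = selector.register(reader, :r)   # => NIO::Monitor watching for readability
mon.value = :fake_client              # puma stores the Puma::Client here

writer << "*"                         # make the reader readable, like @trigger

ready = selector.select(1)            # ready monitors, or nil on timeout
ready.each do |m|
  puts "#{m.value.inspect} is readable: #{m.io.read(1).inspect}"
end

selector.deregister(reader)
```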