puma 3.0.0.rc1 → 5.0.0.beta1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- checksums.yaml +5 -5
- data/{History.txt → History.md} +703 -70
- data/LICENSE +23 -20
- data/README.md +173 -163
- data/docs/architecture.md +37 -0
- data/{DEPLOYMENT.md → docs/deployment.md} +28 -6
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +13 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/{tools → docs}/jungle/upstart/README.md +0 -0
- data/{tools → docs}/jungle/upstart/puma-manager.conf +0 -0
- data/{tools → docs}/jungle/upstart/puma.conf +1 -1
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +38 -0
- data/docs/restart.md +41 -0
- data/docs/signals.md +57 -3
- data/docs/systemd.md +228 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/extconf.rb +16 -0
- data/ext/puma_http11/http11_parser.c +287 -468
- data/ext/puma_http11/http11_parser.h +1 -0
- data/ext/puma_http11/http11_parser.java.rl +21 -37
- data/ext/puma_http11/http11_parser.rl +10 -9
- data/ext/puma_http11/http11_parser_common.rl +4 -4
- data/ext/puma_http11/mini_ssl.c +159 -10
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -116
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +99 -132
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +30 -6
- data/ext/puma_http11/puma_http11.c +6 -38
- data/lib/puma.rb +25 -5
- data/lib/puma/accept_nonblock.rb +7 -1
- data/lib/puma/app/status.rb +53 -26
- data/lib/puma/binder.rb +150 -119
- data/lib/puma/cli.rb +56 -38
- data/lib/puma/client.rb +277 -80
- data/lib/puma/cluster.rb +326 -130
- data/lib/puma/commonlogger.rb +21 -20
- data/lib/puma/configuration.rb +160 -161
- data/lib/puma/const.rb +50 -47
- data/lib/puma/control_cli.rb +104 -63
- data/lib/puma/detect.rb +13 -1
- data/lib/puma/dsl.rb +463 -114
- data/lib/puma/events.rb +22 -13
- data/lib/puma/io_buffer.rb +9 -5
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/launcher.rb +195 -105
- data/lib/puma/minissl.rb +110 -4
- data/lib/puma/minissl/context_builder.rb +76 -0
- data/lib/puma/null_io.rb +9 -14
- data/lib/puma/plugin.rb +32 -12
- data/lib/puma/plugin/tmp_restart.rb +19 -6
- data/lib/puma/rack/builder.rb +7 -5
- data/lib/puma/rack/urlmap.rb +11 -8
- data/lib/puma/rack_default.rb +2 -0
- data/lib/puma/reactor.rb +242 -32
- data/lib/puma/runner.rb +41 -30
- data/lib/puma/server.rb +265 -183
- data/lib/puma/single.rb +22 -63
- data/lib/puma/state_file.rb +9 -2
- data/lib/puma/thread_pool.rb +179 -68
- data/lib/puma/util.rb +3 -11
- data/lib/rack/handler/puma.rb +60 -11
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +1 -2
- metadata +35 -99
- data/COPYING +0 -55
- data/Gemfile +0 -13
- data/Manifest.txt +0 -79
- data/Rakefile +0 -158
- data/docs/config.md +0 -0
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/capistrano.rb +0 -94
- data/lib/puma/compat.rb +0 -18
- data/lib/puma/convenient.rb +0 -23
- data/lib/puma/daemon_ext.rb +0 -31
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack/backports/uri/common_18.rb +0 -56
- data/lib/puma/rack/backports/uri/common_192.rb +0 -52
- data/lib/puma/rack/backports/uri/common_193.rb +0 -29
- data/lib/puma/tcp_logger.rb +0 -32
- data/puma.gemspec +0 -52
- data/tools/jungle/README.md +0 -9
- data/tools/jungle/init.d/README.md +0 -54
- data/tools/jungle/init.d/puma +0 -394
- data/tools/jungle/init.d/run-puma +0 -3
data/lib/puma/rack/builder.rb
CHANGED
```diff
@@ -1,3 +1,8 @@
+# frozen_string_literal: true
+
+module Puma
+end
+
 module Puma::Rack
   class Options
     def parse!(args)
```
```diff
@@ -62,10 +67,6 @@ module Puma::Rack
           options[:environment] = e
         }
 
-        opts.on("-D", "--daemonize", "run daemonized in the background") { |d|
-          options[:daemonize] = d ? true : false
-        }
-
         opts.on("-P", "--pid FILE", "file to store PID") { |f|
           options[:pid] = ::File.expand_path(f)
         }
```
```diff
@@ -107,7 +108,8 @@ module Puma::Rack
 
         has_options = false
         server.valid_options.each do |name, description|
-          next if name.to_s
+          next if name.to_s =~ /^(Host|Port)[^a-zA-Z]/ # ignore handler's host and port options, we do our own.
+
           info << " -O %-21s %s" % [name, description]
           has_options = true
         end
```
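The filter in the last hunk is worth a second look: it only skips handler options literally named `Host` or `Port` followed by a non-letter (as in the usual `Host=HOST` option strings). A quick stand-alone sketch of that behaviour, using made-up option names:

```ruby
# Sketch of the -O option filter introduced above: Puma binds its own
# sockets, so the handler's Host/Port options are hidden from the list.
filter = /^(Host|Port)[^a-zA-Z]/

p "Host=HOST".match?(filter)    # => true  (skipped)
p "Port=PORT".match?(filter)    # => true  (skipped)
p "Hostname=X".match?(filter)   # => false (still shown as an -O option)
p "Verbose".match?(filter)      # => false (still shown as an -O option)
```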
data/lib/puma/rack/urlmap.rb
CHANGED
```diff
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Puma::Rack
   # Rack::URLMap takes a hash mapping urls or paths to apps, and
   # dispatches accordingly. Support for HTTP/1.1 host names exists if
```
```diff
@@ -43,15 +45,17 @@ module Puma::Rack
     def call(env)
       path = env['PATH_INFO']
       script_name = env['SCRIPT_NAME']
-
-
-
+      http_host = env['HTTP_HOST']
+      server_name = env['SERVER_NAME']
+      server_port = env['SERVER_PORT']
+
+      is_same_server = casecmp?(http_host, server_name) ||
+                       casecmp?(http_host, "#{server_name}:#{server_port}")
 
       @mapping.each do |host, location, match, app|
-        unless casecmp?(
-          || casecmp?(
-          || (!host &&
-              casecmp?(hHost, sName+':'+sPort)))
+        unless casecmp?(http_host, host) \
+            || casecmp?(server_name, host) \
+            || (!host && is_same_server)
           next
         end
 
```
```diff
@@ -87,4 +91,3 @@ module Puma::Rack
     end
   end
 end
-
```
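The rewritten `call` replaces the old inline condition with named locals. The idea is easier to see in isolation; the sketch below redoes the comparison outside of URLMap (the `casecmp?` helper here is a simplified stand-in for the class's private helper, and the env values are made up):

```ruby
# A host-less mapping entry should match when the request's HTTP_HOST is the
# server's own name, with or without the port, compared case-insensitively.
def casecmp?(v1, v2)
  return false if v1.nil? || v2.nil?
  v1.casecmp(v2).zero?
end

env = {
  'HTTP_HOST'   => 'Example.ORG:9292',
  'SERVER_NAME' => 'example.org',
  'SERVER_PORT' => '9292'
}

is_same_server = casecmp?(env['HTTP_HOST'], env['SERVER_NAME']) ||
                 casecmp?(env['HTTP_HOST'], "#{env['SERVER_NAME']}:#{env['SERVER_PORT']}")

p is_same_server # => true, so a mapping entry with no explicit host may handle this request
```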
data/lib/puma/rack_default.rb
CHANGED
data/lib/puma/reactor.rb
CHANGED
```diff
@@ -1,57 +1,206 @@
+# frozen_string_literal: true
+
 require 'puma/util'
 require 'puma/minissl'
 
+require 'nio'
+
 module Puma
+  # Internal Docs, Not a public interface.
+  #
+  # The Reactor object is responsible for ensuring that a request has been
+  # completely received before it starts to be processed. This may be known as read buffering.
+  # If read buffering is not done, and no other read buffering is performed (such as by an application server
+  # such as nginx) then the application would be subject to a slow client attack.
+  #
+  # Each Puma "worker" process has its own Reactor. For example if you start puma with `$ puma -w 5` then
+  # it will have 5 workers and each worker will have it's own reactor.
+  #
+  # For a graphical representation of how the reactor works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
+  #
+  # ## Reactor Flow
+  #
+  # A connection comes into a `Puma::Server` instance, it is then passed to a `Puma::Reactor` instance,
+  # which stores it in an array and waits for any of the connections to be ready for reading.
+  #
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev, Java NIO or
+  # just plain IO#select). The call to `NIO::Selector#select` will "wake up" and
+  # return the references to any objects that caused it to "wake". The reactor
+  # then loops through each of these request objects, and sees if they're complete. If they
+  # have a full header and body then the reactor passes the request to a thread pool.
+  # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
+  #
+  # If the request is not complete, then it stays in the array, and the next time any
+  # data is written to that socket reference, then the loop is woken up and it is checked for completeness again.
+  #
+  # A detailed example is given in the docs for `run_internal` which is where the bulk
+  # of this logic lives.
   class Reactor
     DefaultSleepFor = 5
 
+    # Creates an instance of Puma::Reactor
+    #
+    # The `server` argument is an instance of `Puma::Server`
+    # that is used to write a response for "low level errors"
+    # when there is an exception inside of the reactor.
+    #
+    # The `app_pool` is an instance of `Puma::ThreadPool`.
+    # Once a request is fully formed (header and body are received)
+    # it will be passed to the `app_pool`.
     def initialize(server, app_pool)
       @server = server
       @events = server.events
       @app_pool = app_pool
 
+      @selector = NIO::Selector.new
+
       @mutex = Mutex.new
+
+      # Read / Write pipes to wake up internal while loop
       @ready, @trigger = Puma::Util.pipe
       @input = []
       @sleep_for = DefaultSleepFor
       @timeouts = []
 
-
+      mon = @selector.register(@ready, :r)
+      mon.value = @ready
+
+      @monitors = [mon]
     end
 
     private
 
+    # Until a request is added via the `add` method this method will internally
+    # loop, waiting on the `sockets` array objects. The only object in this
+    # array at first is the `@ready` IO object, which is the read end of a pipe
+    # connected to `@trigger` object. When `@trigger` is written to, then the loop
+    # will break on `NIO::Selector#select` and return an array.
+    #
+    # ## When a request is added:
+    #
+    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
+    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
+    #
+    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
+    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
+    # When `@trigger` is written-to, the loop "wakes" and the `ready`
+    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
+    # first IO object is the `@ready` object. This first array `[#<IO:fd 10>]`
+    # is saved as a `reads` variable.
+    #
+    # The `reads` variable is iterated through. In the case that the object
+    # is the same as the `@ready` input pipe, then we know that there was a `trigger` event.
+    #
+    # If there was a trigger event, then one byte of `@ready` is read into memory. In the case of the first request,
+    # the reactor sees that it's a `"*"` value and the reactor adds the contents of `@input` into the `sockets` array.
+    # The while then loop continues to iterate again, but now the `sockets` array contains a `Puma::Client` instance in addition
+    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
+    #
+    # Since the `Puma::Client` in this example has data that has not been read yet,
+    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
+    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
+    #
+    # Each element in the first entry is iterated over. The `Puma::Client` object is not
+    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
+    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
+    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
+    # can pick up the request and begin to execute application logic. This is done
+    # via `@app_pool << c`. The `Puma::Client` is then removed from the `sockets` array.
+    #
+    # If the request body is not present then nothing will happen, and the loop will iterate
+    # again. When the client sends more data to the socket the `Puma::Client` object will
+    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
+    # passed to the thread pool.
+    #
+    # ## Time Out Case
+    #
+    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
+    # periodically "time out" of the sleep. One of the functions of this is to check for
+    # any requests that have "timed out". At the end of the loop it's checked to see if
+    # the first element in the `@timeout` array has exceed its allowed time. If so,
+    # the client object is removed from the timeout array, a 408 response is written.
+    # Then its connection is closed, and the object is removed from the `sockets` array
+    # that watches for new data.
+    #
+    # This behavior loops until all the objects that have timed out have been removed.
+    #
+    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
+    # will be set to be equal to the amount of time it will take for the next timeout to occur.
+    # This calculation happens in `calculate_sleep`.
     def run_internal
-
+      monitors = @monitors
+      selector = @selector
 
       while true
        begin
-          ready =
+          ready = selector.select @sleep_for
        rescue IOError => e
-
+          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
+          if monitors.any? { |mon| mon.value.closed? }
            STDERR.puts "Error in select: #{e.message} (#{e.class})"
            STDERR.puts e.backtrace
-
+
+            monitors.reject! do |mon|
+              if mon.value.closed?
+                selector.deregister mon.value
+                true
+              end
+            end
+
            retry
          else
            raise
          end
        end
 
-        if ready
-
-          if
+        if ready
+          ready.each do |mon|
+            if mon.value == @ready
              @mutex.synchronize do
                case @ready.read(1)
                when "*"
-
+                  @input.each do |c|
+                    mon = nil
+                    begin
+                      begin
+                        mon = selector.register(c, :r)
+                      rescue ArgumentError
+                        # There is a bug where we seem to be registering an already registered
+                        # client. This code deals with this situation but I wish we didn't have to.
+                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
+                        selector.deregister(c)
+                        mon = selector.register(c, :r)
+                      end
+                    rescue IOError
+                      # Means that the io is closed, so we should ignore this request
+                      # entirely
+                    else
+                      mon.value = c
+                      @timeouts << mon if c.timeout_at
+                      monitors << mon
+                    end
+                  end
                  @input.clear
+
+                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
+                  calculate_sleep
                when "c"
-
-                  if
+                  monitors.reject! do |submon|
+                    if submon.value == @ready
                      false
                    else
-
+                      if submon.value.can_close?
+                        submon.value.close
+                      else
+                        # Pass remaining open client connections to the thread pool.
+                        @app_pool << submon.value
+                      end
+                      begin
+                        selector.deregister submon.value
+                      rescue IOError
+                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
+                        # we need to swallow it.
+                      end
                      true
                    end
                  end
```
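The core of this rewrite is swapping a plain `IO.select` over an array for an `NIO::Selector` plus the `@ready`/`@trigger` wake-up pipe described in the comments above. The pattern is easier to see stripped of Puma's bookkeeping; the following is an illustrative sketch only (the variable names and the fake connection are mine, not Puma's):

```ruby
require 'nio'

selector = NIO::Selector.new
ready_r, trigger_w = IO.pipe          # plays the role of @ready / @trigger above

mon = selector.register(ready_r, :r)  # monitor the read end of the wake-up pipe
mon.value = ready_r                   # nio4r lets us attach arbitrary data to a monitor

pending = Queue.new                   # connections handed over by another thread

producer = Thread.new do
  sleep 0.1
  pending << "fake-connection"
  trigger_w << "*"                    # one byte is enough to wake the selector loop
end

loop do
  readable = selector.select(5)       # nil on timeout, otherwise the ready monitors
  next unless readable

  readable.each do |m|
    if m.value == ready_r
      ready_r.read(1)                 # consume the trigger byte
      conn = pending.pop
      puts "would register #{conn.inspect} with the selector here"
    end
  end
  break
end

producer.join
```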
```diff
@@ -60,45 +209,68 @@ module Puma
                end
              end
            else
+              c = mon.value
+
              # We have to be sure to remove it from the timeout
              # list or we'll accidentally close the socket when
              # it's in use!
              if c.timeout_at
                @mutex.synchronize do
-                  @timeouts.delete
+                  @timeouts.delete mon
                end
              end
 
              begin
                if c.try_to_finish
                  @app_pool << c
-
+                  clear_monitor mon
                end
 
+              # Don't report these to the lowlevel_error handler, otherwise
+              # will be flooding them with errors when persistent connections
+              # are closed.
+              rescue ConnectionError
+                c.write_error(500)
+                c.close
+
+                clear_monitor mon
+
              # SSL handshake failure
              rescue MiniSSL::SSLError => e
+                @server.lowlevel_error(e, c.env)
+
                ssl_socket = c.io
-
+                begin
+                  addr = ssl_socket.peeraddr.last
+                # EINVAL can happen when browser closes socket w/security exception
+                rescue IOError, Errno::EINVAL
+                  addr = "<unknown>"
+                end
+
                cert = ssl_socket.peercert
 
                c.close
-
+                clear_monitor mon
 
                @events.ssl_error @server, addr, cert, e
 
              # The client doesn't know HTTP well
              rescue HttpParserError => e
-                c.
+                @server.lowlevel_error(e, c.env)
+
+                c.write_error(400)
                c.close
 
-
+                clear_monitor mon
 
                @events.parse_error @server, c.env, e
              rescue StandardError => e
-                c.
+                @server.lowlevel_error(e, c.env)
+
+                c.write_error(500)
                c.close
 
-
+                clear_monitor mon
              end
            end
          end
```
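Note the new `@server.lowlevel_error(e, c.env)` calls: SSL, parser and generic failures now flow through the server's low-level error hook before the canned 400/500 reply goes out. That hook is configurable from a Puma config file; a minimal sketch (the response body is just an example, and newer Puma versions may also pass the request env and status to the block):

```ruby
# config/puma.rb
lowlevel_error_handler do |e|
  # Whatever Rack-style response is returned here is sent for errors raised
  # outside the app, such as the failures rescued in the reactor above.
  [500, { 'Content-Type' => 'text/plain' }, ["Low-level error: #{e.message}\n"]]
end
```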
```diff
@@ -108,11 +280,13 @@ module Puma
          @mutex.synchronize do
            now = Time.now
 
-            while @timeouts.first.timeout_at < now
-
-              c
+            while @timeouts.first.value.timeout_at < now
+              mon = @timeouts.shift
+              c = mon.value
+              c.write_error(408) if c.in_data_phase
              c.close
-
+
+              clear_monitor mon
 
              break if @timeouts.empty?
            end
```
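This sweep is what turns a stalled client into a 408: once the soonest entry in `@timeouts` has passed, the client is closed and dropped from the selector. The window itself is a config knob; for example:

```ruby
# config/puma.rb -- shrink how long the reactor will buffer a slow client
# before the sweep above writes back "408 Request Timeout" and closes it
# (the value 10 here is only an example; the usual default is 30 seconds).
first_data_timeout 10
```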
```diff
@@ -123,6 +297,11 @@ module Puma
      end
    end
 
+    def clear_monitor(mon)
+      @selector.deregister mon.value
+      @monitors.delete mon
+    end
+
    public
 
    def run
```
```diff
@@ -134,6 +313,7 @@ module Puma
 
    def run_in_thread
      @thread = Thread.new do
+        Puma.set_thread_name "reactor"
        begin
          run_internal
        rescue StandardError => e
```
```diff
@@ -147,11 +327,21 @@ module Puma
      end
    end
 
+    # The `calculate_sleep` sets the value that the `NIO::Selector#select` will
+    # sleep for in the main reactor loop when no sockets are being written to.
+    #
+    # The values kept in `@timeouts` are sorted so that the first timeout
+    # comes first in the array. When there are no timeouts the default timeout is used.
+    #
+    # Otherwise a sleep value is set that is the same as the amount of time it
+    # would take for the first element to time out.
+    #
+    # If that value is in the past, then a sleep value of zero is used.
    def calculate_sleep
      if @timeouts.empty?
        @sleep_for = DefaultSleepFor
      else
-        diff = @timeouts.first.timeout_at.to_f - Time.now.to_f
+        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f
 
        if diff < 0.0
          @sleep_for = 0
```
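Because `@timeouts` is kept sorted, the next sleep is just the distance to the earliest deadline, clamped at zero. The same arithmetic outside the class, with made-up deadlines:

```ruby
DEFAULT_SLEEP_FOR = 5

deadlines = [Time.now + 2.5, Time.now + 30]   # already sorted, soonest first

sleep_for =
  if deadlines.empty?
    DEFAULT_SLEEP_FOR
  else
    diff = deadlines.first.to_f - Time.now.to_f
    diff < 0.0 ? 0 : diff                     # a deadline in the past means "don't sleep"
  end

puts sleep_for # roughly 2.5: wake up just in time to expire the first client
```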
```diff
@@ -161,17 +351,35 @@ module Puma
      end
    end
 
+    # This method adds a connection to the reactor
+    #
+    # Typically called by `Puma::Server` the value passed in
+    # is usually a `Puma::Client` object that responds like an IO
+    # object.
+    #
+    # The main body of the reactor loop is in `run_internal` and it
+    # will sleep on `NIO::Selector#select`. When a new connection is added to the
+    # reactor it cannot be added directly to the `sockets` array, because
+    # the `NIO::Selector#select` will not be watching for it yet.
+    #
+    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
+    # the contents of `@input` added to the `sockets` array, and then
+    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
+    # object can be read immediately, it does not block, but instead returns
+    # right away.
+    #
+    # This behavior is accomplished by writing to `@trigger` which wakes up
+    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
+    # pull the contents from `@input` and add them to the sockets array.
+    #
+    # If the object passed in has a timeout value in `timeout_at` then
+    # it is added to a `@timeouts` array. This array is then re-arranged
+    # so that the first element to timeout will be at the front of the
+    # array. Then a value to sleep for is derived in the call to `calculate_sleep`
    def add(c)
      @mutex.synchronize do
        @input << c
        @trigger << "*"
-
-        if c.timeout_at
-          @timeouts << c
-          @timeouts.sort! { |a,b| a.timeout_at <=> b.timeout_at }
-
-          calculate_sleep
-        end
      end
    end
 
```
```diff
@@ -180,6 +388,7 @@ module Puma
      begin
        @trigger << "c"
      rescue IOError
+        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
      end
    end
 
```
```diff
@@ -187,6 +396,7 @@ module Puma
      begin
        @trigger << "!"
      rescue IOError
+        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
      end
 
      @thread.join
```