puma 5.0.2 → 5.0.3
Potentially problematic release.
- checksums.yaml +4 -4
- data/History.md +608 -566
- data/README.md +4 -4
- data/bin/puma-wild +3 -9
- data/docs/deployment.md +5 -6
- data/docs/jungle/README.md +0 -4
- data/docs/jungle/rc.d/puma +2 -2
- data/docs/nginx.md +1 -1
- data/docs/restart.md +46 -23
- data/docs/systemd.md +1 -1
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/mini_ssl.c +39 -37
- data/ext/puma_http11/puma_http11.c +17 -10
- data/lib/puma/app/status.rb +44 -43
- data/lib/puma/binder.rb +9 -1
- data/lib/puma/client.rb +24 -72
- data/lib/puma/cluster.rb +25 -196
- data/lib/puma/cluster/worker.rb +170 -0
- data/lib/puma/cluster/worker_handle.rb +83 -0
- data/lib/puma/configuration.rb +8 -7
- data/lib/puma/const.rb +1 -1
- data/lib/puma/launcher.rb +5 -9
- data/lib/puma/queue_close.rb +26 -0
- data/lib/puma/reactor.rb +77 -362
- data/lib/puma/request.rb +438 -0
- data/lib/puma/runner.rb +4 -17
- data/lib/puma/server.rb +166 -501
- data/lib/puma/single.rb +2 -2
- data/lib/puma/util.rb +11 -0
- metadata +6 -6
- data/docs/jungle/upstart/README.md +0 -61
- data/docs/jungle/upstart/puma-manager.conf +0 -31
- data/docs/jungle/upstart/puma.conf +0 -69
- data/lib/puma/accept_nonblock.rb +0 -29
data/lib/puma/cluster/worker.rb
ADDED
@@ -0,0 +1,170 @@
+# frozen_string_literal: true
+
+module Puma
+  class Cluster < Puma::Runner
+    # This class is instantiated by the `Puma::Cluster` and represents a single
+    # worker process.
+    #
+    # At the core of this class is running an instance of `Puma::Server` which
+    # gets created via the `start_server` method from the `Puma::Runner` class
+    # that this inherits from.
+    class Worker < Puma::Runner
+      attr_reader :index, :master
+
+      def initialize(index:, master:, launcher:, pipes:, server: nil)
+        super launcher, launcher.events
+
+        @index = index
+        @master = master
+        @launcher = launcher
+        @options = launcher.options
+        @check_pipe = pipes[:check_pipe]
+        @worker_write = pipes[:worker_write]
+        @fork_pipe = pipes[:fork_pipe]
+        @wakeup = pipes[:wakeup]
+        @server = server
+      end
+
+      def run
+        title = "puma: cluster worker #{index}: #{master}"
+        title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
+        $0 = title
+
+        Signal.trap "SIGINT", "IGNORE"
+        Signal.trap "SIGCHLD", "DEFAULT"
+
+        Thread.new do
+          Puma.set_thread_name "worker check pipe"
+          IO.select [@check_pipe]
+          log "! Detected parent died, dying"
+          exit! 1
+        end
+
+        # If we're not running under a Bundler context, then
+        # report the info about the context we will be using
+        if !ENV['BUNDLE_GEMFILE']
+          if File.exist?("Gemfile")
+            log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
+          elsif File.exist?("gems.rb")
+            log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
+          end
+        end
+
+        # Invoke any worker boot hooks so they can get
+        # things in shape before booting the app.
+        @launcher.config.run_hooks :before_worker_boot, index, @launcher.events
+
+        server = @server ||= start_server
+        restart_server = Queue.new << true << false
+
+        fork_worker = @options[:fork_worker] && index == 0
+
+        if fork_worker
+          restart_server.clear
+          worker_pids = []
+          Signal.trap "SIGCHLD" do
+            wakeup! if worker_pids.reject! do |p|
+              Process.wait(p, Process::WNOHANG) rescue true
+            end
+          end
+
+          Thread.new do
+            Puma.set_thread_name "worker fork pipe"
+            while (idx = @fork_pipe.gets)
+              idx = idx.to_i
+              if idx == -1 # stop server
+                if restart_server.length > 0
+                  restart_server.clear
+                  server.begin_restart(true)
+                  @launcher.config.run_hooks :before_refork, nil, @launcher.events
+                  Puma::Util.nakayoshi_gc @events if @options[:nakayoshi_fork]
+                end
+              elsif idx == 0 # restart server
+                restart_server << true << false
+              else # fork worker
+                worker_pids << pid = spawn_worker(idx)
+                @worker_write << "f#{pid}:#{idx}\n" rescue nil
+              end
+            end
+          end
+        end
+
+        Signal.trap "SIGTERM" do
+          @worker_write << "e#{Process.pid}\n" rescue nil
+          restart_server.clear
+          server.stop
+          restart_server << false
+        end
+
+        begin
+          @worker_write << "b#{Process.pid}:#{index}\n"
+        rescue SystemCallError, IOError
+          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
+          STDERR.puts "Master seems to have exited, exiting."
+          return
+        end
+
+        while restart_server.pop
+          server_thread = server.run
+          stat_thread ||= Thread.new(@worker_write) do |io|
+            Puma.set_thread_name "stat payload"
+
+            while true
+              begin
+                require 'json'
+                io << "p#{Process.pid}#{server.stats.to_json}\n"
+              rescue IOError
+                Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
+                break
+              end
+              sleep Const::WORKER_CHECK_INTERVAL
+            end
+          end
+          server_thread.join
+        end
+
+        # Invoke any worker shutdown hooks so they can prevent the worker
+        # exiting until any background operations are completed
+        @launcher.config.run_hooks :before_worker_shutdown, index, @launcher.events
+      ensure
+        @worker_write << "t#{Process.pid}\n" rescue nil
+        @worker_write.close
+      end
+
+      private
+
+      def spawn_worker(idx)
+        @launcher.config.run_hooks :before_worker_fork, idx, @launcher.events
+
+        pid = fork do
+          new_worker = Worker.new index: idx,
+                                  master: master,
+                                  launcher: @launcher,
+                                  pipes: { check_pipe: @check_pipe,
+                                           worker_write: @worker_write },
+                                  server: @server
+          new_worker.run
+        end
+
+        if !pid
+          log "! Complete inability to spawn new workers detected"
+          log "! Seppuku is the only choice."
+          exit! 1
+        end
+
+        @launcher.config.run_hooks :after_worker_fork, idx, @launcher.events
+        pid
+      end
+
+      def wakeup!
+        return unless @wakeup
+
+        begin
+          @wakeup.write "!" unless @wakeup.closed?
+        rescue SystemCallError, IOError
+          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
+        end
+      end
+    end
+  end
+end
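
Worth noting: `restart_server = Queue.new << true << false` above drives the whole run loop. Popping `true` (re)runs the server, popping `false` exits, and a restart first clears the pending `false` before queueing a fresh pair. A standalone sketch of the pattern (plain Ruby, not Puma code; the counter stands in for `server.run` plus `server_thread.join`):

restart_server = Queue.new << true << false  # Queue#<< returns self, so pushes chain

runs = 0
while restart_server.pop          # true => run the server once more; false => exit
  runs += 1                       # stands in for running the server to completion
  if runs < 3                     # simulate a phased restart request:
    restart_server.clear          # drop the pending `false`...
    restart_server << true << false  # ...and queue another run plus an eventual exit
  end
end
puts "server ran #{runs} times before exiting"  # => 3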
data/lib/puma/cluster/worker_handle.rb
ADDED
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+module Puma
+  class Cluster < Runner
+    # This class represents a worker process from the perspective of the puma
+    # master process. It contains information about the process and its health
+    # and it exposes methods to control the process via IPC. It does not
+    # include the actual logic executed by the worker process itself. For that,
+    # see Puma::Cluster::Worker.
+    class WorkerHandle
+      def initialize(idx, pid, phase, options)
+        @index = idx
+        @pid = pid
+        @phase = phase
+        @stage = :started
+        @signal = "TERM"
+        @options = options
+        @first_term_sent = nil
+        @started_at = Time.now
+        @last_checkin = Time.now
+        @last_status = {}
+        @term = false
+      end
+
+      attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at
+
+      # @version 5.0.0
+      attr_writer :pid, :phase
+
+      def booted?
+        @stage == :booted
+      end
+
+      def boot!
+        @last_checkin = Time.now
+        @stage = :booted
+      end
+
+      def term?
+        @term
+      end
+
+      def ping!(status)
+        @last_checkin = Time.now
+        require 'json'
+        @last_status = JSON.parse(status, symbolize_names: true)
+      end
+
+      # @see Puma::Cluster#check_workers
+      # @version 5.0.0
+      def ping_timeout
+        @last_checkin +
+          (booted? ?
+            @options[:worker_timeout] :
+            @options[:worker_boot_timeout]
+          )
+      end
+
+      def term
+        begin
+          if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]
+            @signal = "KILL"
+          else
+            @term ||= true
+            @first_term_sent ||= Time.now
+          end
+          Process.kill @signal, @pid if @pid
+        rescue Errno::ESRCH
+        end
+      end
+
+      def kill
+        @signal = 'KILL'
+        term
+      end
+
+      def hup
+        Process.kill "HUP", @pid
+      rescue Errno::ESRCH
+      end
+    end
+  end
+end
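
The TERM-to-KILL escalation lives entirely in `WorkerHandle#term`: the first call sends SIGTERM and stamps `@first_term_sent`; once `worker_shutdown_timeout` seconds have passed since that first TERM, the same call sends SIGKILL instead. A rough sketch of driving it (hypothetical driver code, with an illustrative 2-second timeout instead of Puma's 30-second default, and a child that traps TERM so the escalation is visible):

require 'puma'
require 'puma/cluster'   # loads Puma::Cluster::WorkerHandle

pid = fork { Signal.trap("TERM") {}; sleep }  # stand-in worker that ignores TERM
handle = Puma::Cluster::WorkerHandle.new(0, pid, 0, worker_shutdown_timeout: 2)

handle.term            # first call: sends TERM, stamps @first_term_sent
sleep 3
handle.term            # > 2s since the first TERM: escalates to KILL
Process.wait(pid)
puts handle.signal     # => "KILL"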
data/lib/puma/configuration.rb
CHANGED
@@ -108,16 +108,17 @@ module Puma
   #
   # It also handles loading plugins.
   #
-  #
+  # [Note:]
+  #   `:port` and `:host` are not valid keys. By the time they make it to the
   #   configuration options they are expected to be incorporated into a `:binds` key.
   #   Under the hood the DSL maps `port` and `host` calls to `:binds`
   #
-  #
-  #
-  #
-  #
-  #
-  #
+  #     config = Configuration.new({}) do |user_config, file_config, default_config|
+  #       user_config.port 3003
+  #     end
+  #     config.load
+  #     puts config.options[:port]
+  #     # => 3003
   #
   # It is expected that `load` is called on the configuration instance after setting
   # config. This method expands any values in `config_file` and puts them into the
data/lib/puma/const.rb
CHANGED
@@ -100,7 +100,7 @@ module Puma
   # too taxing on performance.
   module Const
 
-    PUMA_VERSION = VERSION = "5.0.2".freeze
+    PUMA_VERSION = VERSION = "5.0.3".freeze
     CODE_NAME = "Spoony Bard".freeze
 
     PUMA_SERVER_STRING = ['puma', PUMA_VERSION, CODE_NAME].join(' ').freeze
data/lib/puma/launcher.rb
CHANGED
@@ -264,15 +264,11 @@ module Puma
       end
     end
 
-    # @!attribute [r] dependencies_and_files_to_require_after_prune
-    def dependencies_and_files_to_require_after_prune
+    # @!attribute [r] files_to_require_after_prune
+    def files_to_require_after_prune
       puma = spec_for_gem("puma")
 
-      deps = puma.runtime_dependencies.map do |d|
-        "#{d.name}:#{spec_for_gem(d.name).version}"
-      end
-
-      [deps, require_paths_for_gem(puma) + extra_runtime_deps_directories]
+      require_paths_for_gem(puma) + extra_runtime_deps_directories
     end
 
     # @!attribute [r] extra_runtime_deps_directories
@@ -304,7 +300,7 @@ module Puma
         return
       end
 
-      deps, dirs = dependencies_and_files_to_require_after_prune
+      dirs = files_to_require_after_prune
 
       log '* Pruning Bundler environment'
       home = ENV['GEM_HOME']
@@ -313,7 +309,7 @@ module Puma
       ENV['GEM_HOME'] = home
       ENV['BUNDLE_GEMFILE'] = bundle_gemfile
       ENV['PUMA_BUNDLER_PRUNED'] = '1'
-      args = [Gem.ruby, puma_wild_location, '-I', dirs.join(':'), deps.join(',')] + @original_argv
+      args = [Gem.ruby, puma_wild_location, '-I', dirs.join(':')] + @original_argv
       # Ruby 2.0+ defaults to true which breaks socket activation
       args += [{:close_others => false}]
       Kernel.exec(*args)
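
Net effect on the re-exec: puma-wild no longer receives the comma-joined gem list as an extra positional argument (which matches the `data/bin/puma-wild +3 -9` change in the file list above). A runnable comparison, with stand-in values where the launcher would use its own state (paths, versions, and argv here are illustrative, not real):

require 'rubygems'

puma_wild_location = "/gems/puma-5.0.3/bin/puma-wild"
dirs          = ["/gems/puma-5.0.3/lib"]
deps          = ["nio4r:2.5.4"]
original_argv = ["-w", "2"]

before = [Gem.ruby, puma_wild_location, '-I', dirs.join(':'), deps.join(',')] + original_argv
after  = [Gem.ruby, puma_wild_location, '-I', dirs.join(':')] + original_argv

puts (before - after).inspect  # => ["nio4r:2.5.4"] — the dropped argument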
data/lib/puma/queue_close.rb
ADDED
@@ -0,0 +1,26 @@
+class ClosedQueueError < StandardError; end
+module Puma
+
+  # Queue#close was added in Ruby 2.3.
+  # Add a simple implementation for earlier Ruby versions.
+  #
+  module QueueClose
+    def initialize
+      @closed = false
+      super
+    end
+    def close
+      @closed = true
+    end
+    def closed?
+      @closed
+    end
+    def push(object)
+      @closed ||= false
+      raise ClosedQueueError if @closed
+      super
+    end
+    alias << push
+  end
+  ::Queue.prepend QueueClose
+end
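
This backport is what lets the new `Reactor#add` fail cleanly after shutdown: pushing onto a closed queue raises `ClosedQueueError`, which `add` rescues to return `false`. The behavior it emulates (this snippet runs the same on Rubies with the native `Queue#close`, where the require is skipped):

require 'puma/queue_close' unless ::Queue.instance_methods.include?(:close)

q = Queue.new
q << :job
q.close
puts q.closed?      # => true
begin
  q << :late        # push after close
rescue ClosedQueueError
  puts "push rejected: queue already closed"
end
puts q.pop.inspect  # => :job — items queued before close can still be drained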
data/lib/puma/reactor.rb
CHANGED
@@ -1,394 +1,109 @@
 # frozen_string_literal: true
 
-require 'puma/util'
-require 'puma/minissl' if ::Puma::HAS_SSL
-
-require 'nio'
+require 'puma/queue_close' unless ::Queue.instance_methods.include? :close
 
 module Puma
-  #
-  #
-  # The Reactor object is responsible for ensuring that a request has been
-  # completely received before it starts to be processed. This may be known as read buffering.
-  # If read buffering is not done, and no other read buffering is performed (such as by an application server
-  # such as nginx) then the application would be subject to a slow client attack.
-  #
-  # Each Puma "worker" process has its own Reactor. For example if you start puma with `$ puma -w 5` then
-  # it will have 5 workers and each worker will have it's own reactor.
-  #
-  # For a graphical representation of how the reactor works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
+  # Monitors a collection of IO objects, calling a block whenever
+  # any monitored object either receives data or times out, or when the Reactor shuts down.
   #
-  #
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
+  # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will
+  # 'wakeup' any IO object that receives data.
   #
-  #
-  #
+  # This class additionally tracks a timeout for every added object,
+  # and wakes up any object when its timeout elapses.
   #
-  # The
-  # just plain IO#select). The call to `NIO::Selector#select` will "wake up" and
-  # return the references to any objects that caused it to "wake". The reactor
-  # then loops through each of these request objects, and sees if they're complete. If they
-  # have a full header and body then the reactor passes the request to a thread pool.
-  # Once in a thread pool, a "worker thread" can run the the application's Ruby code against the request.
-  #
-  # If the request is not complete, then it stays in the array, and the next time any
-  # data is written to that socket reference, then the loop is woken up and it is checked for completeness again.
-  #
-  # A detailed example is given in the docs for `run_internal` which is where the bulk
-  # of this logic lives.
+  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
   class Reactor
-
-
-    #
-
-
-    # that is used to write a response for "low level errors"
-    # when there is an exception inside of the reactor.
-    #
-    # The `app_pool` is an instance of `Puma::ThreadPool`.
-    # Once a request is fully formed (header and body are received)
-    # it will be passed to the `app_pool`.
-    def initialize(server, app_pool)
-      @server = server
-      @events = server.events
-      @app_pool = app_pool
-
+    # Create a new Reactor to monitor IO objects added by #add.
+    # The provided block will be invoked when an IO has data available to read,
+    # its timeout elapses, or when the Reactor shuts down.
+    def initialize(&block)
+      require 'nio'
       @selector = NIO::Selector.new
-
-      @mutex = Mutex.new
-
-      # Read / Write pipes to wake up internal while loop
-      @ready, @trigger = Puma::Util.pipe
-      @input = []
-      @sleep_for = DefaultSleepFor
+      @input = Queue.new
       @timeouts = []
-
-      mon = @selector.register(@ready, :r)
-      mon.value = @ready
-
-      @monitors = [mon]
+      @block = block
     end
 
-
-
-
-
-
-
-    # will break on `NIO::Selector#select` and return an array.
-    #
-    # ## When a request is added:
-    #
-    # When the `add` method is called, an instance of `Puma::Client` is added to the `@input` array.
-    # Next the `@ready` pipe is "woken" by writing a string of `"*"` to `@trigger`.
-    #
-    # When that happens, the internal loop stops blocking at `NIO::Selector#select` and returns a reference
-    # to whatever "woke" it up. On the very first loop, the only thing in `sockets` is `@ready`.
-    # When `@trigger` is written-to, the loop "wakes" and the `ready`
-    # variable returns an array of arrays that looks like `[[#<IO:fd 10>], [], []]` where the
-    # first IO object is the `@ready` object. This first array `[#<IO:fd 10>]`
-    # is saved as a `reads` variable.
-    #
-    # The `reads` variable is iterated through. In the case that the object
-    # is the same as the `@ready` input pipe, then we know that there was a `trigger` event.
-    #
-    # If there was a trigger event, then one byte of `@ready` is read into memory. In the case of the first request,
-    # the reactor sees that it's a `"*"` value and the reactor adds the contents of `@input` into the `sockets` array.
-    # The while then loop continues to iterate again, but now the `sockets` array contains a `Puma::Client` instance in addition
-    # to the `@ready` IO object. For example: `[#<IO:fd 10>, #<Puma::Client:0x3fdc1103bee8 @ready=false>]`.
-    #
-    # Since the `Puma::Client` in this example has data that has not been read yet,
-    # the `NIO::Selector#select` is immediately able to "wake" and read from the `Puma::Client`. At this point the
-    # `ready` output looks like this: `[[#<Puma::Client:0x3fdc1103bee8 @ready=false>], [], []]`.
-    #
-    # Each element in the first entry is iterated over. The `Puma::Client` object is not
-    # the `@ready` pipe, so the reactor checks to see if it has the full header and body with
-    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
-    # then the request is passed off to the `@app_pool` thread pool so that a "worker thread"
-    # can pick up the request and begin to execute application logic. This is done
-    # via `@app_pool << c`. The `Puma::Client` is then removed from the `sockets` array.
-    #
-    # If the request body is not present then nothing will happen, and the loop will iterate
-    # again. When the client sends more data to the socket the `Puma::Client` object will
-    # wake up the `NIO::Selector#select` and it can again be checked to see if it's ready to be
-    # passed to the thread pool.
-    #
-    # ## Time Out Case
-    #
-    # In addition to being woken via a write to one of the sockets the `NIO::Selector#select` will
-    # periodically "time out" of the sleep. One of the functions of this is to check for
-    # any requests that have "timed out". At the end of the loop it's checked to see if
-    # the first element in the `@timeout` array has exceed its allowed time. If so,
-    # the client object is removed from the timeout array, a 408 response is written.
-    # Then its connection is closed, and the object is removed from the `sockets` array
-    # that watches for new data.
-    #
-    # This behavior loops until all the objects that have timed out have been removed.
-    #
-    # Once all the timeouts have been processed, the next duration of the `NIO::Selector#select` sleep
-    # will be set to be equal to the amount of time it will take for the next timeout to occur.
-    # This calculation happens in `calculate_sleep`.
-    def run_internal
-      monitors = @monitors
-      selector = @selector
-
-      while true
-        begin
-          ready = selector.select @sleep_for
-        rescue IOError => e
-          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-          if monitors.any? { |mon| mon.value.closed? }
-            STDERR.puts "Error in select: #{e.message} (#{e.class})"
-            STDERR.puts e.backtrace
-
-            monitors.reject! do |mon|
-              if mon.value.closed?
-                selector.deregister mon.value
-                true
-              end
-            end
-
-            retry
-          else
-            raise
-          end
-        end
-
-        if ready
-          ready.each do |mon|
-            if mon.value == @ready
-              @mutex.synchronize do
-                case @ready.read(1)
-                when "*"
-                  @input.each do |c|
-                    mon = nil
-                    begin
-                      begin
-                        mon = selector.register(c, :r)
-                      rescue ArgumentError
-                        # There is a bug where we seem to be registering an already registered
-                        # client. This code deals with this situation but I wish we didn't have to.
-                        monitors.delete_if { |submon| submon.value.to_io == c.to_io }
-                        selector.deregister(c)
-                        mon = selector.register(c, :r)
-                      end
-                    rescue IOError
-                      # Means that the io is closed, so we should ignore this request
-                      # entirely
-                    else
-                      mon.value = c
-                      @timeouts << mon if c.timeout_at
-                      monitors << mon
-                    end
-                  end
-                  @input.clear
-
-                  @timeouts.sort! { |a,b| a.value.timeout_at <=> b.value.timeout_at }
-                  calculate_sleep
-                when "c"
-                  monitors.reject! do |submon|
-                    if submon.value == @ready
-                      false
-                    else
-                      if submon.value.can_close?
-                        submon.value.close
-                      else
-                        # Pass remaining open client connections to the thread pool.
-                        @app_pool << submon.value
-                      end
-                      begin
-                        selector.deregister submon.value
-                      rescue IOError
-                        # nio4r on jruby seems to throw an IOError here if the IO is closed, so
-                        # we need to swallow it.
-                      end
-                      true
-                    end
-                  end
-                when "!"
-                  return
-                end
-              end
-            else
-              c = mon.value
-
-              # We have to be sure to remove it from the timeout
-              # list or we'll accidentally close the socket when
-              # it's in use!
-              if c.timeout_at
-                @mutex.synchronize do
-                  @timeouts.delete mon
-                end
-              end
-
-              begin
-                if c.try_to_finish
-                  @app_pool << c
-                  clear_monitor mon
-                end
-
-              # Don't report these to the lowlevel_error handler, otherwise
-              # will be flooding them with errors when persistent connections
-              # are closed.
-              rescue ConnectionError
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-
-              # SSL handshake failure
-              rescue MiniSSL::SSLError => e
-                @server.lowlevel_error e, c.env
-                @events.ssl_error e, c.io
-
-                c.close
-                clear_monitor mon
-
-              # The client doesn't know HTTP well
-              rescue HttpParserError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(400)
-                c.close
-
-                clear_monitor mon
-
-                @events.parse_error e, c
-              rescue StandardError => e
-                @server.lowlevel_error(e, c.env)
-
-                c.write_error(500)
-                c.close
-
-                clear_monitor mon
-              end
-            end
-          end
-        end
-
-        unless @timeouts.empty?
-          @mutex.synchronize do
-            now = Time.now
-
-            while @timeouts.first.value.timeout_at < now
-              mon = @timeouts.shift
-              c = mon.value
-              c.write_error(408) if c.in_data_phase
-              c.close
-
-              clear_monitor mon
-
-              break if @timeouts.empty?
-            end
-
-            calculate_sleep
-          end
+    # Run the internal select loop, using a background thread by default.
+    def run(background=true)
+      if background
+        @thread = Thread.new do
+          Puma.set_thread_name "reactor"
+          select_loop
         end
+      else
+        select_loop
       end
     end
 
-
-
-
-
-
-
-
-
-
-    ensure
-      @trigger.close
-      @ready.close
+    # Add a new IO object to monitor.
+    # The object must respond to #timeout and #timeout_at.
+    # Returns false if the reactor is already shut down.
+    def add(io)
+      @input << io
+      @selector.wakeup
+      true
+    rescue ClosedQueueError
+      false
     end
 
-
-
-
-
-
-
-          STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
-          STDERR.puts e.backtrace
-          retry
-    ensure
-      @trigger.close
-      @ready.close
-    end
+    # Shutdown the reactor, blocking until the background thread is finished.
+    def shutdown
+      @input.close
+      begin
+        @selector.wakeup
+      rescue IOError # Ignore if selector is already closed
      end
+      @thread.join if @thread
    end
 
-
-    # sleep for in the main reactor loop when no sockets are being written to.
-    #
-    # The values kept in `@timeouts` are sorted so that the first timeout
-    # comes first in the array. When there are no timeouts the default timeout is used.
-    #
-    # Otherwise a sleep value is set that is the same as the amount of time it
-    # would take for the first element to time out.
-    #
-    # If that value is in the past, then a sleep value of zero is used.
-    def calculate_sleep
-      if @timeouts.empty?
-        @sleep_for = DefaultSleepFor
-      else
-        diff = @timeouts.first.value.timeout_at.to_f - Time.now.to_f
+    private
 
-
-
-
-
+    def select_loop
+      begin
+        until @input.closed? && @input.empty?
+          # Wakeup any registered object that receives incoming data.
+          # Block until the earliest timeout or Selector#wakeup is called.
+          timeout = (earliest = @timeouts.first) && earliest.timeout
+          @selector.select(timeout) {|mon| wakeup!(mon.value)}
+
+          # Wakeup all objects that timed out.
+          timed_out = @timeouts.take_while {|t| t.timeout == 0}
+          timed_out.each(&method(:wakeup!))
+
+          unless @input.empty?
+            until @input.empty?
+              client = @input.pop
+              register(client) if client.io_ok?
+            end
+            @timeouts.sort_by!(&:timeout_at)
+          end
        end
+      rescue StandardError => e
+        STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
+        STDERR.puts e.backtrace
+        retry
      end
+      # Wakeup all remaining objects on shutdown.
+      @timeouts.each(&@block)
+      @selector.close
    end
 
-    #
-
-
-
-    # object.
-    #
-    # The main body of the reactor loop is in `run_internal` and it
-    # will sleep on `NIO::Selector#select`. When a new connection is added to the
-    # reactor it cannot be added directly to the `sockets` array, because
-    # the `NIO::Selector#select` will not be watching for it yet.
-    #
-    # Instead what needs to happen is that `NIO::Selector#select` needs to be woken up,
-    # the contents of `@input` added to the `sockets` array, and then
-    # another call to `NIO::Selector#select` needs to happen. Since the `Puma::Client`
-    # object can be read immediately, it does not block, but instead returns
-    # right away.
-    #
-    # This behavior is accomplished by writing to `@trigger` which wakes up
-    # the `NIO::Selector#select` and then there is logic to detect the value of `*`,
-    # pull the contents from `@input` and add them to the sockets array.
-    #
-    # If the object passed in has a timeout value in `timeout_at` then
-    # it is added to a `@timeouts` array. This array is then re-arranged
-    # so that the first element to timeout will be at the front of the
-    # array. Then a value to sleep for is derived in the call to `calculate_sleep`
-    def add(c)
-      @mutex.synchronize do
-        @input << c
-        @trigger << "*"
-      end
-    end
-
-    # Close all watched sockets and clear them from being watched
-    def clear!
-      begin
-        @trigger << "c"
-      rescue IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+    # Start monitoring the object.
+    def register(io)
+      @selector.register(io, :r).value = io
+      @timeouts << io
     end
 
-
-
-
-
-
+    # 'Wake up' a monitored object by calling the provided block.
+    # Stop monitoring the object if the block returns `true`.
+    def wakeup!(io)
+      if @block.call(io)
+        @selector.deregister(io)
+        @timeouts.delete(io)
      end
-
-      @thread.join
    end
  end
 end