puma 3.11.1 → 6.6.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +5 -5
- data/History.md +2092 -422
- data/LICENSE +23 -20
- data/README.md +301 -69
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +59 -21
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +69 -58
- data/docs/fork_worker.md +41 -0
- data/docs/java_options.md +54 -0
- data/docs/jungle/README.md +9 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/docs/kubernetes.md +78 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +26 -12
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +48 -22
- data/docs/signals.md +13 -11
- data/docs/stats.md +147 -0
- data/docs/systemd.md +108 -117
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +68 -3
- data/ext/puma_http11/http11_parser.c +106 -118
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +22 -38
- data/ext/puma_http11/http11_parser.rl +6 -4
- data/ext/puma_http11/http11_parser_common.rl +6 -6
- data/ext/puma_http11/mini_ssl.c +474 -94
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +136 -121
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +84 -99
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +251 -88
- data/ext/puma_http11/puma_http11.c +53 -58
- data/lib/puma/app/status.rb +71 -49
- data/lib/puma/binder.rb +257 -151
- data/lib/puma/cli.rb +61 -38
- data/lib/puma/client.rb +464 -224
- data/lib/puma/cluster/worker.rb +183 -0
- data/lib/puma/cluster/worker_handle.rb +96 -0
- data/lib/puma/cluster.rb +343 -239
- data/lib/puma/commonlogger.rb +23 -14
- data/lib/puma/configuration.rb +144 -96
- data/lib/puma/const.rb +194 -115
- data/lib/puma/control_cli.rb +135 -81
- data/lib/puma/detect.rb +34 -2
- data/lib/puma/dsl.rb +1092 -153
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +17 -111
- data/lib/puma/io_buffer.rb +44 -5
- data/lib/puma/jruby_restart.rb +2 -73
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +205 -138
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +96 -0
- data/lib/puma/minissl.rb +279 -70
- data/lib/puma/null_io.rb +61 -2
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +3 -1
- data/lib/puma/plugin.rb +9 -13
- data/lib/puma/rack/builder.rb +10 -11
- data/lib/puma/rack/urlmap.rb +3 -1
- data/lib/puma/rack_default.rb +21 -4
- data/lib/puma/reactor.rb +97 -185
- data/lib/puma/request.rb +688 -0
- data/lib/puma/runner.rb +114 -69
- data/lib/puma/sd_notify.rb +146 -0
- data/lib/puma/server.rb +409 -704
- data/lib/puma/single.rb +29 -72
- data/lib/puma/state_file.rb +48 -9
- data/lib/puma/thread_pool.rb +234 -93
- data/lib/puma/util.rb +23 -10
- data/lib/puma.rb +68 -5
- data/lib/rack/handler/puma.rb +119 -86
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +0 -1
- metadata +55 -33
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/accept_nonblock.rb +0 -23
- data/lib/puma/compat.rb +0 -14
- data/lib/puma/convenient.rb +0 -23
- data/lib/puma/daemon_ext.rb +0 -31
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack/backports/uri/common_193.rb +0 -33
- data/lib/puma/tcp_logger.rb +0 -39
- data/tools/jungle/README.md +0 -13
- data/tools/jungle/init.d/README.md +0 -59
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/plugin/systemd.rb
ADDED
```diff
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+require_relative '../plugin'
+
+# Puma's systemd integration allows Puma to inform systemd:
+#  1. when it has successfully started
+#  2. when it is starting shutdown
+#  3. periodically for a liveness check with a watchdog thread
+#  4. periodically set the status
+Puma::Plugin.create do
+  def start(launcher)
+    require_relative '../sd_notify'
+
+    launcher.log_writer.log "* Enabling systemd notification integration"
+
+    # hook_events
+    launcher.events.on_booted { Puma::SdNotify.ready }
+    launcher.events.on_stopped { Puma::SdNotify.stopping }
+    launcher.events.on_restart { Puma::SdNotify.reloading }
+
+    # start watchdog
+    if Puma::SdNotify.watchdog?
+      ping_f = watchdog_sleep_time
+
+      in_background do
+        launcher.log_writer.log "Pinging systemd watchdog every #{ping_f.round(1)} sec"
+        loop do
+          sleep ping_f
+          Puma::SdNotify.watchdog
+        end
+      end
+    end
+
+    # start status loop
+    instance = self
+    sleep_time = 1.0
+    in_background do
+      launcher.log_writer.log "Sending status to systemd every #{sleep_time.round(1)} sec"
+
+      loop do
+        sleep sleep_time
+        # TODO: error handling?
+        Puma::SdNotify.status(instance.status)
+      end
+    end
+  end
+
+  def status
+    if clustered?
+      messages = stats[:worker_status].map do |worker|
+        common_message(worker[:last_status])
+      end.join(',')
+
+      "Puma #{Puma::Const::VERSION}: cluster: #{booted_workers}/#{workers}, worker_status: [#{messages}]"
+    else
+      "Puma #{Puma::Const::VERSION}: worker: #{common_message(stats)}"
+    end
+  end
+
+  private
+
+  def watchdog_sleep_time
+    usec = Integer(ENV["WATCHDOG_USEC"])
+
+    sec_f = usec / 1_000_000.0
+    # "It is recommended that a daemon sends a keep-alive notification message
+    # to the service manager every half of the time returned here."
+    sec_f / 2
+  end
+
+  def stats
+    Puma.stats_hash
+  end
+
+  def clustered?
+    stats.has_key?(:workers)
+  end
+
+  def workers
+    stats.fetch(:workers, 1)
+  end
+
+  def booted_workers
+    stats.fetch(:booted_workers, 1)
+  end
+
+  def common_message(stats)
+    "{ #{stats[:running]}/#{stats[:max_threads]} threads, #{stats[:pool_capacity]} available, #{stats[:backlog]} backlog }"
+  end
+end
```
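The watchdog half-interval above is worth spelling out. A minimal sketch of the same arithmetic, assuming a unit configured with `WatchdogSec=10` (systemd then exports `WATCHDOG_USEC=10000000` to the service); the concrete values are illustrative, not from the diff:

```ruby
# Sketch of the watchdog_sleep_time arithmetic above, assuming WatchdogSec=10
# in the service unit, so systemd exports WATCHDOG_USEC=10000000.
ENV["WATCHDOG_USEC"] = "10000000"

usec  = Integer(ENV["WATCHDOG_USEC"])
sec_f = usec / 1_000_000.0   # 10.0 seconds of watchdog budget
ping  = sec_f / 2            # ping every 5.0 seconds, per the quoted recommendation
puts "watchdog window: #{sec_f}s, pinging every #{ping}s"
```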
data/lib/puma/plugin.rb
CHANGED
```diff
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Puma
   class UnknownPlugin < RuntimeError; end
 
@@ -8,7 +10,7 @@ module Puma
 
     def create(name)
       if cls = Plugins.find(name)
-        plugin = cls.new
+        plugin = cls.new
         @instances << plugin
         return plugin
       end
@@ -60,8 +62,11 @@ module Puma
     end
 
     def fire_background
-      @background.
-        Thread.new
+      @background.each_with_index do |b, i|
+        Thread.new do
+          Puma.set_thread_name "plgn bg #{i}"
+          b.call
+        end
       end
     end
   end
@@ -86,7 +91,7 @@ module Puma
       path = ary.first[CALLER_FILE]
 
       m = %r!puma/plugin/([^/]*)\.rb$!.match(path)
-
+      m[1]
     end
 
     def self.create(&blk)
@@ -99,17 +104,8 @@ module Puma
       Plugins.register name, cls
     end
 
-    def initialize(loader)
-      @loader = loader
-    end
-
    def in_background(&blk)
      Plugins.add_background blk
    end
-
-    def workers_supported?
-      return false if Puma.jruby? || Puma.windows?
-      true
-    end
  end
end
```
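The `Puma::Plugin.create` / `in_background` API touched above is the whole user-facing surface. A hypothetical custom plugin built on the same API (the file name, log text, and 30-second interval are invented for illustration, not taken from the diff):

```ruby
# Hypothetical plugin using the API shown above. Plugins are conventionally
# placed at puma/plugin/<name>.rb on the load path (e.g. lib/puma/plugin/heartbeat.rb)
# and enabled with `plugin :heartbeat` in the Puma config.
require 'puma/plugin'

Puma::Plugin.create do
  def start(launcher)
    # Each in_background block becomes its own named thread via fire_background.
    in_background do
      loop do
        sleep 30
        launcher.log_writer.log "heartbeat: still alive"
      end
    end
  end
end
```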
data/lib/puma/rack/builder.rb
CHANGED
```diff
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Puma
 end
 
@@ -65,10 +67,6 @@ module Puma::Rack
           options[:environment] = e
         }
 
-        opts.on("-D", "--daemonize", "run daemonized in the background") { |d|
-          options[:daemonize] = d ? true : false
-        }
-
         opts.on("-P", "--pid FILE", "file to store PID") { |f|
           options[:pid] = ::File.expand_path(f)
         }
@@ -104,13 +102,14 @@ module Puma::Rack
       begin
         info = []
         server = Rack::Handler.get(options[:server]) || Rack::Handler.default(options)
-        if server
+        if server&.respond_to?(:valid_options)
           info << ""
           info << "Server-specific options for #{server.name}:"
 
           has_options = false
           server.valid_options.each do |name, description|
-            next if
+            next if /^(Host|Port)[^a-zA-Z]/.match? name.to_s # ignore handler's host and port options, we do our own.
+
             info << "  -O %-21s %s" % [name, description]
             has_options = true
           end
@@ -166,7 +165,7 @@ module Puma::Rack
         require config
         app = Object.const_get(::File.basename(config, '.rb').capitalize)
       end
-
+      [app, options]
     end
 
     def self.new_from_string(builder_script, file="(rackup)")
@@ -174,7 +173,7 @@ module Puma::Rack
         TOPLEVEL_BINDING, file, 0
     end
 
-    def initialize(default_app = nil
+    def initialize(default_app = nil, &block)
      @use, @map, @run, @warmup = [], nil, default_app, nil
 
      # Conditionally load rack now, so that any rack middlewares,
@@ -184,7 +183,7 @@ module Puma::Rack
    rescue LoadError
    end
 
-      instance_eval(&block) if
+      instance_eval(&block) if block
    end
 
    def self.app(default_app = nil, &block)
@@ -277,7 +276,7 @@ module Puma::Rack
      app = @map ? generate_map(@run, @map) : @run
      fail "missing run or map statement" unless app
      app = @use.reverse.inject(app) { |a,e| e[a] }
-      @warmup
+      @warmup&.call app
      app
    end
 
@@ -288,7 +287,7 @@ module Puma::Rack
    private
 
    def generate_map(default_app, mapping)
-
+      require_relative 'urlmap'
 
      mapped = default_app ? {'/' => default_app} : {}
      mapping.each { |r,b| mapped[r] = self.class.new(default_app, &b).to_app }
```
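For reference, a small config.ru sketch (not taken from Puma) that exercises the Builder features these hunks touch: `use` for middleware, `map`, which goes through `generate_map`/URLMap, and the `warmup` block, now invoked via the safe-navigation call `@warmup&.call app`:

```ruby
# Illustrative config.ru; the endpoints and middleware choice are invented.
warmup { |app| puts "warmed up #{app.class}" }  # runs once against the composed app

use Rack::CommonLogger

map "/health" do
  run ->(env) { [200, { "content-type" => "text/plain" }, ["ok"]] }
end

run ->(env) { [200, { "content-type" => "text/plain" }, ["hello from puma"]] }
```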
data/lib/puma/rack/urlmap.rb
CHANGED
```diff
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Puma::Rack
   # Rack::URLMap takes a hash mapping urls or paths to apps, and
   # dispatches accordingly. Support for HTTP/1.1 host names exists if
@@ -32,7 +34,7 @@ module Puma::Rack
      end
 
      location = location.chomp('/')
-      match = Regexp.new("^#{Regexp.quote(location).gsub('/', '/+')}(.*)",
+      match = Regexp.new("^#{Regexp.quote(location).gsub('/', '/+')}(.*)", Regexp::NOENCODING)
 
      [host, location, match, app]
    }.sort_by do |(host, location, _, _)|
```
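A quick sketch (not from the diff) of how the rebuilt location pattern behaves; `Regexp::NOENCODING` makes the pattern byte-oriented rather than tied to the source encoding, while the `gsub('/', '/+')` tolerates repeated slashes:

```ruby
# Illustrative check of the URLMap location pattern construction above.
location = "/api".chomp('/')
match = Regexp.new("^#{Regexp.quote(location).gsub('/', '/+')}(.*)", Regexp::NOENCODING)

p match.match?("/api/users")   # => true
p match.match?("//api/users")  # => true, duplicate slashes are tolerated
p match.match?("/public")      # => false
```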
data/lib/puma/rack_default.rb
CHANGED
```diff
@@ -1,7 +1,24 @@
-
+# frozen_string_literal: true
 
-
-
-
+require_relative '../rack/handler/puma'
+
+# rackup was removed in Rack 3, it is now a separate gem
+if Object.const_defined? :Rackup
+  module Rackup
+    module Handler
+      def self.default(options = {})
+        ::Rackup::Handler::Puma
+      end
+    end
+  end
+elsif Object.const_defined?(:Rack) && Rack.release < '3'
+  module Rack
+    module Handler
+      def self.default(options = {})
+        ::Rack::Handler::Puma
+      end
+    end
   end
+else
+  raise "Rack 3 must be used with the Rackup gem"
 end
```
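A rough sketch of how the branch above plays out in practice (the requires shown are the usual entry points, not taken from this diff, and the expected results are assumptions about the surrounding gems):

```ruby
# With Rack 3, the rackup gem must be present; its namespace triggers the first branch.
require 'rackup'             # provides the Rackup namespace
require 'puma/rack_default'

p Rackup::Handler.default    # expected: Rackup::Handler::Puma

# On Rack 2 (no rackup gem) the elsif branch runs and
# Rack::Handler.default returns ::Rack::Handler::Puma instead.
# On Rack 3 without rackup, requiring this file raises
# "Rack 3 must be used with the Rackup gem".
```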
data/lib/puma/reactor.rb
CHANGED
```diff
@@ -1,213 +1,125 @@
-
-require 'puma/minissl'
+# frozen_string_literal: true
 
 module Puma
+  class UnsupportedBackend < StandardError; end
+
+  # Monitors a collection of IO objects, calling a block whenever
+  # any monitored object either receives data or times out, or when the Reactor shuts down.
+  #
+  # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev,
+  # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will
+  # 'wakeup' any IO object that receives data.
+  #
+  # This class additionally tracks a timeout for every added object,
+  # and wakes up any object when its timeout elapses.
+  #
+  # The implementation uses a Queue to synchronize adding new objects from the internal select loop.
   class Reactor
-
-
-
-
-
-
+    # Create a new Reactor to monitor IO objects added by #add.
+    # The provided block will be invoked when an IO has data available to read,
+    # its timeout elapses, or when the Reactor shuts down.
+    def initialize(backend, &block)
+      require 'nio'
+      valid_backends = [:auto, *::NIO::Selector.backends]
+      unless valid_backends.include?(backend)
+        raise ArgumentError.new("unsupported IO selector backend: #{backend} (available backends: #{valid_backends.join(', ')})")
+      end
 
-      @
-      @
-      @input = []
-      @sleep_for = DefaultSleepFor
+      @selector = ::NIO::Selector.new(NIO::Selector.backends.delete(backend))
+      @input = Queue.new
       @timeouts = []
-
-      @sockets = [@ready]
+      @block = block
     end
 
-
-
-
-
-
-
-      begin
-        ready = IO.select sockets, nil, nil, @sleep_for
-      rescue IOError => e
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-        if sockets.any? { |socket| socket.closed? }
-          STDERR.puts "Error in select: #{e.message} (#{e.class})"
-          STDERR.puts e.backtrace
-          sockets = sockets.reject { |socket| socket.closed? }
-          retry
-        else
-          raise
-        end
-      end
-
-      if ready and reads = ready[0]
-        reads.each do |c|
-          if c == @ready
-            @mutex.synchronize do
-              case @ready.read(1)
-              when "*"
-                sockets += @input
-                @input.clear
-              when "c"
-                sockets.delete_if do |s|
-                  if s == @ready
-                    false
-                  else
-                    s.close
-                    true
-                  end
-                end
-              when "!"
-                return
-              end
-            end
-          else
-            # We have to be sure to remove it from the timeout
-            # list or we'll accidentally close the socket when
-            # it's in use!
-            if c.timeout_at
-              @mutex.synchronize do
-                @timeouts.delete c
-              end
-            end
-
-            begin
-              if c.try_to_finish
-                @app_pool << c
-                sockets.delete c
-              end
-
-            # Don't report these to the lowlevel_error handler, otherwise
-            # will be flooding them with errors when persistent connections
-            # are closed.
-            rescue ConnectionError
-              c.write_500
-              c.close
-
-              sockets.delete c
-
-            # SSL handshake failure
-            rescue MiniSSL::SSLError => e
-              @server.lowlevel_error(e, c.env)
-
-              ssl_socket = c.io
-              addr = ssl_socket.peeraddr.last
-              cert = ssl_socket.peercert
-
-              c.close
-              sockets.delete c
-
-              @events.ssl_error @server, addr, cert, e
-
-            # The client doesn't know HTTP well
-            rescue HttpParserError => e
-              @server.lowlevel_error(e, c.env)
-
-              c.write_400
-              c.close
-
-              sockets.delete c
-
-              @events.parse_error @server, c.env, e
-            rescue StandardError => e
-              @server.lowlevel_error(e, c.env)
-
-              c.write_500
-              c.close
-
-              sockets.delete c
-            end
-          end
-        end
-      end
-
-      unless @timeouts.empty?
-        @mutex.synchronize do
-          now = Time.now
-
-          while @timeouts.first.timeout_at < now
-            c = @timeouts.shift
-            c.write_408 if c.in_data_phase
-            c.close
-            sockets.delete c
-
-            break if @timeouts.empty?
-          end
-
-          calculate_sleep
-        end
+    # Run the internal select loop, using a background thread by default.
+    def run(background=true)
+      if background
+        @thread = Thread.new do
+          Puma.set_thread_name "reactor"
+          select_loop
        end
+      else
+        select_loop
      end
    end
 
-
-
-
-
-
-      @
-
+    # Add a new client to monitor.
+    # The object must respond to #timeout and #timeout_at.
+    # Returns false if the reactor is already shut down.
+    def add(client)
+      @input << client
+      @selector.wakeup
+      true
+    rescue ClosedQueueError, IOError # Ignore if selector is already closed
+      false
    end
 
-
-
-
-
-
-
-        STDERR.puts e.backtrace
-        retry
-      ensure
-        @trigger.close
-        @ready.close
-      end
+    # Shutdown the reactor, blocking until the background thread is finished.
+    def shutdown
+      @input.close
+      begin
+        @selector.wakeup
+      rescue IOError # Ignore if selector is already closed
      end
+      @thread&.join
    end
 
-
-      if @timeouts.empty?
-        @sleep_for = DefaultSleepFor
-      else
-        diff = @timeouts.first.timeout_at.to_f - Time.now.to_f
+    private
 
-
-
-
-
+    def select_loop
+      close_selector = true
+      begin
+        until @input.closed? && @input.empty?
+          # Wakeup any registered object that receives incoming data.
+          # Block until the earliest timeout or Selector#wakeup is called.
+          timeout = (earliest = @timeouts.first) && earliest.timeout
+          @selector.select(timeout) {|mon| wakeup!(mon.value)}
+
+          # Wakeup all objects that timed out.
+          timed_out = @timeouts.take_while {|t| t.timeout == 0}
+          timed_out.each { |c| wakeup! c }
+
+          unless @input.empty?
+            until @input.empty?
+              client = @input.pop
+              register(client) if client.io_ok?
+            end
+            @timeouts.sort_by!(&:timeout_at)
+          end
        end
-
-
-
-
-
-
-      @
-
-
-
-
-
-      calculate_sleep
+      rescue StandardError => e
+        STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})"
+        STDERR.puts e.backtrace
+
+        # NoMethodError may be rarely raised when calling @selector.select, which
+        # is odd. Regardless, it may continue for thousands of calls if retried.
+        # Also, when it raises, @selector.close also raises an error.
+        if NoMethodError === e
+          close_selector = false
+        else
+          retry
        end
      end
+      # Wakeup all remaining objects on shutdown.
+      @timeouts.each(&@block)
+      @selector.close if close_selector
    end
 
-    #
-    def
-
-
-
-
-    end
+    # Start monitoring the object.
+    def register(client)
+      @selector.register(client.to_io, :r).value = client
+      @timeouts << client
+    rescue ArgumentError
+      # unreadable clients raise error when processed by NIO
    end
 
-
-
-
-
-
+    # 'Wake up' a monitored object by calling the provided block.
+    # Stop monitoring the object if the block returns `true`.
+    def wakeup!(client)
+      if @block.call client
+        @selector.deregister client.to_io
+        @timeouts.delete client
      end
-
-      @thread.join
    end
  end
 end
```