puma 3.0.0.rc1 → 5.0.0.beta1
Potentially problematic release.
- checksums.yaml +5 -5
- data/{History.txt → History.md} +703 -70
- data/LICENSE +23 -20
- data/README.md +173 -163
- data/docs/architecture.md +37 -0
- data/{DEPLOYMENT.md → docs/deployment.md} +28 -6
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +13 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/{tools → docs}/jungle/upstart/README.md +0 -0
- data/{tools → docs}/jungle/upstart/puma-manager.conf +0 -0
- data/{tools → docs}/jungle/upstart/puma.conf +1 -1
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +38 -0
- data/docs/restart.md +41 -0
- data/docs/signals.md +57 -3
- data/docs/systemd.md +228 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/extconf.rb +16 -0
- data/ext/puma_http11/http11_parser.c +287 -468
- data/ext/puma_http11/http11_parser.h +1 -0
- data/ext/puma_http11/http11_parser.java.rl +21 -37
- data/ext/puma_http11/http11_parser.rl +10 -9
- data/ext/puma_http11/http11_parser_common.rl +4 -4
- data/ext/puma_http11/mini_ssl.c +159 -10
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -116
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +99 -132
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +30 -6
- data/ext/puma_http11/puma_http11.c +6 -38
- data/lib/puma.rb +25 -5
- data/lib/puma/accept_nonblock.rb +7 -1
- data/lib/puma/app/status.rb +53 -26
- data/lib/puma/binder.rb +150 -119
- data/lib/puma/cli.rb +56 -38
- data/lib/puma/client.rb +277 -80
- data/lib/puma/cluster.rb +326 -130
- data/lib/puma/commonlogger.rb +21 -20
- data/lib/puma/configuration.rb +160 -161
- data/lib/puma/const.rb +50 -47
- data/lib/puma/control_cli.rb +104 -63
- data/lib/puma/detect.rb +13 -1
- data/lib/puma/dsl.rb +463 -114
- data/lib/puma/events.rb +22 -13
- data/lib/puma/io_buffer.rb +9 -5
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/launcher.rb +195 -105
- data/lib/puma/minissl.rb +110 -4
- data/lib/puma/minissl/context_builder.rb +76 -0
- data/lib/puma/null_io.rb +9 -14
- data/lib/puma/plugin.rb +32 -12
- data/lib/puma/plugin/tmp_restart.rb +19 -6
- data/lib/puma/rack/builder.rb +7 -5
- data/lib/puma/rack/urlmap.rb +11 -8
- data/lib/puma/rack_default.rb +2 -0
- data/lib/puma/reactor.rb +242 -32
- data/lib/puma/runner.rb +41 -30
- data/lib/puma/server.rb +265 -183
- data/lib/puma/single.rb +22 -63
- data/lib/puma/state_file.rb +9 -2
- data/lib/puma/thread_pool.rb +179 -68
- data/lib/puma/util.rb +3 -11
- data/lib/rack/handler/puma.rb +60 -11
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +1 -2
- metadata +35 -99
- data/COPYING +0 -55
- data/Gemfile +0 -13
- data/Manifest.txt +0 -79
- data/Rakefile +0 -158
- data/docs/config.md +0 -0
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/capistrano.rb +0 -94
- data/lib/puma/compat.rb +0 -18
- data/lib/puma/convenient.rb +0 -23
- data/lib/puma/daemon_ext.rb +0 -31
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack/backports/uri/common_18.rb +0 -56
- data/lib/puma/rack/backports/uri/common_192.rb +0 -52
- data/lib/puma/rack/backports/uri/common_193.rb +0 -29
- data/lib/puma/tcp_logger.rb +0 -32
- data/puma.gemspec +0 -52
- data/tools/jungle/README.md +0 -9
- data/tools/jungle/init.d/README.md +0 -54
- data/tools/jungle/init.d/puma +0 -394
- data/tools/jungle/init.d/run-puma +0 -3
data/lib/puma/single.rb
CHANGED
@@ -1,11 +1,22 @@
+# frozen_string_literal: true
+
 require 'puma/runner'
+require 'puma/detect'
+require 'puma/plugin'
 
 module Puma
+  # This class is instantiated by the `Puma::Launcher` and used
+  # to boot and serve a Ruby application when no puma "workers" are needed
+  # i.e. only using "threaded" mode. For example `$ puma -t 1:5`
+  #
+  # At the core of this class is running an instance of `Puma::Server` which
+  # gets created via the `start_server` method from the `Puma::Runner` class
+  # that this inherits from.
   class Single < Runner
     def stats
-
-
-
+      {
+        started_at: @started_at.utc.iso8601
+      }.merge(@server.stats)
     end
 
     def restart
@@ -13,7 +24,7 @@ module Puma
     end
 
     def stop
-      @server.stop
+      @server.stop(false) if @server
     end
 
     def halt
@@ -23,66 +34,15 @@ module Puma
     def stop_blocked
       log "- Gracefully stopping, waiting for requests to finish"
       @control.stop(true) if @control
-      @server.stop(true)
-    end
-
-    def jruby_daemon?
-      daemon? and Puma.jruby?
-    end
-
-    def jruby_daemon_start
-      require 'puma/jruby_restart'
-      JRubyRestart.daemon_start(@restart_dir, restart_args)
+      @server.stop(true) if @server
     end
 
     def run
-      already_daemon = false
-
-      if jruby_daemon?
-        require 'puma/jruby_restart'
-
-        if JRubyRestart.daemon?
-          # load and bind before redirecting IO so errors show up on stdout/stderr
-          load_and_bind
-        end
-
-        already_daemon = JRubyRestart.daemon_init
-      end
-
       output_header "single"
 
-
-
-
-        else
-          pid = nil
-
-          Signal.trap "SIGUSR2" do
-            log "* Started new process #{pid} as daemon..."
-
-            # Must use exit! so we don't unwind and run the ensures
-            # that will be run by the new child (such as deleting the
-            # pidfile)
-            exit!(true)
-          end
-
-          Signal.trap "SIGCHLD" do
-            log "! Error starting new process as daemon, exiting"
-            exit 1
-          end
-
-          pid = jruby_daemon_start
-          sleep
-        end
-      else
-        if daemon?
-          log "* Daemonizing..."
-          Process.daemon(true)
-          redirect_io
-        end
-
-        load_and_bind
-      end
+      load_and_bind
+
+      Plugins.fire_background
 
       @launcher.write_state
 
@@ -90,10 +50,9 @@ module Puma
 
       @server = server = start_server
 
-
-
-
-      end
+
+      log "Use Ctrl-C to stop"
+      redirect_io
 
       @launcher.events.fire_on_booted!
 
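The rewritten `Single#stats` above returns a hash that merges the boot timestamp into whatever `Puma::Server#stats` reports, and `stop`/`stop_blocked` now guard with `if @server` so they are safe to call before the server has booted. A minimal sketch of the merged result; the server-side keys are assumed for illustration only, since `Puma::Server#stats` is not part of this diff:

```ruby
require 'time'

# Assumed shape of Puma::Server#stats, purely for illustration.
server_stats = { backlog: 0, running: 5, pool_capacity: 3, max_threads: 5 }

stats = { started_at: Time.now.utc.iso8601 }.merge(server_stats)
# => {:started_at=>"2020-06-01T12:00:00Z", :backlog=>0, :running=>5,
#     :pool_capacity=>3, :max_threads=>5}
```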
data/lib/puma/state_file.rb
CHANGED
@@ -1,11 +1,18 @@
+# frozen_string_literal: true
+
+require 'yaml'
+
 module Puma
   class StateFile
     def initialize
       @options = {}
     end
 
-    def save(path)
-      File.
+    def save(path, permission = nil)
+      File.open(path, "w") do |file|
+        file.chmod(permission) if permission
+        file.write(YAML.dump(@options))
+      end
     end
 
     def load(path)
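`StateFile#save` now takes an optional permission and chmods the file before writing the YAML dump, so a state file that may hold sensitive values (such as a control-server token) can be created with a restrictive mode. A small usage sketch, assuming the rest of the class behaves as shown above; the path and mode are examples only:

```ruby
require 'puma/state_file'

sf = Puma::StateFile.new
sf.save('/tmp/puma.state')        # as before: the process umask applies
sf.save('/tmp/puma.state', 0640)  # explicitly chmods the file before writing
```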
data/lib/puma/thread_pool.rb
CHANGED
@@ -1,9 +1,27 @@
+# frozen_string_literal: true
+
 require 'thread'
 
 module Puma
-  # A simple thread pool management object.
+  # Internal Docs for A simple thread pool management object.
+  #
+  # Each Puma "worker" has a thread pool to process requests.
+  #
+  # First a connection to a client is made in `Puma::Server`. It is wrapped in a
+  # `Puma::Client` instance and then passed to the `Puma::Reactor` to ensure
+  # the whole request is buffered into memory. Once the request is ready, it is passed into
+  # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array.
   #
+  # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array
+  # and proceses it.
   class ThreadPool
+    class ForceShutdown < RuntimeError
+    end
+
+    # How long, after raising the ForceShutdown of a thread during
+    # forced shutdown mode, to wait for the thread to try and finish
+    # up its work before leaving the thread to die on the vine.
+    SHUTDOWN_GRACE_TIME = 5 # seconds
 
     # Maintain a minimum of +min+ and maximum of +max+ threads
     # in the pool.
@@ -29,6 +47,7 @@ module Puma
       @shutdown = false
 
       @trim_requested = 0
+      @out_of_band_pending = false
 
       @workers = []
 
@@ -36,19 +55,37 @@ module Puma
       @reaper = nil
 
       @mutex.synchronize do
-        @min.times
+        @min.times do
+          spawn_thread
+          @not_full.wait(@mutex)
+        end
       end
 
       @clean_thread_locals = false
     end
 
-    attr_reader :spawned, :trim_requested
+    attr_reader :spawned, :trim_requested, :waiting
     attr_accessor :clean_thread_locals
+    attr_accessor :out_of_band_hook
+
+    def self.clean_thread_locals
+      Thread.current.keys.each do |key| # rubocop: disable Performance/HashEachMethods
+        Thread.current[key] = nil unless key == :__recursive_key__
+      end
+    end
 
     # How many objects have yet to be processed by the pool?
     #
     def backlog
-
+      with_mutex { @todo.size }
+    end
+
+    def pool_capacity
+      waiting + (@max - spawned)
+    end
+
+    def busy_threads
+      with_mutex { @spawned - @waiting + @todo.size }
     end
 
     # :nodoc:
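The hunk above adds `pool_capacity` and `busy_threads` alongside the existing `backlog`. A worked example of the arithmetic, with illustrative numbers rather than values from a real server:

```ruby
max     = 5   # configured maximum threads
spawned = 5   # threads currently alive
waiting = 2   # threads idle in not_empty.wait
todo    = 1   # requests queued but not yet picked up

busy_threads  = spawned - waiting + todo   # => 4, work in flight or queued
pool_capacity = waiting + (max - spawned)  # => 2, requests the pool could absorb now
```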
@@ -58,7 +95,8 @@ module Puma
     def spawn_thread
       @spawned += 1
 
-      th = Thread.new do
+      th = Thread.new(@spawned) do |spawned|
+        Puma.set_thread_name 'threadpool %03i' % spawned
         todo = @todo
         block = @block
         mutex = @mutex
@@ -70,48 +108,37 @@ module Puma
         while true
           work = nil
 
-          continue = true
-
          mutex.synchronize do
            while todo.empty?
              if @trim_requested > 0
                @trim_requested -= 1
-
-
-
-
-              if @shutdown
-                continue = false
-                break
+                @spawned -= 1
+                @workers.delete th
+                Thread.exit
              end
 
              @waiting += 1
+              if @out_of_band_pending && trigger_out_of_band_hook
+                @out_of_band_pending = false
+              end
              not_full.signal
              not_empty.wait mutex
              @waiting -= 1
            end
 
-            work = todo.shift
+            work = todo.shift
          end
 
-          break unless continue
-
          if @clean_thread_locals
-
-              Thread.current[key] = nil unless key == :__recursive_key__
-            end
+            ThreadPool.clean_thread_locals
          end
 
          begin
-            block.call(work, *extra)
-          rescue Exception
+            @out_of_band_pending = true if block.call(work, *extra)
+          rescue Exception => e
+            STDERR.puts "Error reached top of thread-pool: #{e.message} (#{e.class})"
          end
        end
-
-        mutex.synchronize do
-          @spawned -= 1
-          @workers.delete th
-        end
      end
 
      @workers << th
@@ -121,9 +148,30 @@ module Puma
 
     private :spawn_thread
 
+    def trigger_out_of_band_hook
+      return false unless out_of_band_hook && out_of_band_hook.any?
+
+      # we execute on idle hook when all threads are free
+      return false unless @spawned == @waiting
+
+      out_of_band_hook.each(&:call)
+      true
+    rescue Exception => e
+      STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})"
+      true
+    end
+
+    private :trigger_out_of_band_hook
+
+    def with_mutex(&block)
+      @mutex.owned? ?
+        yield :
+        @mutex.synchronize(&block)
+    end
+
     # Add +work+ to the todo list for a Thread to pickup and process.
     def <<(work)
-
+      with_mutex do
         if @shutdown
           raise "Unable to add work while shutting down"
         end
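Two helpers arrive in the hunk above: `trigger_out_of_band_hook`, which runs the registered `out_of_band_hook` callables only when every spawned thread is idle (`@spawned == @waiting`), and `with_mutex`, which yields directly when the calling thread already owns the lock so the pool's methods can call one another without re-entering `Mutex#synchronize`. A standalone sketch of that re-entrancy idiom (not Puma's class, just the same pattern):

```ruby
class Example
  def initialize
    @mutex = Mutex.new
  end

  # Yield directly when we already hold the lock; otherwise take it.
  def with_mutex(&block)
    @mutex.owned? ? yield : @mutex.synchronize(&block)
  end

  def outer
    with_mutex { inner }  # inner re-enters safely instead of raising ThreadError
  end

  def inner
    with_mutex { :ok }
  end
end

Example.new.outer # => :ok
```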
@@ -138,21 +186,75 @@ module Puma
       end
     end
 
+    # This method is used by `Puma::Server` to let the server know when
+    # the thread pool can pull more requests from the socket and
+    # pass to the reactor.
+    #
+    # The general idea is that the thread pool can only work on a fixed
+    # number of requests at the same time. If it is already processing that
+    # number of requests then it is at capacity. If another Puma process has
+    # spare capacity, then the request can be left on the socket so the other
+    # worker can pick it up and process it.
+    #
+    # For example: if there are 5 threads, but only 4 working on
+    # requests, this method will not wait and the `Puma::Server`
+    # can pull a request right away.
+    #
+    # If there are 5 threads and all 5 of them are busy, then it will
+    # pause here, and wait until the `not_full` condition variable is
+    # signaled, usually this indicates that a request has been processed.
+    #
+    # It's important to note that even though the server might accept another
+    # request, it might not be added to the `@todo` array right away.
+    # For example if a slow client has only sent a header, but not a body
+    # then the `@todo` array would stay the same size as the reactor works
+    # to try to buffer the request. In that scenario the next call to this
+    # method would not block and another request would be added into the reactor
+    # by the server. This would continue until a fully bufferend request
+    # makes it through the reactor and can then be processed by the thread pool.
     def wait_until_not_full
-
-
+      with_mutex do
+        while true
+          return if @shutdown
+
+          # If we can still spin up new threads and there
+          # is work queued that cannot be handled by waiting
+          # threads, then accept more work until we would
+          # spin up the max number of threads.
+          return if busy_threads < @max
+
           @not_full.wait @mutex
         end
       end
     end
 
-
+    def wait_for_less_busy_worker(delay_s)
+      # Ruby MRI does GVL, this can result
+      # in processing contention when multiple threads
+      # (requests) are running concurrently
+      return unless Puma.mri?
+      return unless delay_s > 0
+
+      with_mutex do
+        return if @shutdown
+
+        # do not delay, if we are not busy
+        return unless busy_threads > 0
+
+        # this will be signaled once a request finishes,
+        # which can happen earlier than delay
+        @not_full.wait @mutex, delay_s
+      end
+    end
+
+    # If there are any free threads in the pool, tell one to go ahead
     # and exit. If +force+ is true, then a trim request is requested
     # even if all threads are being utilized.
     #
     def trim(force=false)
-
-
+      with_mutex do
+        free = @waiting - @todo.size
+        if (force or free > 0) and @spawned - @trim_requested > @min
           @trim_requested += 1
           @not_empty.signal
         end
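The long comment added above spells out the capacity rule that `wait_until_not_full` now implements: the server may pull another connection only while `busy_threads < @max`. Restating the comment's 5-thread example with illustrative values:

```ruby
max = 5

busy_threads = 4      # 5 threads spawned, 4 working, nothing queued
busy_threads < max    # => true  -> wait_until_not_full returns immediately

busy_threads = 5      # all threads working (or 4 working plus 1 queued request)
busy_threads < max    # => false -> the call blocks on @not_full until signaled
```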
@@ -162,7 +264,7 @@ module Puma
     # If there are dead threads in the pool make them go away while decreasing
     # spawned counter so that new healthy threads could be created again.
     def reap
-
+      with_mutex do
         dead_workers = @workers.reject(&:alive?)
 
         dead_workers.each do |worker|
@@ -170,14 +272,18 @@ module Puma
           @spawned -= 1
         end
 
-        @workers
+        @workers.delete_if do |w|
+          dead_workers.include?(w)
+        end
       end
     end
 
-    class
-      def initialize(pool, timeout)
+    class Automaton
+      def initialize(pool, timeout, thread_name, message)
         @pool = pool
         @timeout = timeout
+        @thread_name = thread_name
+        @message = message
         @running = false
       end
 
@@ -185,8 +291,9 @@ module Puma
         @running = true
 
         @thread = Thread.new do
+          Puma.set_thread_name @thread_name
           while @running
-            @pool.
+            @pool.public_send(@message)
             sleep @timeout
           end
         end
@@ -198,45 +305,25 @@ module Puma
       end
     end
 
-    def auto_trim!(timeout=
-      @auto_trim =
+    def auto_trim!(timeout=30)
+      @auto_trim = Automaton.new(self, timeout, "threadpool trimmer", :trim)
       @auto_trim.start!
     end
 
-    class Reaper
-      def initialize(pool, timeout)
-        @pool = pool
-        @timeout = timeout
-        @running = false
-      end
-
-      def start!
-        @running = true
-
-        @thread = Thread.new do
-          while @running
-            @pool.reap
-            sleep @timeout
-          end
-        end
-      end
-
-      def stop
-        @running = false
-        @thread.wakeup
-      end
-    end
-
     def auto_reap!(timeout=5)
-      @reaper =
+      @reaper = Automaton.new(self, timeout, "threadpool reaper", :reap)
       @reaper.start!
     end
 
     # Tell all threads in the pool to exit and wait for them to finish.
+    # Wait +timeout+ seconds then raise +ForceShutdown+ in remaining threads.
+    # Next, wait an extra +grace+ seconds then force-kill remaining threads.
+    # Finally, wait +kill_grace+ seconds for remaining threads to exit.
     #
-    def shutdown
-      threads =
+    def shutdown(timeout=-1)
+      threads = with_mutex do
         @shutdown = true
+        @trim_requested = @spawned
         @not_empty.broadcast
         @not_full.broadcast
 
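In the hunks above, the old per-purpose background classes (an auto-trimmer and the `Reaper`) collapse into one generic `Automaton` that is told which message to send the pool (`:trim` or `:reap`), how often, and what to name its thread; `auto_trim!` and `auto_reap!` now differ only in those arguments. A standalone miniature of the same pattern, using hypothetical names rather than Puma's:

```ruby
# One periodic runner, parameterized by the message it sends,
# replaces two near-identical background classes.
class PeriodicTask
  def initialize(target, interval, message)
    @target, @interval, @message = target, interval, message
    @running = false
  end

  def start!
    @running = true
    @thread = Thread.new do
      while @running
        @target.public_send(@message)  # :trim or :reap in the Puma case
        sleep @interval
      end
    end
  end

  def stop
    @running = false
    @thread.wakeup
  end
end
```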
@@ -246,7 +333,31 @@ module Puma
         @workers.dup
       end
 
-
+      if timeout == -1
+        # Wait for threads to finish without force shutdown.
+        threads.each(&:join)
+      else
+        join = ->(inner_timeout) do
+          start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+          threads.reject! do |t|
+            elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+            t.join inner_timeout - elapsed
+          end
+        end
+
+        # Wait +timeout+ seconds for threads to finish.
+        join.call(timeout)
+
+        # If threads are still running, raise ForceShutdown and wait to finish.
+        threads.each do |t|
+          t.raise ForceShutdown
+        end
+        join.call(SHUTDOWN_GRACE_TIME)
+
+        # If threads are _still_ running, forcefully kill them and wait to finish.
+        threads.each(&:kill)
+        join.call(1)
+      end
 
       @spawned = 0
       @workers = []
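`shutdown` now escalates in stages when given a non-negative timeout: join for up to `timeout` seconds, raise `ForceShutdown` in whatever is left and join for `SHUTDOWN_GRACE_TIME` (5 s), then `kill` the stragglers and join for one more second; the default `shutdown(-1)` keeps the old behavior of joining indefinitely. The `join` lambda shares a single time budget across all threads by re-checking the elapsed time before each join. A standalone sketch of that shrinking-budget join, with illustrative threads and numbers:

```ruby
threads = 3.times.map { |i| Thread.new { sleep i } }

budget = 5 # seconds, illustrative
start  = Process.clock_gettime(Process::CLOCK_MONOTONIC)

# Each join only gets whatever remains of the overall budget.
threads.reject! do |t|
  elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
  t.join(budget - elapsed)  # returns the thread if it finished, nil on timeout
end

threads # => threads still running after ~5 s total, not 5 s per thread
```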