puma 2.0.0.b5 → 5.0.0.beta1
- checksums.yaml +7 -0
- data/History.md +1598 -0
- data/LICENSE +23 -20
- data/README.md +222 -62
- data/bin/puma-wild +31 -0
- data/bin/pumactl +1 -1
- data/docs/architecture.md +37 -0
- data/docs/deployment.md +113 -0
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +13 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/docs/jungle/upstart/README.md +61 -0
- data/docs/jungle/upstart/puma-manager.conf +31 -0
- data/docs/jungle/upstart/puma.conf +69 -0
- data/docs/nginx.md +5 -10
- data/docs/plugins.md +38 -0
- data/docs/restart.md +41 -0
- data/docs/signals.md +97 -0
- data/docs/systemd.md +228 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/extconf.rb +23 -2
- data/ext/puma_http11/http11_parser.c +301 -482
- data/ext/puma_http11/http11_parser.h +13 -11
- data/ext/puma_http11/http11_parser.java.rl +26 -42
- data/ext/puma_http11/http11_parser.rl +22 -21
- data/ext/puma_http11/http11_parser_common.rl +5 -5
- data/ext/puma_http11/mini_ssl.c +377 -18
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -107
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +137 -170
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +265 -191
- data/ext/puma_http11/puma_http11.c +57 -81
- data/lib/puma.rb +25 -4
- data/lib/puma/accept_nonblock.rb +7 -1
- data/lib/puma/app/status.rb +61 -24
- data/lib/puma/binder.rb +212 -78
- data/lib/puma/cli.rb +149 -644
- data/lib/puma/client.rb +316 -65
- data/lib/puma/cluster.rb +659 -0
- data/lib/puma/commonlogger.rb +108 -0
- data/lib/puma/configuration.rb +279 -180
- data/lib/puma/const.rb +126 -39
- data/lib/puma/control_cli.rb +183 -96
- data/lib/puma/detect.rb +20 -1
- data/lib/puma/dsl.rb +776 -0
- data/lib/puma/events.rb +91 -23
- data/lib/puma/io_buffer.rb +9 -5
- data/lib/puma/jruby_restart.rb +9 -5
- data/lib/puma/launcher.rb +487 -0
- data/lib/puma/minissl.rb +239 -93
- data/lib/puma/minissl/context_builder.rb +76 -0
- data/lib/puma/null_io.rb +22 -12
- data/lib/puma/plugin.rb +111 -0
- data/lib/puma/plugin/tmp_restart.rb +36 -0
- data/lib/puma/rack/builder.rb +297 -0
- data/lib/puma/rack/urlmap.rb +93 -0
- data/lib/puma/rack_default.rb +9 -0
- data/lib/puma/reactor.rb +290 -43
- data/lib/puma/runner.rb +163 -0
- data/lib/puma/server.rb +493 -126
- data/lib/puma/single.rb +66 -0
- data/lib/puma/state_file.rb +34 -0
- data/lib/puma/thread_pool.rb +228 -47
- data/lib/puma/util.rb +115 -0
- data/lib/rack/handler/puma.rb +78 -31
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +44 -0
- metadata +60 -155
- data/COPYING +0 -55
- data/Gemfile +0 -8
- data/History.txt +0 -196
- data/Manifest.txt +0 -56
- data/Rakefile +0 -121
- data/TODO +0 -5
- data/docs/config.md +0 -0
- data/ext/puma_http11/io_buffer.c +0 -154
- data/lib/puma/capistrano.rb +0 -26
- data/lib/puma/compat.rb +0 -11
- data/lib/puma/daemon_ext.rb +0 -20
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack_patch.rb +0 -25
- data/puma.gemspec +0 -45
- data/test/test_app_status.rb +0 -88
- data/test/test_cli.rb +0 -171
- data/test/test_config.rb +0 -16
- data/test/test_http10.rb +0 -27
- data/test/test_http11.rb +0 -126
- data/test/test_integration.rb +0 -150
- data/test/test_iobuffer.rb +0 -38
- data/test/test_minissl.rb +0 -22
- data/test/test_null_io.rb +0 -31
- data/test/test_persistent.rb +0 -238
- data/test/test_puma_server.rb +0 -128
- data/test/test_rack_handler.rb +0 -10
- data/test/test_rack_server.rb +0 -141
- data/test/test_thread_pool.rb +0 -146
- data/test/test_unix_socket.rb +0 -39
- data/test/test_ws.rb +0 -89
- data/tools/jungle/README.md +0 -54
- data/tools/jungle/puma +0 -332
- data/tools/jungle/run-puma +0 -3
data/lib/puma/single.rb
ADDED
@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+require 'puma/runner'
+require 'puma/detect'
+require 'puma/plugin'
+
+module Puma
+  # This class is instantiated by the `Puma::Launcher` and used
+  # to boot and serve a Ruby application when no puma "workers" are needed
+  # i.e. only using "threaded" mode. For example `$ puma -t 1:5`
+  #
+  # At the core of this class is running an instance of `Puma::Server` which
+  # gets created via the `start_server` method from the `Puma::Runner` class
+  # that this inherits from.
+  class Single < Runner
+    def stats
+      {
+        started_at: @started_at.utc.iso8601
+      }.merge(@server.stats)
+    end
+
+    def restart
+      @server.begin_restart
+    end
+
+    def stop
+      @server.stop(false) if @server
+    end
+
+    def halt
+      @server.halt
+    end
+
+    def stop_blocked
+      log "- Gracefully stopping, waiting for requests to finish"
+      @control.stop(true) if @control
+      @server.stop(true) if @server
+    end
+
+    def run
+      output_header "single"
+
+      load_and_bind
+
+      Plugins.fire_background
+
+      @launcher.write_state
+
+      start_control
+
+      @server = server = start_server
+
+
+      log "Use Ctrl-C to stop"
+      redirect_io
+
+      @launcher.events.fire_on_booted!
+
+      begin
+        server.run.join
+      rescue Interrupt
+        # Swallow it
+      end
+    end
+  end
+end
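The class comment above gives the deciding factor: `Puma::Launcher` only builds this runner when no `workers` are configured. A sketch of a config file that stays in single mode, using the `threads`/`port`/`workers` DSL methods added in `data/lib/puma/dsl.rb` in this release (values illustrative):

```ruby
# config/puma.rb -- keeps Puma on Puma::Single rather than Puma::Cluster
threads 1, 5    # same as `$ puma -t 1:5` from the comment above
port 9292
# workers 2     # uncommenting would switch the launcher to cluster mode
```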
data/lib/puma/state_file.rb
ADDED
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+require 'yaml'
+
+module Puma
+  class StateFile
+    def initialize
+      @options = {}
+    end
+
+    def save(path, permission = nil)
+      File.open(path, "w") do |file|
+        file.chmod(permission) if permission
+        file.write(YAML.dump(@options))
+      end
+    end
+
+    def load(path)
+      @options = YAML.load File.read(path)
+    end
+
+    FIELDS = %w!control_url control_auth_token pid!
+
+    FIELDS.each do |f|
+      define_method f do
+        @options[f]
+      end
+
+      define_method "#{f}=" do |v|
+        @options[f] = v
+      end
+    end
+  end
+end
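Because the accessors are metaprogrammed from `FIELDS`, the class is easiest to read in use. A small sketch of the save/load round trip (paths illustrative):

```ruby
require 'puma/state_file'

sf = Puma::StateFile.new
sf.pid = Process.pid                         # writer generated from FIELDS
sf.control_url = "unix:///tmp/pumactl.sock"
sf.save "/tmp/puma.state"                    # YAML.dump under the hood

restored = Puma::StateFile.new
restored.load "/tmp/puma.state"
restored.pid  # => the pid saved above
```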
data/lib/puma/thread_pool.rb
CHANGED
@@ -1,9 +1,27 @@
+# frozen_string_literal: true
+
 require 'thread'
 
 module Puma
-  # A simple thread pool management object.
+  # Internal docs for a simple thread pool management object.
+  #
+  # Each Puma "worker" has a thread pool to process requests.
+  #
+  # First a connection to a client is made in `Puma::Server`. It is wrapped in a
+  # `Puma::Client` instance and then passed to the `Puma::Reactor` to ensure
+  # the whole request is buffered into memory. Once the request is ready, it is passed into
+  # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array.
   #
+  # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array
+  # and processes it.
   class ThreadPool
+    class ForceShutdown < RuntimeError
+    end
+
+    # How long, after raising the ForceShutdown of a thread during
+    # forced shutdown mode, to wait for the thread to try and finish
+    # up its work before leaving the thread to die on the vine.
+    SHUTDOWN_GRACE_TIME = 5 # seconds
 
     # Maintain a minimum of +min+ and maximum of +max+ threads
     # in the pool.
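The flow in the new class comment can be exercised without a server. A standalone sketch of the pool's contract, assuming only the constructor shown later in this diff (`min`, `max`, and a work block); sizes and work items are illustrative:

```ruby
require 'puma'
require 'puma/thread_pool'

# 1..4 worker threads; the block is the per-request work described above.
pool = Puma::ThreadPool.new(1, 4) do |work|
  puts "processing #{work.inspect}"
end

10.times { |i| pool << i }  # each << lands in @todo and wakes a worker
pool.shutdown               # workers drain the queue, then exit
```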
@@ -11,8 +29,9 @@ module Puma
     # The block passed is the work that will be performed in each
     # thread.
     #
-    def initialize(min, max, *extra, &blk)
-      @cond = ConditionVariable.new
+    def initialize(min, max, *extra, &block)
+      @not_empty = ConditionVariable.new
+      @not_full = ConditionVariable.new
       @mutex = Mutex.new
 
       @todo = []
@@ -20,30 +39,53 @@ module Puma
       @spawned = 0
       @waiting = 0
 
-      @min = min
-      @max = max
-      @block = blk
+      @min = Integer(min)
+      @max = Integer(max)
+      @block = block
       @extra = extra
 
       @shutdown = false
 
       @trim_requested = 0
+      @out_of_band_pending = false
 
       @workers = []
 
       @auto_trim = nil
+      @reaper = nil
 
       @mutex.synchronize do
-        min.times { spawn_thread }
+        @min.times do
+          spawn_thread
+          @not_full.wait(@mutex)
+        end
       end
+
+      @clean_thread_locals = false
     end
 
-    attr_reader :spawned, :trim_requested
+    attr_reader :spawned, :trim_requested, :waiting
+    attr_accessor :clean_thread_locals
+    attr_accessor :out_of_band_hook
+
+    def self.clean_thread_locals
+      Thread.current.keys.each do |key| # rubocop: disable Performance/HashEachMethods
+        Thread.current[key] = nil unless key == :__recursive_key__
+      end
+    end
 
     # How many objects have yet to be processed by the pool?
     #
     def backlog
-      @mutex.synchronize { @todo.size }
+      with_mutex { @todo.size }
+    end
+
+    def pool_capacity
+      waiting + (@max - spawned)
+    end
+
+    def busy_threads
+      with_mutex { @spawned - @waiting + @todo.size }
     end
 
     # :nodoc:
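A worked example of the two new accounting methods, using made-up numbers: a pool with `max = 5` and `spawned = 5`, one idle thread, and an empty queue:

```ruby
max, spawned, waiting, todo_size = 5, 5, 1, 0   # illustrative state

busy_threads  = spawned - waiting + todo_size   # => 4  threads doing work
pool_capacity = waiting + (max - spawned)       # => 1  slot still available
```

So this pool reports room for exactly one more request, and `wait_until_not_full` (added in a later hunk) would return immediately because `busy_threads` is still below `max`.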
@@ -53,48 +95,49 @@ module Puma
     def spawn_thread
       @spawned += 1
 
-      th = Thread.new do
+      th = Thread.new(@spawned) do |spawned|
+        Puma.set_thread_name 'threadpool %03i' % spawned
         todo = @todo
         block = @block
         mutex = @mutex
-        cond = @cond
+        not_empty = @not_empty
+        not_full = @not_full
 
         extra = @extra.map { |i| i.new }
 
         while true
           work = nil
 
-          continue = true
-
          mutex.synchronize do
            while todo.empty?
              if @trim_requested > 0
                @trim_requested -= 1
-                continue = false
-                break
-              end
-
-              if @shutdown
-                continue = false
-                break
+                @spawned -= 1
+                @workers.delete th
+                Thread.exit
              end
 
              @waiting += 1
-              cond.wait mutex
+              if @out_of_band_pending && trigger_out_of_band_hook
+                @out_of_band_pending = false
+              end
+              not_full.signal
+              not_empty.wait mutex
              @waiting -= 1
            end
 
-            work = todo.pop if continue
+            work = todo.shift
          end
 
-          break unless continue
-
-          block.call(work, *extra)
-        end
+          if @clean_thread_locals
+            ThreadPool.clean_thread_locals
+          end
 
-        mutex.synchronize do
-          @spawned -= 1
-          @workers.delete th
+          begin
+            @out_of_band_pending = true if block.call(work, *extra)
+          rescue Exception => e
+            STDERR.puts "Error reached top of thread-pool: #{e.message} (#{e.class})"
+          end
        end
      end
 
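The rewritten `spawn_thread` trades the old single `@cond` for the `not_empty`/`not_full` pair. A distilled sketch of that handshake outside Puma's code: the consumer advertises capacity before sleeping, so a producer can block until a worker is actually free:

```ruby
require 'thread'

mutex     = Mutex.new
not_empty = ConditionVariable.new  # signaled when work is queued
not_full  = ConditionVariable.new  # signaled when a worker goes idle
todo      = []

worker = Thread.new do
  loop do
    work = mutex.synchronize do
      while todo.empty?
        not_full.signal        # advertise spare capacity
        not_empty.wait(mutex)  # sleep until a producer signals
      end
      todo.shift
    end
    break if work == :stop
  end
end

[1, 2, :stop].each do |work|
  mutex.synchronize { todo << work; not_empty.signal }
end
worker.join
```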
@@ -105,40 +148,142 @@ module Puma
 
     private :spawn_thread
 
+    def trigger_out_of_band_hook
+      return false unless out_of_band_hook && out_of_band_hook.any?
+
+      # we execute the out-of-band hook only when all threads are free
+      return false unless @spawned == @waiting
+
+      out_of_band_hook.each(&:call)
+      true
+    rescue Exception => e
+      STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})"
+      true
+    end
+
+    private :trigger_out_of_band_hook
+
+    def with_mutex(&block)
+      @mutex.owned? ?
+        yield :
+        @mutex.synchronize(&block)
+    end
+
     # Add +work+ to the todo list for a Thread to pickup and process.
     def <<(work)
-      @mutex.synchronize do
+      with_mutex do
         if @shutdown
           raise "Unable to add work while shutting down"
         end
 
         @todo << work
 
-        if @waiting == 0 and @spawned < @max
+        if @waiting < @todo.size and @spawned < @max
           spawn_thread
         end
 
-        @cond.signal
+        @not_empty.signal
+      end
+    end
+
+    # This method is used by `Puma::Server` to let the server know when
+    # the thread pool can pull more requests from the socket and
+    # pass them to the reactor.
+    #
+    # The general idea is that the thread pool can only work on a fixed
+    # number of requests at the same time. If it is already processing that
+    # number of requests then it is at capacity. If another Puma process has
+    # spare capacity, then the request can be left on the socket so the other
+    # worker can pick it up and process it.
+    #
+    # For example: if there are 5 threads, but only 4 working on
+    # requests, this method will not wait and the `Puma::Server`
+    # can pull a request right away.
+    #
+    # If there are 5 threads and all 5 of them are busy, then it will
+    # pause here, and wait until the `not_full` condition variable is
+    # signaled; usually this indicates that a request has been processed.
+    #
+    # It's important to note that even though the server might accept another
+    # request, it might not be added to the `@todo` array right away.
+    # For example if a slow client has only sent a header, but not a body
+    # then the `@todo` array would stay the same size as the reactor works
+    # to try to buffer the request. In that scenario the next call to this
+    # method would not block and another request would be added into the reactor
+    # by the server. This would continue until a fully buffered request
+    # makes it through the reactor and can then be processed by the thread pool.
+    def wait_until_not_full
+      with_mutex do
+        while true
+          return if @shutdown
+
+          # If we can still spin up new threads and there
+          # is work queued that cannot be handled by waiting
+          # threads, then accept more work until we would
+          # spin up the max number of threads.
+          return if busy_threads < @max
+
+          @not_full.wait @mutex
+        end
+      end
+    end
+
+    def wait_for_less_busy_worker(delay_s)
+      # Ruby MRI has a GVL; this can result
+      # in processing contention when multiple threads
+      # (requests) are running concurrently
+      return unless Puma.mri?
+      return unless delay_s > 0
+
+      with_mutex do
+        return if @shutdown
+
+        # do not delay, if we are not busy
+        return unless busy_threads > 0
+
+        # this will be signaled once a request finishes,
+        # which can happen earlier than delay
+        @not_full.wait @mutex, delay_s
       end
     end
 
-    # If too many threads are in the pool, tell one to finish go ahead
+    # If there are any free threads in the pool, tell one to go ahead
     # and exit. If +force+ is true, then a trim request is requested
     # even if all threads are being utilized.
     #
     def trim(force=false)
-      @mutex.synchronize do
-        if (force or @waiting > 0) and @spawned - @trim_requested > @min
+      with_mutex do
+        free = @waiting - @todo.size
+        if (force or free > 0) and @spawned - @trim_requested > @min
           @trim_requested += 1
-          @cond.signal
+          @not_empty.signal
+        end
+      end
+    end
+
+    # If there are dead threads in the pool make them go away while decreasing
+    # spawned counter so that new healthy threads could be created again.
+    def reap
+      with_mutex do
+        dead_workers = @workers.reject(&:alive?)
+
+        dead_workers.each do |worker|
+          worker.kill
+          @spawned -= 1
+        end
+
+        @workers.delete_if do |w|
+          dead_workers.include?(w)
         end
       end
     end
 
-    class AutoTrim
-      def initialize(pool, timeout)
+    class Automaton
+      def initialize(pool, timeout, thread_name, message)
         @pool = pool
         @timeout = timeout
+        @thread_name = thread_name
+        @message = message
         @running = false
       end
 
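Putting the new back-pressure methods together, this is roughly how a caller such as `Puma::Server` would drive them, per the comments above; `pool` is a `Puma::ThreadPool` and `accept_next_connection` is a hypothetical stand-in for the server's socket handling:

```ruby
loop do
  pool.wait_until_not_full               # blocks while busy_threads >= max
  pool.wait_for_less_busy_worker(0.005)  # MRI-only pause so a less busy
                                         # worker process can win the accept
  client = accept_next_connection        # hypothetical accept step
  pool << client                         # queue it for a worker thread
end
```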
@@ -146,8 +291,9 @@ module Puma
         @running = true
 
         @thread = Thread.new do
+          Puma.set_thread_name @thread_name
           while @running
-            @pool.trim
+            @pool.public_send(@message)
             sleep @timeout
           end
         end
@@ -159,24 +305,59 @@ module Puma
       end
     end
 
-    def auto_trim!(timeout=5)
-      @auto_trim = AutoTrim.new(self, timeout)
+    def auto_trim!(timeout=30)
+      @auto_trim = Automaton.new(self, timeout, "threadpool trimmer", :trim)
       @auto_trim.start!
     end
 
+    def auto_reap!(timeout=5)
+      @reaper = Automaton.new(self, timeout, "threadpool reaper", :reap)
+      @reaper.start!
+    end
+
     # Tell all threads in the pool to exit and wait for them to finish.
+    # Wait +timeout+ seconds then raise +ForceShutdown+ in remaining threads.
+    # Next, wait an extra +grace+ seconds then force-kill remaining threads.
+    # Finally, wait +kill_grace+ seconds for remaining threads to exit.
     #
-    def shutdown
-      @mutex.synchronize do
+    def shutdown(timeout=-1)
+      threads = with_mutex do
         @shutdown = true
-        @cond.broadcast
+        @trim_requested = @spawned
+        @not_empty.broadcast
+        @not_full.broadcast
 
         @auto_trim.stop if @auto_trim
+        @reaper.stop if @reaper
+        # dup workers so that we join them all safely
+        @workers.dup
       end
 
-      # Use this instead of #each so that we don't stop in the middle
-      @workers.first.join until @workers.empty?
-
+      if timeout == -1
+        # Wait for threads to finish without force shutdown.
+        threads.each(&:join)
+      else
+        join = ->(inner_timeout) do
+          start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+          threads.reject! do |t|
+            elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+            t.join inner_timeout - elapsed
+          end
+        end
+
+        # Wait +timeout+ seconds for threads to finish.
+        join.call(timeout)
+
+        # If threads are still running, raise ForceShutdown and wait to finish.
+        threads.each do |t|
+          t.raise ForceShutdown
+        end
+        join.call(SHUTDOWN_GRACE_TIME)
+
+        # If threads are _still_ running, forcefully kill them and wait to finish.
+        threads.each(&:kill)
+        join.call(1)
+      end
 
       @spawned = 0
       @workers = []