puma 3.12.1 → 5.3.2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of puma might be problematic. Click here for more details.

Files changed (93) hide show
  1. checksums.yaml +4 -4
  2. data/History.md +1414 -448
  3. data/LICENSE +23 -20
  4. data/README.md +131 -60
  5. data/bin/puma-wild +3 -9
  6. data/docs/architecture.md +24 -19
  7. data/docs/compile_options.md +19 -0
  8. data/docs/deployment.md +38 -13
  9. data/docs/fork_worker.md +33 -0
  10. data/docs/jungle/README.md +9 -0
  11. data/{tools → docs}/jungle/rc.d/README.md +1 -1
  12. data/{tools → docs}/jungle/rc.d/puma +2 -2
  13. data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
  14. data/docs/kubernetes.md +66 -0
  15. data/docs/nginx.md +1 -1
  16. data/docs/plugins.md +20 -10
  17. data/docs/rails_dev_mode.md +29 -0
  18. data/docs/restart.md +47 -22
  19. data/docs/signals.md +7 -6
  20. data/docs/stats.md +142 -0
  21. data/docs/systemd.md +48 -70
  22. data/ext/puma_http11/PumaHttp11Service.java +2 -2
  23. data/ext/puma_http11/ext_help.h +1 -1
  24. data/ext/puma_http11/extconf.rb +27 -0
  25. data/ext/puma_http11/http11_parser.c +84 -109
  26. data/ext/puma_http11/http11_parser.h +1 -1
  27. data/ext/puma_http11/http11_parser.java.rl +22 -38
  28. data/ext/puma_http11/http11_parser.rl +4 -2
  29. data/ext/puma_http11/http11_parser_common.rl +3 -3
  30. data/ext/puma_http11/mini_ssl.c +254 -91
  31. data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
  32. data/ext/puma_http11/org/jruby/puma/Http11.java +108 -116
  33. data/ext/puma_http11/org/jruby/puma/Http11Parser.java +89 -106
  34. data/ext/puma_http11/org/jruby/puma/MiniSSL.java +92 -22
  35. data/ext/puma_http11/puma_http11.c +34 -50
  36. data/lib/puma.rb +54 -0
  37. data/lib/puma/app/status.rb +68 -49
  38. data/lib/puma/binder.rb +191 -139
  39. data/lib/puma/cli.rb +15 -15
  40. data/lib/puma/client.rb +257 -228
  41. data/lib/puma/cluster.rb +221 -212
  42. data/lib/puma/cluster/worker.rb +183 -0
  43. data/lib/puma/cluster/worker_handle.rb +90 -0
  44. data/lib/puma/commonlogger.rb +2 -2
  45. data/lib/puma/configuration.rb +58 -51
  46. data/lib/puma/const.rb +39 -19
  47. data/lib/puma/control_cli.rb +109 -67
  48. data/lib/puma/detect.rb +24 -3
  49. data/lib/puma/dsl.rb +519 -121
  50. data/lib/puma/error_logger.rb +104 -0
  51. data/lib/puma/events.rb +55 -31
  52. data/lib/puma/io_buffer.rb +7 -5
  53. data/lib/puma/jruby_restart.rb +0 -58
  54. data/lib/puma/json.rb +96 -0
  55. data/lib/puma/launcher.rb +178 -68
  56. data/lib/puma/minissl.rb +147 -48
  57. data/lib/puma/minissl/context_builder.rb +79 -0
  58. data/lib/puma/null_io.rb +13 -1
  59. data/lib/puma/plugin.rb +6 -12
  60. data/lib/puma/plugin/tmp_restart.rb +2 -0
  61. data/lib/puma/queue_close.rb +26 -0
  62. data/lib/puma/rack/builder.rb +2 -4
  63. data/lib/puma/rack/urlmap.rb +2 -0
  64. data/lib/puma/rack_default.rb +2 -0
  65. data/lib/puma/reactor.rb +85 -316
  66. data/lib/puma/request.rb +467 -0
  67. data/lib/puma/runner.rb +31 -52
  68. data/lib/puma/server.rb +282 -680
  69. data/lib/puma/single.rb +11 -67
  70. data/lib/puma/state_file.rb +8 -3
  71. data/lib/puma/systemd.rb +46 -0
  72. data/lib/puma/thread_pool.rb +129 -81
  73. data/lib/puma/util.rb +13 -6
  74. data/lib/rack/handler/puma.rb +5 -6
  75. data/tools/Dockerfile +16 -0
  76. data/tools/trickletest.rb +0 -1
  77. metadata +42 -26
  78. data/ext/puma_http11/io_buffer.c +0 -155
  79. data/lib/puma/accept_nonblock.rb +0 -23
  80. data/lib/puma/compat.rb +0 -14
  81. data/lib/puma/convenient.rb +0 -25
  82. data/lib/puma/daemon_ext.rb +0 -33
  83. data/lib/puma/delegation.rb +0 -13
  84. data/lib/puma/java_io_buffer.rb +0 -47
  85. data/lib/puma/rack/backports/uri/common_193.rb +0 -33
  86. data/lib/puma/tcp_logger.rb +0 -41
  87. data/tools/jungle/README.md +0 -19
  88. data/tools/jungle/init.d/README.md +0 -61
  89. data/tools/jungle/init.d/puma +0 -421
  90. data/tools/jungle/init.d/run-puma +0 -18
  91. data/tools/jungle/upstart/README.md +0 -61
  92. data/tools/jungle/upstart/puma-manager.conf +0 -31
  93. data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/single.rb CHANGED
@@ -13,12 +13,11 @@ module Puma
13
13
  # gets created via the `start_server` method from the `Puma::Runner` class
14
14
  # that this inherits from.
15
15
  class Single < Runner
16
+ # @!attribute [r] stats
16
17
  def stats
17
- b = @server.backlog || 0
18
- r = @server.running || 0
19
- t = @server.pool_capacity || 0
20
- m = @server.max_threads || 0
21
- %Q!{ "backlog": #{b}, "running": #{r}, "pool_capacity": #{t}, "max_threads": #{m} }!
18
+ {
19
+ started_at: @started_at.utc.iso8601
20
+ }.merge(@server.stats)
22
21
  end
23
22
 
24
23
  def restart
@@ -26,7 +25,7 @@ module Puma
26
25
  end
27
26
 
28
27
  def stop
29
- @server.stop false
28
+ @server.stop(false) if @server
30
29
  end
31
30
 
32
31
  def halt
@@ -36,67 +35,13 @@ module Puma
36
35
  def stop_blocked
37
36
  log "- Gracefully stopping, waiting for requests to finish"
38
37
  @control.stop(true) if @control
39
- @server.stop(true)
40
- end
41
-
42
- def jruby_daemon?
43
- daemon? and Puma.jruby?
44
- end
45
-
46
- def jruby_daemon_start
47
- require 'puma/jruby_restart'
48
- JRubyRestart.daemon_start(@restart_dir, @launcher.restart_args)
38
+ @server.stop(true) if @server
49
39
  end
50
40
 
51
41
  def run
52
- already_daemon = false
53
-
54
- if jruby_daemon?
55
- require 'puma/jruby_restart'
56
-
57
- if JRubyRestart.daemon?
58
- # load and bind before redirecting IO so errors show up on stdout/stderr
59
- load_and_bind
60
- redirect_io
61
- end
62
-
63
- already_daemon = JRubyRestart.daemon_init
64
- end
65
-
66
42
  output_header "single"
67
43
 
68
- if jruby_daemon?
69
- if already_daemon
70
- JRubyRestart.perm_daemonize
71
- else
72
- pid = nil
73
-
74
- Signal.trap "SIGUSR2" do
75
- log "* Started new process #{pid} as daemon..."
76
-
77
- # Must use exit! so we don't unwind and run the ensures
78
- # that will be run by the new child (such as deleting the
79
- # pidfile)
80
- exit!(true)
81
- end
82
-
83
- Signal.trap "SIGCHLD" do
84
- log "! Error starting new process as daemon, exiting"
85
- exit 1
86
- end
87
-
88
- jruby_daemon_start
89
- sleep
90
- end
91
- else
92
- if daemon?
93
- log "* Daemonizing..."
94
- Process.daemon(true)
95
- redirect_io
96
- end
97
-
98
- load_and_bind
99
- end
44
+ load_and_bind
100
45
 
101
46
  Plugins.fire_background
102
47
 
@@ -105,16 +50,15 @@ module Puma
105
50
  start_control
106
51
 
107
52
  @server = server = start_server
53
+ server_thread = server.run
108
54
 
109
- unless daemon?
110
- log "Use Ctrl-C to stop"
111
- redirect_io
112
- end
55
+ log "Use Ctrl-C to stop"
56
+ redirect_io
113
57
 
114
58
  @launcher.events.fire_on_booted!
115
59
 
116
60
  begin
117
- server.run.join
61
+ server_thread.join
118
62
  rescue Interrupt
119
63
  # Swallow it
120
64
  end
@@ -8,15 +8,20 @@ module Puma
8
8
  @options = {}
9
9
  end
10
10
 
11
- def save(path)
12
- File.write path, YAML.dump(@options)
11
+ def save(path, permission = nil)
12
+ contents = YAML.dump @options
13
+ if permission
14
+ File.write path, contents, mode: 'wb:UTF-8', perm: permission
15
+ else
16
+ File.write path, contents, mode: 'wb:UTF-8'
17
+ end
13
18
  end
14
19
 
15
20
  def load(path)
16
21
  @options = YAML.load File.read(path)
17
22
  end
18
23
 
19
- FIELDS = %w!control_url control_auth_token pid!
24
+ FIELDS = %w!control_url control_auth_token pid running_from!
20
25
 
21
26
  FIELDS.each do |f|
22
27
  define_method f do
@@ -0,0 +1,46 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'sd_notify'
4
+
5
+ module Puma
6
+ class Systemd
7
+ def initialize(events)
8
+ @events = events
9
+ end
10
+
11
+ def hook_events
12
+ @events.on_booted { SdNotify.ready }
13
+ @events.on_stopped { SdNotify.stopping }
14
+ @events.on_restart { SdNotify.reloading }
15
+ end
16
+
17
+ def start_watchdog
18
+ return unless SdNotify.watchdog?
19
+
20
+ ping_f = watchdog_sleep_time
21
+
22
+ log "Pinging systemd watchdog every #{ping_f.round(1)} sec"
23
+ Thread.new do
24
+ loop do
25
+ sleep ping_f
26
+ SdNotify.watchdog
27
+ end
28
+ end
29
+ end
30
+
31
+ private
32
+
33
+ def watchdog_sleep_time
34
+ usec = Integer(ENV["WATCHDOG_USEC"])
35
+
36
+ sec_f = usec / 1_000_000.0
37
+ # "It is recommended that a daemon sends a keep-alive notification message
38
+ # to the service manager every half of the time returned here."
39
+ sec_f / 2
40
+ end
41
+
42
+ def log(str)
43
+ @events.log str
44
+ end
45
+ end
46
+ end
@@ -13,7 +13,7 @@ module Puma
13
13
  # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array.
14
14
  #
15
15
  # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array
16
- # and proceses it.
16
+ # and processes it.
17
17
  class ThreadPool
18
18
  class ForceShutdown < RuntimeError
19
19
  end
@@ -47,6 +47,7 @@ module Puma
47
47
  @shutdown = false
48
48
 
49
49
  @trim_requested = 0
50
+ @out_of_band_pending = false
50
51
 
51
52
  @workers = []
52
53
 
@@ -54,14 +55,20 @@ module Puma
54
55
  @reaper = nil
55
56
 
56
57
  @mutex.synchronize do
57
- @min.times { spawn_thread }
58
+ @min.times do
59
+ spawn_thread
60
+ @not_full.wait(@mutex)
61
+ end
58
62
  end
59
63
 
60
64
  @clean_thread_locals = false
65
+ @force_shutdown = false
66
+ @shutdown_mutex = Mutex.new
61
67
  end
62
68
 
63
69
  attr_reader :spawned, :trim_requested, :waiting
64
70
  attr_accessor :clean_thread_locals
71
+ attr_accessor :out_of_band_hook # @version 5.0.0
65
72
 
66
73
  def self.clean_thread_locals
67
74
  Thread.current.keys.each do |key| # rubocop: disable Performance/HashEachMethods
@@ -72,13 +79,20 @@ module Puma
72
79
  # How many objects have yet to be processed by the pool?
73
80
  #
74
81
  def backlog
75
- @mutex.synchronize { @todo.size }
82
+ with_mutex { @todo.size }
76
83
  end
77
84
 
85
+ # @!attribute [r] pool_capacity
78
86
  def pool_capacity
79
87
  waiting + (@max - spawned)
80
88
  end
81
89
 
90
+ # @!attribute [r] busy_threads
91
+ # @version 5.0.0
92
+ def busy_threads
93
+ with_mutex { @spawned - @waiting + @todo.size }
94
+ end
95
+
82
96
  # :nodoc:
83
97
  #
84
98
  # Must be called with @mutex held!
@@ -87,8 +101,7 @@ module Puma
87
101
  @spawned += 1
88
102
 
89
103
  th = Thread.new(@spawned) do |spawned|
90
- # Thread name is new in Ruby 2.3
91
- Thread.current.name = 'puma %03i' % spawned if Thread.current.respond_to?(:name=)
104
+ Puma.set_thread_name 'threadpool %03i' % spawned
92
105
  todo = @todo
93
106
  block = @block
94
107
  mutex = @mutex
@@ -100,48 +113,40 @@ module Puma
100
113
  while true
101
114
  work = nil
102
115
 
103
- continue = true
104
-
105
116
  mutex.synchronize do
106
117
  while todo.empty?
107
118
  if @trim_requested > 0
108
119
  @trim_requested -= 1
109
- continue = false
110
- not_full.signal
111
- break
112
- end
113
-
114
- if @shutdown
115
- continue = false
116
- break
120
+ @spawned -= 1
121
+ @workers.delete th
122
+ Thread.exit
117
123
  end
118
124
 
119
125
  @waiting += 1
126
+ if @out_of_band_pending && trigger_out_of_band_hook
127
+ @out_of_band_pending = false
128
+ end
120
129
  not_full.signal
121
- not_empty.wait mutex
122
- @waiting -= 1
130
+ begin
131
+ not_empty.wait mutex
132
+ ensure
133
+ @waiting -= 1
134
+ end
123
135
  end
124
136
 
125
- work = todo.shift if continue
137
+ work = todo.shift
126
138
  end
127
139
 
128
- break unless continue
129
-
130
140
  if @clean_thread_locals
131
141
  ThreadPool.clean_thread_locals
132
142
  end
133
143
 
134
144
  begin
135
- block.call(work, *extra)
145
+ @out_of_band_pending = true if block.call(work, *extra)
136
146
  rescue Exception => e
137
147
  STDERR.puts "Error reached top of thread-pool: #{e.message} (#{e.class})"
138
148
  end
139
149
  end
140
-
141
- mutex.synchronize do
142
- @spawned -= 1
143
- @workers.delete th
144
- end
145
150
  end
146
151
 
147
152
  @workers << th
@@ -151,9 +156,32 @@ module Puma
151
156
 
152
157
  private :spawn_thread
153
158
 
159
+ # @version 5.0.0
160
+ def trigger_out_of_band_hook
161
+ return false unless out_of_band_hook && out_of_band_hook.any?
162
+
163
+ # we execute on idle hook when all threads are free
164
+ return false unless @spawned == @waiting
165
+
166
+ out_of_band_hook.each(&:call)
167
+ true
168
+ rescue Exception => e
169
+ STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})"
170
+ true
171
+ end
172
+
173
+ private :trigger_out_of_band_hook
174
+
175
+ # @version 5.0.0
176
+ def with_mutex(&block)
177
+ @mutex.owned? ?
178
+ yield :
179
+ @mutex.synchronize(&block)
180
+ end
181
+
154
182
  # Add +work+ to the todo list for a Thread to pickup and process.
155
183
  def <<(work)
156
- @mutex.synchronize do
184
+ with_mutex do
157
185
  if @shutdown
158
186
  raise "Unable to add work while shutting down"
159
187
  end
@@ -190,12 +218,12 @@ module Puma
190
218
  # request, it might not be added to the `@todo` array right away.
191
219
  # For example if a slow client has only sent a header, but not a body
192
220
  # then the `@todo` array would stay the same size as the reactor works
193
- # to try to buffer the request. In tha scenario the next call to this
221
+ # to try to buffer the request. In that scenario the next call to this
194
222
  # method would not block and another request would be added into the reactor
195
- # by the server. This would continue until a fully bufferend request
223
+ # by the server. This would continue until a fully buffered request
196
224
  # makes it through the reactor and can then be processed by the thread pool.
197
225
  def wait_until_not_full
198
- @mutex.synchronize do
226
+ with_mutex do
199
227
  while true
200
228
  return if @shutdown
201
229
 
@@ -203,20 +231,42 @@ module Puma
203
231
  # is work queued that cannot be handled by waiting
204
232
  # threads, then accept more work until we would
205
233
  # spin up the max number of threads.
206
- return if @todo.size - @waiting < @max - @spawned
234
+ return if busy_threads < @max
207
235
 
208
236
  @not_full.wait @mutex
209
237
  end
210
238
  end
211
239
  end
212
240
 
213
- # If too many threads are in the pool, tell one to finish go ahead
241
+ # @version 5.0.0
242
+ def wait_for_less_busy_worker(delay_s)
243
+ return unless delay_s && delay_s > 0
244
+
245
+ # Ruby MRI does GVL, this can result
246
+ # in processing contention when multiple threads
247
+ # (requests) are running concurrently
248
+ return unless Puma.mri?
249
+
250
+ with_mutex do
251
+ return if @shutdown
252
+
253
+ # do not delay, if we are not busy
254
+ return unless busy_threads > 0
255
+
256
+ # this will be signaled once a request finishes,
257
+ # which can happen earlier than delay
258
+ @not_full.wait @mutex, delay_s
259
+ end
260
+ end
261
+
262
+ # If there are any free threads in the pool, tell one to go ahead
214
263
  # and exit. If +force+ is true, then a trim request is requested
215
264
  # even if all threads are being utilized.
216
265
  #
217
266
  def trim(force=false)
218
- @mutex.synchronize do
219
- if (force or @waiting > 0) and @spawned - @trim_requested > @min
267
+ with_mutex do
268
+ free = @waiting - @todo.size
269
+ if (force or free > 0) and @spawned - @trim_requested > @min
220
270
  @trim_requested += 1
221
271
  @not_empty.signal
222
272
  end
@@ -226,7 +276,7 @@ module Puma
226
276
  # If there are dead threads in the pool make them go away while decreasing
227
277
  # spawned counter so that new healthy threads could be created again.
228
278
  def reap
229
- @mutex.synchronize do
279
+ with_mutex do
230
280
  dead_workers = @workers.reject(&:alive?)
231
281
 
232
282
  dead_workers.each do |worker|
@@ -240,10 +290,12 @@ module Puma
240
290
  end
241
291
  end
242
292
 
243
- class AutoTrim
244
- def initialize(pool, timeout)
293
+ class Automaton
294
+ def initialize(pool, timeout, thread_name, message)
245
295
  @pool = pool
246
296
  @timeout = timeout
297
+ @thread_name = thread_name
298
+ @message = message
247
299
  @running = false
248
300
  end
249
301
 
@@ -251,8 +303,9 @@ module Puma
251
303
  @running = true
252
304
 
253
305
  @thread = Thread.new do
306
+ Puma.set_thread_name @thread_name
254
307
  while @running
255
- @pool.trim
308
+ @pool.public_send(@message)
256
309
  sleep @timeout
257
310
  end
258
311
  end
@@ -265,44 +318,37 @@ module Puma
265
318
  end
266
319
 
267
320
  def auto_trim!(timeout=30)
268
- @auto_trim = AutoTrim.new(self, timeout)
321
+ @auto_trim = Automaton.new(self, timeout, "threadpool trimmer", :trim)
269
322
  @auto_trim.start!
270
323
  end
271
324
 
272
- class Reaper
273
- def initialize(pool, timeout)
274
- @pool = pool
275
- @timeout = timeout
276
- @running = false
277
- end
278
-
279
- def start!
280
- @running = true
281
-
282
- @thread = Thread.new do
283
- while @running
284
- @pool.reap
285
- sleep @timeout
286
- end
287
- end
288
- end
289
-
290
- def stop
291
- @running = false
292
- @thread.wakeup
293
- end
294
- end
295
-
296
325
  def auto_reap!(timeout=5)
297
- @reaper = Reaper.new(self, timeout)
326
+ @reaper = Automaton.new(self, timeout, "threadpool reaper", :reap)
298
327
  @reaper.start!
299
328
  end
300
329
 
330
+ # Allows ThreadPool::ForceShutdown to be raised within the
331
+ # provided block if the thread is forced to shutdown during execution.
332
+ def with_force_shutdown
333
+ t = Thread.current
334
+ @shutdown_mutex.synchronize do
335
+ raise ForceShutdown if @force_shutdown
336
+ t[:with_force_shutdown] = true
337
+ end
338
+ yield
339
+ ensure
340
+ t[:with_force_shutdown] = false
341
+ end
342
+
301
343
  # Tell all threads in the pool to exit and wait for them to finish.
344
+ # Wait +timeout+ seconds then raise +ForceShutdown+ in remaining threads.
345
+ # Next, wait an extra +grace+ seconds then force-kill remaining threads.
346
+ # Finally, wait +kill_grace+ seconds for remaining threads to exit.
302
347
  #
303
348
  def shutdown(timeout=-1)
304
- threads = @mutex.synchronize do
349
+ threads = with_mutex do
305
350
  @shutdown = true
351
+ @trim_requested = @spawned
306
352
  @not_empty.broadcast
307
353
  @not_full.broadcast
308
354
 
@@ -316,27 +362,29 @@ module Puma
316
362
  # Wait for threads to finish without force shutdown.
317
363
  threads.each(&:join)
318
364
  else
319
- # Wait for threads to finish after n attempts (+timeout+).
320
- # If threads are still running, it will forcefully kill them.
321
- timeout.times do
322
- threads.delete_if do |t|
323
- t.join 1
324
- end
325
-
326
- if threads.empty?
327
- break
328
- else
329
- sleep 1
365
+ join = ->(inner_timeout) do
366
+ start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
367
+ threads.reject! do |t|
368
+ elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
369
+ t.join inner_timeout - elapsed
330
370
  end
331
371
  end
332
372
 
333
- threads.each do |t|
334
- t.raise ForceShutdown
335
- end
373
+ # Wait +timeout+ seconds for threads to finish.
374
+ join.call(timeout)
336
375
 
337
- threads.each do |t|
338
- t.join SHUTDOWN_GRACE_TIME
376
+ # If threads are still running, raise ForceShutdown and wait to finish.
377
+ @shutdown_mutex.synchronize do
378
+ @force_shutdown = true
379
+ threads.each do |t|
380
+ t.raise ForceShutdown if t[:with_force_shutdown]
381
+ end
339
382
  end
383
+ join.call(SHUTDOWN_GRACE_TIME)
384
+
385
+ # If threads are _still_ running, forcefully kill them and wait to finish.
386
+ threads.each(&:kill)
387
+ join.call(1)
340
388
  end
341
389
 
342
390
  @spawned = 0