puma 4.3.6 → 5.3.2

Potentially problematic release: this version of puma might be problematic.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/History.md +1346 -518
  3. data/LICENSE +23 -20
  4. data/README.md +74 -31
  5. data/bin/puma-wild +3 -9
  6. data/docs/architecture.md +24 -20
  7. data/docs/compile_options.md +19 -0
  8. data/docs/deployment.md +15 -10
  9. data/docs/fork_worker.md +33 -0
  10. data/docs/jungle/README.md +9 -0
  11. data/{tools → docs}/jungle/rc.d/README.md +1 -1
  12. data/{tools → docs}/jungle/rc.d/puma +2 -2
  13. data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
  14. data/docs/kubernetes.md +66 -0
  15. data/docs/nginx.md +1 -1
  16. data/docs/plugins.md +2 -2
  17. data/docs/rails_dev_mode.md +29 -0
  18. data/docs/restart.md +46 -23
  19. data/docs/signals.md +7 -6
  20. data/docs/stats.md +142 -0
  21. data/docs/systemd.md +27 -67
  22. data/ext/puma_http11/PumaHttp11Service.java +2 -4
  23. data/ext/puma_http11/ext_help.h +1 -1
  24. data/ext/puma_http11/extconf.rb +22 -8
  25. data/ext/puma_http11/http11_parser.c +45 -47
  26. data/ext/puma_http11/http11_parser.h +1 -1
  27. data/ext/puma_http11/http11_parser.java.rl +1 -1
  28. data/ext/puma_http11/http11_parser.rl +1 -1
  29. data/ext/puma_http11/mini_ssl.c +211 -118
  30. data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
  31. data/ext/puma_http11/org/jruby/puma/Http11.java +3 -3
  32. data/ext/puma_http11/org/jruby/puma/Http11Parser.java +5 -7
  33. data/ext/puma_http11/org/jruby/puma/MiniSSL.java +77 -18
  34. data/ext/puma_http11/puma_http11.c +31 -50
  35. data/lib/puma.rb +46 -0
  36. data/lib/puma/app/status.rb +47 -36
  37. data/lib/puma/binder.rb +177 -103
  38. data/lib/puma/cli.rb +11 -15
  39. data/lib/puma/client.rb +73 -74
  40. data/lib/puma/cluster.rb +184 -198
  41. data/lib/puma/cluster/worker.rb +183 -0
  42. data/lib/puma/cluster/worker_handle.rb +90 -0
  43. data/lib/puma/commonlogger.rb +2 -2
  44. data/lib/puma/configuration.rb +55 -49
  45. data/lib/puma/const.rb +13 -5
  46. data/lib/puma/control_cli.rb +93 -76
  47. data/lib/puma/detect.rb +24 -3
  48. data/lib/puma/dsl.rb +266 -92
  49. data/lib/puma/error_logger.rb +104 -0
  50. data/lib/puma/events.rb +55 -34
  51. data/lib/puma/io_buffer.rb +9 -2
  52. data/lib/puma/jruby_restart.rb +0 -58
  53. data/lib/puma/json.rb +96 -0
  54. data/lib/puma/launcher.rb +113 -45
  55. data/lib/puma/minissl.rb +114 -33
  56. data/lib/puma/minissl/context_builder.rb +6 -3
  57. data/lib/puma/null_io.rb +13 -1
  58. data/lib/puma/plugin.rb +1 -10
  59. data/lib/puma/queue_close.rb +26 -0
  60. data/lib/puma/rack/builder.rb +0 -4
  61. data/lib/puma/reactor.rb +85 -369
  62. data/lib/puma/request.rb +467 -0
  63. data/lib/puma/runner.rb +29 -58
  64. data/lib/puma/server.rb +267 -729
  65. data/lib/puma/single.rb +9 -65
  66. data/lib/puma/state_file.rb +8 -3
  67. data/lib/puma/systemd.rb +46 -0
  68. data/lib/puma/thread_pool.rb +119 -53
  69. data/lib/puma/util.rb +12 -0
  70. data/lib/rack/handler/puma.rb +2 -3
  71. data/tools/{docker/Dockerfile → Dockerfile} +0 -0
  72. metadata +25 -21
  73. data/docs/tcp_mode.md +0 -96
  74. data/ext/puma_http11/io_buffer.c +0 -155
  75. data/ext/puma_http11/org/jruby/puma/IOBuffer.java +0 -72
  76. data/lib/puma/accept_nonblock.rb +0 -29
  77. data/lib/puma/tcp_logger.rb +0 -41
  78. data/tools/jungle/README.md +0 -19
  79. data/tools/jungle/init.d/README.md +0 -61
  80. data/tools/jungle/init.d/puma +0 -421
  81. data/tools/jungle/init.d/run-puma +0 -18
  82. data/tools/jungle/upstart/README.md +0 -61
  83. data/tools/jungle/upstart/puma-manager.conf +0 -31
  84. data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/single.rb CHANGED
@@ -13,12 +13,11 @@ module Puma
   # gets created via the `start_server` method from the `Puma::Runner` class
   # that this inherits from.
   class Single < Runner
+    # @!attribute [r] stats
     def stats
-      b = @server.backlog || 0
-      r = @server.running || 0
-      t = @server.pool_capacity || 0
-      m = @server.max_threads || 0
-      %Q!{ "started_at": "#{@started_at.utc.iso8601}", "backlog": #{b}, "running": #{r}, "pool_capacity": #{t}, "max_threads": #{m} }!
+      {
+        started_at: @started_at.utc.iso8601
+      }.merge(@server.stats)
     end

     def restart
@@ -39,64 +38,10 @@ module Puma
       @server.stop(true) if @server
     end

-    def jruby_daemon?
-      daemon? and Puma.jruby?
-    end
-
-    def jruby_daemon_start
-      require 'puma/jruby_restart'
-      JRubyRestart.daemon_start(@restart_dir, @launcher.restart_args)
-    end
-
     def run
-      already_daemon = false
-
-      if jruby_daemon?
-        require 'puma/jruby_restart'
-
-        if JRubyRestart.daemon?
-          # load and bind before redirecting IO so errors show up on stdout/stderr
-          load_and_bind
-          redirect_io
-        end
-
-        already_daemon = JRubyRestart.daemon_init
-      end
-
       output_header "single"

-      if jruby_daemon?
-        if already_daemon
-          JRubyRestart.perm_daemonize
-        else
-          pid = nil
-
-          Signal.trap "SIGUSR2" do
-            log "* Started new process #{pid} as daemon..."
-
-            # Must use exit! so we don't unwind and run the ensures
-            # that will be run by the new child (such as deleting the
-            # pidfile)
-            exit!(true)
-          end
-
-          Signal.trap "SIGCHLD" do
-            log "! Error starting new process as daemon, exiting"
-            exit 1
-          end
-
-          jruby_daemon_start
-          sleep
-        end
-      else
-        if daemon?
-          log "* Daemonizing..."
-          Process.daemon(true)
-          redirect_io
-        end
-
-        load_and_bind
-      end
+      load_and_bind

       Plugins.fire_background
@@ -105,16 +50,15 @@ module Puma
       start_control

       @server = server = start_server
+      server_thread = server.run

-      unless daemon?
-        log "Use Ctrl-C to stop"
-        redirect_io
-      end
+      log "Use Ctrl-C to stop"
+      redirect_io

       @launcher.events.fire_on_booted!

       begin
-        server.run.join
+        server_thread.join
       rescue Interrupt
         # Swallow it
       end
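Note: the first hunk above swaps Single#stats from a hand-interpolated JSON string to a plain Hash merged with Puma::Server#stats, so callers pick the serialization. A minimal sketch of the shape change, with made-up values:

    require 'json'
    require 'time'

    started_at = Time.now.utc.iso8601

    # Old shape: a pre-serialized JSON string (illustrative values).
    old_stats = %Q!{ "started_at": "#{started_at}", "backlog": 0, "running": 5 }!

    # New shape: a Hash; serialize only at the edge (e.g. the status app).
    new_stats = { started_at: started_at }.merge(backlog: 0, running: 5)
    puts new_stats.to_json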
data/lib/puma/state_file.rb CHANGED
@@ -8,15 +8,20 @@ module Puma
       @options = {}
     end

-    def save(path)
-      File.write path, YAML.dump(@options)
+    def save(path, permission = nil)
+      contents = YAML.dump @options
+      if permission
+        File.write path, contents, mode: 'wb:UTF-8', perm: permission
+      else
+        File.write path, contents, mode: 'wb:UTF-8'
+      end
     end

     def load(path)
       @options = YAML.load File.read(path)
     end

-    FIELDS = %w!control_url control_auth_token pid!
+    FIELDS = %w!control_url control_auth_token pid running_from!

     FIELDS.each do |f|
       define_method f do
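Note: a sketch of the new optional permission argument to StateFile#save; the path and mode bits below are illustrative, not from the diff:

    require 'puma/state_file'

    sf = Puma::StateFile.new
    sf.pid = Process.pid                        # accessors are generated from FIELDS
    sf.control_url = 'unix:///tmp/pumactl.sock'
    sf.save '/tmp/puma.state', 0640             # mode bits passed through as File.write's :perm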
data/lib/puma/systemd.rb ADDED
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+require 'sd_notify'
+
+module Puma
+  class Systemd
+    def initialize(events)
+      @events = events
+    end
+
+    def hook_events
+      @events.on_booted { SdNotify.ready }
+      @events.on_stopped { SdNotify.stopping }
+      @events.on_restart { SdNotify.reloading }
+    end
+
+    def start_watchdog
+      return unless SdNotify.watchdog?
+
+      ping_f = watchdog_sleep_time
+
+      log "Pinging systemd watchdog every #{ping_f.round(1)} sec"
+      Thread.new do
+        loop do
+          sleep ping_f
+          SdNotify.watchdog
+        end
+      end
+    end
+
+    private
+
+    def watchdog_sleep_time
+      usec = Integer(ENV["WATCHDOG_USEC"])
+
+      sec_f = usec / 1_000_000.0
+      # "It is recommended that a daemon sends a keep-alive notification message
+      # to the service manager every half of the time returned here."
+      sec_f / 2
+    end
+
+    def log(str)
+      @events.log str
+    end
+  end
+end
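Note: to make watchdog_sleep_time concrete, systemd exports the watchdog timeout in microseconds via WATCHDOG_USEC and the class pings at half that interval. A worked example assuming WatchdogSec=10 in the unit file:

    usec  = Integer('10000000')   # stand-in for ENV["WATCHDOG_USEC"]
    sec_f = usec / 1_000_000.0    # => 10.0 seconds
    ping  = sec_f / 2             # => 5.0, per the sd_watchdog recommendation
    puts "Pinging systemd watchdog every #{ping.round(1)} sec"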
data/lib/puma/thread_pool.rb CHANGED
@@ -13,7 +13,7 @@ module Puma
   # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array.
   #
   # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array
-  # and proceses it.
+  # and processes it.
   class ThreadPool
     class ForceShutdown < RuntimeError
     end
@@ -47,6 +47,7 @@ module Puma
       @shutdown = false

       @trim_requested = 0
+      @out_of_band_pending = false

       @workers = []
@@ -54,14 +55,20 @@ module Puma
       @reaper = nil

       @mutex.synchronize do
-        @min.times { spawn_thread }
+        @min.times do
+          spawn_thread
+          @not_full.wait(@mutex)
+        end
       end

       @clean_thread_locals = false
+      @force_shutdown = false
+      @shutdown_mutex = Mutex.new
     end

     attr_reader :spawned, :trim_requested, :waiting
     attr_accessor :clean_thread_locals
+    attr_accessor :out_of_band_hook # @version 5.0.0

     def self.clean_thread_locals
       Thread.current.keys.each do |key| # rubocop: disable Performance/HashEachMethods
@@ -72,13 +79,20 @@ module Puma
     # How many objects have yet to be processed by the pool?
     #
     def backlog
-      @mutex.synchronize { @todo.size }
+      with_mutex { @todo.size }
     end

+    # @!attribute [r] pool_capacity
     def pool_capacity
       waiting + (@max - spawned)
     end

+    # @!attribute [r] busy_threads
+    # @version 5.0.0
+    def busy_threads
+      with_mutex { @spawned - @waiting + @todo.size }
+    end
+
     # :nodoc:
     #
     # Must be called with @mutex held!
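Note: busy_threads counts occupied threads plus accepted-but-unclaimed work, i.e. spawned - waiting + todo.size. A worked example with invented pool numbers:

    spawned = 16   # threads created
    waiting = 4    # threads idle in not_empty.wait
    todo    = 3    # queued work not yet picked up

    busy = spawned - waiting + todo   # => 15
    puts busy < 16 ? 'pool can accept more work' : 'pool is saturated'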
@@ -99,48 +113,40 @@ module Puma
         while true
           work = nil

-          continue = true
-
           mutex.synchronize do
             while todo.empty?
               if @trim_requested > 0
                 @trim_requested -= 1
-                continue = false
-                not_full.signal
-                break
-              end
-
-              if @shutdown
-                continue = false
-                break
+                @spawned -= 1
+                @workers.delete th
+                Thread.exit
               end

               @waiting += 1
+              if @out_of_band_pending && trigger_out_of_band_hook
+                @out_of_band_pending = false
+              end
               not_full.signal
-              not_empty.wait mutex
-              @waiting -= 1
+              begin
+                not_empty.wait mutex
+              ensure
+                @waiting -= 1
+              end
             end

-            work = todo.shift if continue
+            work = todo.shift
           end

-          break unless continue
-
           if @clean_thread_locals
             ThreadPool.clean_thread_locals
           end

           begin
-            block.call(work, *extra)
+            @out_of_band_pending = true if block.call(work, *extra)
           rescue Exception => e
             STDERR.puts "Error reached top of thread-pool: #{e.message} (#{e.class})"
           end
         end
-
-        mutex.synchronize do
-          @spawned -= 1
-          @workers.delete th
-        end
       end

       @workers << th
@@ -150,9 +156,32 @@ module Puma
     private :spawn_thread

+    # @version 5.0.0
+    def trigger_out_of_band_hook
+      return false unless out_of_band_hook && out_of_band_hook.any?
+
+      # we execute on idle hook when all threads are free
+      return false unless @spawned == @waiting
+
+      out_of_band_hook.each(&:call)
+      true
+    rescue Exception => e
+      STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})"
+      true
+    end
+
+    private :trigger_out_of_band_hook
+
+    # @version 5.0.0
+    def with_mutex(&block)
+      @mutex.owned? ?
+        yield :
+        @mutex.synchronize(&block)
+    end
+
     # Add +work+ to the todo list for a Thread to pickup and process.
     def <<(work)
-      @mutex.synchronize do
+      with_mutex do
         if @shutdown
           raise "Unable to add work while shutting down"
         end
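Note: out_of_band_hook is what backs Puma 5's out_of_band config option; hooks run only when every thread is idle (@spawned == @waiting). A sketch of registering one in a config file, with GC as an example payload:

    # config/puma.rb (illustrative)
    out_of_band do
      # Runs between requests, only when no thread is busy, so the pause
      # should not add latency to an in-flight response.
      GC.start full_mark: false, immediate_sweep: false
    end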
@@ -191,13 +220,10 @@ module Puma
     # then the `@todo` array would stay the same size as the reactor works
     # to try to buffer the request. In that scenario the next call to this
     # method would not block and another request would be added into the reactor
-    # by the server. This would continue until a fully bufferend request
+    # by the server. This would continue until a fully buffered request
     # makes it through the reactor and can then be processed by the thread pool.
-    #
-    # Returns the current number of busy threads, or +nil+ if shutting down.
-    #
     def wait_until_not_full
-      @mutex.synchronize do
+      with_mutex do
         while true
           return if @shutdown
@@ -205,21 +231,42 @@ module Puma
           # is work queued that cannot be handled by waiting
           # threads, then accept more work until we would
           # spin up the max number of threads.
-          busy_threads = @spawned - @waiting + @todo.size
-          return busy_threads if @max > busy_threads
+          return if busy_threads < @max

          @not_full.wait @mutex
        end
      end
    end

-    # If too many threads are in the pool, tell one to finish go ahead
+    # @version 5.0.0
+    def wait_for_less_busy_worker(delay_s)
+      return unless delay_s && delay_s > 0
+
+      # Ruby MRI does GVL, this can result
+      # in processing contention when multiple threads
+      # (requests) are running concurrently
+      return unless Puma.mri?
+
+      with_mutex do
+        return if @shutdown
+
+        # do not delay, if we are not busy
+        return unless busy_threads > 0
+
+        # this will be signaled once a request finishes,
+        # which can happen earlier than delay
+        @not_full.wait @mutex, delay_s
+      end
+    end
+
+    # If there are any free threads in the pool, tell one to go ahead
     # and exit. If +force+ is true, then a trim request is requested
     # even if all threads are being utilized.
     #
     def trim(force=false)
-      @mutex.synchronize do
-        if (force or @waiting > 0) and @spawned - @trim_requested > @min
+      with_mutex do
+        free = @waiting - @todo.size
+        if (force or free > 0) and @spawned - @trim_requested > @min
           @trim_requested += 1
           @not_empty.signal
         end
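Note: wait_for_less_busy_worker is surfaced as a cluster-mode config option in Puma 5, and is MRI-only because of the GVL contention described in the comment above. A sketch of enabling it; 0.005 is the default delay:

    # config/puma.rb (illustrative)
    workers 2
    # Each worker pauses up to 5 ms before accepting a connection, giving
    # a less busy worker a chance to win the accept race.
    wait_for_less_busy_worker 0.005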
@@ -229,7 +276,7 @@ module Puma
     # If there are dead threads in the pool make them go away while decreasing
     # spawned counter so that new healthy threads could be created again.
     def reap
-      @mutex.synchronize do
+      with_mutex do
        dead_workers = @workers.reject(&:alive?)

        dead_workers.each do |worker|
@@ -280,11 +327,28 @@ module Puma
       @reaper.start!
     end

+    # Allows ThreadPool::ForceShutdown to be raised within the
+    # provided block if the thread is forced to shutdown during execution.
+    def with_force_shutdown
+      t = Thread.current
+      @shutdown_mutex.synchronize do
+        raise ForceShutdown if @force_shutdown
+        t[:with_force_shutdown] = true
+      end
+      yield
+    ensure
+      t[:with_force_shutdown] = false
+    end
+
     # Tell all threads in the pool to exit and wait for them to finish.
+    # Wait +timeout+ seconds then raise +ForceShutdown+ in remaining threads.
+    # Next, wait an extra +grace+ seconds then force-kill remaining threads.
+    # Finally, wait +kill_grace+ seconds for remaining threads to exit.
     #
     def shutdown(timeout=-1)
-      threads = @mutex.synchronize do
+      threads = with_mutex do
         @shutdown = true
+        @trim_requested = @spawned
         @not_empty.broadcast
         @not_full.broadcast
@@ -298,27 +362,29 @@ module Puma
         # Wait for threads to finish without force shutdown.
         threads.each(&:join)
       else
-        # Wait for threads to finish after n attempts (+timeout+).
-        # If threads are still running, it will forcefully kill them.
-        timeout.times do
-          threads.delete_if do |t|
-            t.join 1
-          end
-
-          if threads.empty?
-            break
-          else
-            sleep 1
+        join = ->(inner_timeout) do
+          start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+          threads.reject! do |t|
+            elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+            t.join inner_timeout - elapsed
           end
         end

-        threads.each do |t|
-          t.raise ForceShutdown
-        end
+        # Wait +timeout+ seconds for threads to finish.
+        join.call(timeout)

-        threads.each do |t|
-          t.join SHUTDOWN_GRACE_TIME
+        # If threads are still running, raise ForceShutdown and wait to finish.
+        @shutdown_mutex.synchronize do
+          @force_shutdown = true
+          threads.each do |t|
+            t.raise ForceShutdown if t[:with_force_shutdown]
+          end
         end
+        join.call(SHUTDOWN_GRACE_TIME)
+
+        # If threads are _still_ running, forcefully kill them and wait to finish.
+        threads.each(&:kill)
+        join.call(1)
       end

       @spawned = 0
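Note: the rewritten branch degrades in three stages: join for timeout seconds, raise ForceShutdown and join for SHUTDOWN_GRACE_TIME, then kill and join once more. The deadline-sharing lambda can be exercised standalone; thread payloads here are made up:

    threads = [0.1, 0.5, 5].map { |s| Thread.new { sleep s } }

    join = ->(timeout) do
      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      threads.reject! do |t|
        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
        t.join(timeout - elapsed)   # one shared deadline across all joins
      end
    end

    join.call(1.0)                           # ~1 s total, not 1 s per thread
    puts "missed deadline: #{threads.size}"  # => 1 (the 5 s sleeper)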