puma 5.1.1 → 5.3.1

Potentially problematic release.

Files changed (45)
  1. checksums.yaml +4 -4
  2. data/History.md +131 -10
  3. data/README.md +24 -2
  4. data/docs/architecture.md +22 -18
  5. data/docs/compile_options.md +6 -6
  6. data/docs/deployment.md +2 -2
  7. data/docs/jungle/rc.d/README.md +1 -1
  8. data/docs/kubernetes.md +66 -0
  9. data/docs/plugins.md +2 -2
  10. data/docs/rails_dev_mode.md +29 -0
  11. data/docs/restart.md +1 -1
  12. data/docs/stats.md +142 -0
  13. data/docs/systemd.md +1 -1
  14. data/ext/puma_http11/extconf.rb +14 -0
  15. data/ext/puma_http11/http11_parser.c +19 -21
  16. data/ext/puma_http11/http11_parser.h +1 -1
  17. data/ext/puma_http11/http11_parser.java.rl +1 -1
  18. data/ext/puma_http11/http11_parser.rl +1 -1
  19. data/ext/puma_http11/mini_ssl.c +162 -84
  20. data/ext/puma_http11/org/jruby/puma/Http11Parser.java +5 -7
  21. data/ext/puma_http11/puma_http11.c +2 -2
  22. data/lib/puma.rb +34 -8
  23. data/lib/puma/binder.rb +50 -43
  24. data/lib/puma/client.rb +5 -3
  25. data/lib/puma/cluster.rb +40 -8
  26. data/lib/puma/cluster/worker_handle.rb +4 -0
  27. data/lib/puma/configuration.rb +4 -1
  28. data/lib/puma/const.rb +3 -3
  29. data/lib/puma/control_cli.rb +5 -1
  30. data/lib/puma/detect.rb +14 -10
  31. data/lib/puma/dsl.rb +56 -4
  32. data/lib/puma/error_logger.rb +12 -5
  33. data/lib/puma/events.rb +2 -3
  34. data/lib/puma/launcher.rb +4 -3
  35. data/lib/puma/minissl.rb +48 -17
  36. data/lib/puma/minissl/context_builder.rb +6 -0
  37. data/lib/puma/null_io.rb +12 -0
  38. data/lib/puma/queue_close.rb +7 -7
  39. data/lib/puma/reactor.rb +7 -2
  40. data/lib/puma/request.rb +9 -4
  41. data/lib/puma/runner.rb +8 -3
  42. data/lib/puma/server.rb +46 -112
  43. data/lib/puma/thread_pool.rb +4 -3
  44. data/lib/rack/handler/puma.rb +1 -0
  45. metadata +6 -3
data/lib/puma/minissl/context_builder.rb CHANGED
@@ -62,6 +62,12 @@ module Puma
        end
      end
 
+     if params['verification_flags']
+       ctx.verification_flags = params['verification_flags'].split(',').
+         map { |flag| MiniSSL::VERIFICATION_FLAGS.fetch(flag) }.
+         inject { |sum, flag| sum ? sum | flag : flag }
+     end
+
      ctx
    end
 
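For context, a hedged sketch of how the new option might be supplied from a config file. The paths and flag names below are placeholders; per the hunk above, the value is a comma-separated list of names that are looked up in MiniSSL::VERIFICATION_FLAGS and OR'd together.

  # config/puma.rb -- illustrative sketch; paths and flag names are placeholders
  ssl_bind '0.0.0.0', '9292',
    key: 'path/to/server.key',
    cert: 'path/to/server.crt',
    verify_mode: 'peer',
    # each name is fetched from MiniSSL::VERIFICATION_FLAGS and OR'd together,
    # exactly as the context_builder.rb hunk above does
    verification_flags: 'PARTIAL_CHAIN,CRL_CHECK'
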
data/lib/puma/null_io.rb CHANGED
@@ -9,6 +9,10 @@ module Puma
      nil
    end
 
+   def string
+     ""
+   end
+
    def each
    end
 
@@ -32,6 +36,10 @@ module Puma
      true
    end
 
+   def sync
+     true
+   end
+
    def sync=(v)
    end
 
@@ -40,5 +48,9 @@ module Puma
 
    def write(*ary)
    end
+
+   def flush
+     self
+   end
  end
end
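The three additions round out NullIO's IO duck type (Puma uses NullIO as a stand-in for empty request bodies). A minimal behavior sketch, assuming only that the class is loaded from puma/null_io:

  require 'puma/null_io'

  io = Puma::NullIO.new
  io.string  # => ""   (mirrors StringIO#string for an empty body)
  io.sync    # => true (nothing is ever buffered, so the stream is always "synced")
  io.flush   # => io   (IO#flush conventionally returns the stream itself)
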
data/lib/puma/queue_close.rb CHANGED
@@ -5,22 +5,22 @@ module Puma
  # Add a simple implementation for earlier Ruby versions.
  #
  module QueueClose
-   def initialize
-     @closed = false
-     super
-   end
    def close
+     num_waiting.times {push nil}
      @closed = true
    end
    def closed?
-     @closed
+     @closed ||= false
    end
    def push(object)
-     @closed ||= false
-     raise ClosedQueueError if @closed
+     raise ClosedQueueError if closed?
      super
    end
    alias << push
+   def pop(non_block=false)
+     return nil if !non_block && closed? && empty?
+     super
+   end
  end
  ::Queue.prepend QueueClose
end
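A behavior sketch of the reworked polyfill, which is only prepended when ::Queue lacks #close (Ruby < 2.3); native Queue#close on newer Rubies already behaves this way:

  require 'puma/queue_close' unless ::Queue.instance_methods.include?(:close)

  q = Queue.new
  q << :job
  q.close
  q.closed?  # => true
  q.pop      # => :job (items queued before close still drain)
  q.pop      # => nil  (closed and empty, instead of blocking forever)
  q << :late # raises ClosedQueueError, per the push guard above
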
data/lib/puma/reactor.rb CHANGED
@@ -3,6 +3,8 @@
 require 'puma/queue_close' unless ::Queue.instance_methods.include? :close
 
 module Puma
+  class UnsupportedBackend < StandardError; end
+
   # Monitors a collection of IO objects, calling a block whenever
   # any monitored object either receives data or times out, or when the Reactor shuts down.
   #
@@ -18,9 +20,12 @@ module Puma
    # Create a new Reactor to monitor IO objects added by #add.
    # The provided block will be invoked when an IO has data available to read,
    # its timeout elapses, or when the Reactor shuts down.
-   def initialize(&block)
+   def initialize(backend, &block)
      require 'nio'
-     @selector = NIO::Selector.new
+     unless backend == :auto || NIO::Selector.backends.include?(backend)
+       raise "unsupported IO selector backend: #{backend} (available backends: #{NIO::Selector.backends.join(', ')})"
+     end
+     @selector = backend == :auto ? NIO::Selector.new : NIO::Selector.new(backend)
      @input = Queue.new
      @timeouts = []
      @block = block
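The new backend argument is fed from the io_selector_backend option (see the server.rb hunks below). A hedged config sketch; the setter name assumes the matching dsl.rb change (+56 -4) in this release:

  # config/puma.rb -- assumed DSL setter; the value must be :auto or one of
  # NIO::Selector.backends for your platform, e.g. :epoll, :kqueue, or :select
  io_selector_backend :epoll
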
data/lib/puma/request.rb CHANGED
@@ -30,7 +30,7 @@ module Puma
    #
    def handle_request(client, lines)
      env = client.env
-     io = client.io
+     io = client.io # io may be a MiniSSL::Socket
 
      return false if closed_socket?(io)
 
@@ -148,8 +148,9 @@ module Puma
      res_body.each do |part|
        next if part.bytesize.zero?
        if chunked
-         str = part.bytesize.to_s(16) << line_ending << part << line_ending
-         fast_write io, str
+         fast_write io, (part.bytesize.to_s(16) << line_ending)
+         fast_write io, part # part may have different encoding
+         fast_write io, line_ending
        else
          fast_write io, part
        end
@@ -230,7 +231,11 @@ module Puma
    #
    def normalize_env(env, client)
      if host = env[HTTP_HOST]
-       if colon = host.index(":")
+       # host can be a hostname, ipv4 or bracketed ipv6. Followed by an optional port.
+       if colon = host.rindex("]:") # IPV6 with port
+         env[SERVER_NAME] = host[0, colon+1]
+         env[SERVER_PORT] = host[colon+2, host.bytesize]
+       elsif !host.start_with?("[") && colon = host.index(":") # not hostname or IPV4 with port
          env[SERVER_NAME] = host[0, colon]
          env[SERVER_PORT] = host[colon+1, host.bytesize]
        else
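A standalone sketch of the new HTTP_HOST branches, using a hypothetical split_host helper to show what ends up in SERVER_NAME and SERVER_PORT:

  # Mirrors the branch logic above; split_host is a made-up name for illustration.
  def split_host(host)
    if colon = host.rindex("]:")                             # bracketed IPv6 with port
      [host[0, colon + 1], host[colon + 2, host.bytesize]]
    elsif !host.start_with?("[") && colon = host.index(":")  # hostname/IPv4 with port
      [host[0, colon], host[colon + 1, host.bytesize]]
    else
      [host, nil]                                            # no port given
    end
  end

  split_host("example.com:9292")  # => ["example.com", "9292"]
  split_host("[::1]:9292")        # => ["[::1]", "9292"]
  split_host("[::1]")             # => ["[::1]", nil]
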
data/lib/puma/runner.rb CHANGED
@@ -55,7 +55,7 @@ module Puma
      app = Puma::App::Status.new @launcher, token
 
      control = Puma::Server.new app, @launcher.events,
-       { min_threads: 0, max_threads: 1 }
+       { min_threads: 0, max_threads: 1, queue_requests: false }
 
      control.binder.parse [str], self, 'Starting control server'
 
@@ -113,8 +113,8 @@ module Puma
      end
 
      STDOUT.reopen stdout, (append ? "a" : "w")
-     STDOUT.sync = true
      STDOUT.puts "=== puma startup: #{Time.now} ==="
+     STDOUT.flush unless STDOUT.sync
    end
 
    if stderr
@@ -123,8 +123,13 @@ module Puma
      end
 
      STDERR.reopen stderr, (append ? "a" : "w")
-     STDERR.sync = true
      STDERR.puts "=== puma startup: #{Time.now} ==="
+     STDERR.flush unless STDERR.sync
+   end
+
+   if @options[:mutate_stdout_and_stderr_to_sync_on_write]
+     STDOUT.sync = true
+     STDERR.sync = true
    end
  end
 
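The unconditional sync = true is gone: redirected streams are flushed after the startup banner instead, and the old always-sync behavior moves behind the mutate_stdout_and_stderr_to_sync_on_write option. A hedged config sketch; the setter name assumes a matching dsl.rb addition in this release:

  # config/puma.rb
  stdout_redirect 'log/stdout.log', 'log/stderr.log', true  # append mode
  # assumed DSL setter for the option key shown in the hunk above; turns the
  # old always-sync behavior back on for the redirected streams
  mutate_stdout_and_stderr_to_sync_on_write true
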
data/lib/puma/server.rb CHANGED
@@ -84,13 +84,14 @@ module Puma
 
      @options = options
 
-     @early_hints = options.fetch :early_hints, nil
-     @first_data_timeout = options.fetch :first_data_timeout, FIRST_DATA_TIMEOUT
-     @min_threads = options.fetch :min_threads, 0
-     @max_threads = options.fetch :max_threads , (Puma.mri? ? 5 : 16)
-     @persistent_timeout = options.fetch :persistent_timeout, PERSISTENT_TIMEOUT
-     @queue_requests = options.fetch :queue_requests, true
-     @max_fast_inline = options.fetch :max_fast_inline, MAX_FAST_INLINE
+     @early_hints         = options.fetch :early_hints, nil
+     @first_data_timeout  = options.fetch :first_data_timeout, FIRST_DATA_TIMEOUT
+     @min_threads         = options.fetch :min_threads, 0
+     @max_threads         = options.fetch :max_threads , (Puma.mri? ? 5 : 16)
+     @persistent_timeout  = options.fetch :persistent_timeout, PERSISTENT_TIMEOUT
+     @queue_requests      = options.fetch :queue_requests, true
+     @max_fast_inline     = options.fetch :max_fast_inline, MAX_FAST_INLINE
+     @io_selector_backend = options.fetch :io_selector_backend, :auto
 
      temp = !!(@options[:environment] =~ /\A(development|test)\z/)
      @leak_stack_on_error = @options[:environment] ? temp : true
@@ -119,17 +120,13 @@ module Puma
    # :nodoc:
    # @version 5.0.0
    def tcp_cork_supported?
-     RbConfig::CONFIG['host_os'] =~ /linux/ &&
-       Socket.const_defined?(:IPPROTO_TCP) &&
-       Socket.const_defined?(:TCP_CORK)
+     Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP)
    end
 
    # :nodoc:
    # @version 5.0.0
    def closed_socket_supported?
-     RbConfig::CONFIG['host_os'] =~ /linux/ &&
-       Socket.const_defined?(:IPPROTO_TCP) &&
-       Socket.const_defined?(:TCP_INFO)
+     Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP)
    end
    private :tcp_cork_supported?
    private :closed_socket_supported?
@@ -137,24 +134,25 @@ module Puma
 
    # On Linux, use TCP_CORK to better control how the TCP stack
    # packetizes our stream. This improves both latency and throughput.
+   # socket parameter may be an MiniSSL::Socket, so use to_io
    #
    if tcp_cork_supported?
-     UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
-
      # 6 == Socket::IPPROTO_TCP
      # 3 == TCP_CORK
      # 1/0 == turn on/off
      def cork_socket(socket)
+       skt = socket.to_io
        begin
-         socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if socket.kind_of? TCPSocket
+         skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if skt.kind_of? TCPSocket
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        end
      end
 
      def uncork_socket(socket)
+       skt = socket.to_io
        begin
-         socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if socket.kind_of? TCPSocket
+         skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if skt.kind_of? TCPSocket
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        end
@@ -168,12 +166,14 @@ module Puma
    end
 
    if closed_socket_supported?
+     UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
+
      def closed_socket?(socket)
-       return false unless socket.kind_of? TCPSocket
-       return false unless @precheck_closing
+       skt = socket.to_io
+       return false unless skt.kind_of?(TCPSocket) && @precheck_closing
 
        begin
-         tcp_info = socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO)
+         tcp_info = skt.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO)
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
          @precheck_closing = false
@@ -237,7 +237,7 @@ module Puma
      @thread_pool.clean_thread_locals = @options[:clean_thread_locals]
 
      if @queue_requests
-       @reactor = Reactor.new(&method(:reactor_wakeup))
+       @reactor = Reactor.new(@io_selector_backend, &method(:reactor_wakeup))
        @reactor.run
      end
 
@@ -295,6 +295,9 @@ module Puma
        @thread_pool << client
      elsif shutdown || client.timeout == 0
        client.timeout!
+     else
+       client.set_timeout(@first_data_timeout)
+       false
      end
    rescue StandardError => e
      client_error(e, client)
@@ -308,6 +311,7 @@ module Puma
      sockets = [check] + @binder.ios
      pool = @thread_pool
      queue_requests = @queue_requests
+     drain = @options[:drain_on_shutdown] ? 0 : nil
 
      remote_addr_value = nil
      remote_addr_header = nil
@@ -319,22 +323,23 @@ module Puma
        remote_addr_header = @options[:remote_address_header]
      end
 
-     while @status == :run
+     while @status == :run || (drain && shutting_down?)
        begin
-         ios = IO.select sockets
+         ios = IO.select sockets, nil, nil, (shutting_down? ? 0 : nil)
+         break unless ios
          ios.first.each do |sock|
            if sock == check
              break if handle_check
            else
              pool.wait_until_not_full
-             pool.wait_for_less_busy_worker(
-               @options[:wait_for_less_busy_worker].to_f)
+             pool.wait_for_less_busy_worker(@options[:wait_for_less_busy_worker])
 
              io = begin
                sock.accept_nonblock
              rescue IO::WaitReadable
                next
              end
+             drain += 1 if shutting_down?
              client = Client.new io, @binder.env(sock)
              if remote_addr_value
                client.peerip = remote_addr_value
@@ -349,6 +354,7 @@ module Puma
        end
      end
 
+     @events.debug "Drained #{drain} additional connections." if drain
      @events.fire :state, @status
 
      if queue_requests
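Draining now happens inside the main accept loop instead of the separate post-shutdown loop removed further down. The switch itself is the existing drain_on_shutdown option from Puma 5.0:

  # config/puma.rb -- with this change, connections accepted while shutting
  # down are counted in `drain` and served by the loop above
  drain_on_shutdown true
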
@@ -441,15 +447,20 @@ module Puma
 
        requests += 1
 
-       check_for_more_data = @status == :run
+       # Closing keepalive sockets after they've made a reasonable
+       # number of requests allows Puma to service many connections
+       # fairly, even when the number of concurrent connections exceeds
+       # the size of the threadpool. It also allows cluster mode Pumas
+       # to keep load evenly distributed across workers, because clients
+       # are randomly assigned a new worker when opening a new connection.
+       #
+       # Previously, Puma would kick connections in this conditional back
+       # to the reactor. However, because this causes the todo set to increase
+       # in size, the wait_until_full mutex would never unlock, leaving
+       # any additional connections unserviced.
+       break if requests >= @max_fast_inline
 
-       if requests >= @max_fast_inline
-         # This will mean that reset will only try to use the data it already
-         # has buffered and won't try to read more data. What this means is that
-         # every client, independent of their request speed, gets treated like a slow
-         # one once every max_fast_inline requests.
-         check_for_more_data = false
-       end
+       check_for_more_data = @status == :run
 
        next_request_ready = with_force_shutdown(client) do
          client.reset(check_for_more_data)
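The threshold for this break is the existing max_fast_inline option, which has had a DSL setter since Puma 5.0. A tuning sketch:

  # config/puma.rb -- after this change a keepalive connection is closed, not
  # parked back in the reactor, once it has served this many requests in a row
  max_fast_inline 5
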
@@ -494,62 +505,6 @@ module Puma
 
      # :nocov:
 
-     # Given the request +env+ from +client+ and the partial body +body+
-     # plus a potential Content-Length value +cl+, finish reading
-     # the body and return it.
-     #
-     # If the body is larger than MAX_BODY, a Tempfile object is used
-     # for the body, otherwise a StringIO is used.
-     # @deprecated 6.0.0
-     #
-     def read_body(env, client, body, cl)
-       content_length = cl.to_i
-
-       remain = content_length - body.bytesize
-
-       return StringIO.new(body) if remain <= 0
-
-       # Use a Tempfile if there is a lot of data left
-       if remain > MAX_BODY
-         stream = Tempfile.new(Const::PUMA_TMP_BASE)
-         stream.binmode
-       else
-         # The body[0,0] trick is to get an empty string in the same
-         # encoding as body.
-         stream = StringIO.new body[0,0]
-       end
-
-       stream.write body
-
-       # Read an odd sized chunk so we can read even sized ones
-       # after this
-       chunk = client.readpartial(remain % CHUNK_SIZE)
-
-       # No chunk means a closed socket
-       unless chunk
-         stream.close
-         return nil
-       end
-
-       remain -= stream.write(chunk)
-
-       # Read the rest of the chunks
-       while remain > 0
-         chunk = client.readpartial(CHUNK_SIZE)
-         unless chunk
-           stream.close
-           return nil
-         end
-
-         remain -= stream.write(chunk)
-       end
-
-       stream.rewind
-
-       return stream
-     end
-     # :nocov:
-
      # Handle various error types thrown by Client I/O operations.
      def client_error(e, client)
        # Swallow, do not log
@@ -582,7 +537,8 @@ module Puma
      end
 
      if @leak_stack_on_error
-       [status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{e.backtrace.join("\n")}"]]
+       backtrace = e.backtrace.nil? ? '<no backtrace available>' : e.backtrace.join("\n")
+       [status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{backtrace}"]]
      else
        [status, {}, ["An unhandled lowlevel error occurred. The application logs may have details.\n"]]
      end
@@ -606,28 +562,6 @@ module Puma
        $stdout.syswrite "#{pid}: === End thread backtrace dump ===\n"
      end
 
-     if @options[:drain_on_shutdown]
-       count = 0
-
-       while true
-         ios = IO.select @binder.ios, nil, nil, 0
-         break unless ios
-
-         ios.first.each do |sock|
-           begin
-             if io = sock.accept_nonblock
-               count += 1
-               client = Client.new io, @binder.env(sock)
-               @thread_pool << client
-             end
-           rescue SystemCallError
-           end
-         end
-       end
-
-       @events.debug "Drained #{count} additional connections."
-     end
-
      if @status != :restart
        @binder.close
      end
data/lib/puma/thread_pool.rb CHANGED
@@ -13,7 +13,7 @@ module Puma
    # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array.
    #
    # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array
-   # and proceses it.
+   # and processes it.
    class ThreadPool
      class ForceShutdown < RuntimeError
      end
@@ -220,7 +220,7 @@ module Puma
      # then the `@todo` array would stay the same size as the reactor works
      # to try to buffer the request. In that scenario the next call to this
      # method would not block and another request would be added into the reactor
-     # by the server. This would continue until a fully bufferend request
+     # by the server. This would continue until a fully buffered request
      # makes it through the reactor and can then be processed by the thread pool.
      def wait_until_not_full
        with_mutex do
@@ -240,11 +240,12 @@ module Puma
 
      # @version 5.0.0
      def wait_for_less_busy_worker(delay_s)
+       return unless delay_s && delay_s > 0
+
        # Ruby MRI does GVL, this can result
        # in processing contention when multiple threads
        # (requests) are running concurrently
        return unless Puma.mri?
-       return unless delay_s > 0
 
        with_mutex do
          return if @shutdown
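The new nil guard lets the server pass options[:wait_for_less_busy_worker] through untouched (note the dropped .to_f at the call site in the server.rb hunk above). A config sketch using the existing Puma 5.0 setter:

  # config/puma.rb -- MRI-only backoff between accepts; per the guards above
  # it is a no-op when the delay is nil or zero
  wait_for_less_busy_worker 0.005
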