puma 4.3.12 → 6.3.1
- checksums.yaml +4 -4
- data/History.md +1729 -521
- data/LICENSE +23 -20
- data/README.md +169 -45
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +63 -26
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +60 -69
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/{tools → docs}/jungle/rc.d/README.md +1 -1
- data/{tools → docs}/jungle/rc.d/puma +2 -2
- data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +15 -15
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +46 -23
- data/docs/signals.md +13 -11
- data/docs/stats.md +142 -0
- data/docs/systemd.md +84 -128
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -4
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +49 -12
- data/ext/puma_http11/http11_parser.c +46 -48
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +3 -3
- data/ext/puma_http11/http11_parser.rl +3 -3
- data/ext/puma_http11/http11_parser_common.rl +2 -2
- data/ext/puma_http11/mini_ssl.c +278 -93
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +6 -6
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +4 -6
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +241 -96
- data/ext/puma_http11/puma_http11.c +46 -57
- data/lib/puma/app/status.rb +53 -39
- data/lib/puma/binder.rb +237 -121
- data/lib/puma/cli.rb +34 -34
- data/lib/puma/client.rb +172 -98
- data/lib/puma/cluster/worker.rb +180 -0
- data/lib/puma/cluster/worker_handle.rb +97 -0
- data/lib/puma/cluster.rb +226 -231
- data/lib/puma/commonlogger.rb +21 -14
- data/lib/puma/configuration.rb +114 -87
- data/lib/puma/const.rb +139 -95
- data/lib/puma/control_cli.rb +99 -79
- data/lib/puma/detect.rb +33 -2
- data/lib/puma/dsl.rb +516 -110
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +16 -115
- data/lib/puma/io_buffer.rb +44 -2
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +164 -155
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +36 -19
- data/lib/puma/minissl.rb +230 -55
- data/lib/puma/null_io.rb +18 -1
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +1 -1
- data/lib/puma/plugin.rb +3 -12
- data/lib/puma/rack/builder.rb +7 -11
- data/lib/puma/rack/urlmap.rb +0 -0
- data/lib/puma/rack_default.rb +19 -4
- data/lib/puma/reactor.rb +93 -368
- data/lib/puma/request.rb +671 -0
- data/lib/puma/runner.rb +92 -75
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +321 -794
- data/lib/puma/single.rb +20 -74
- data/lib/puma/state_file.rb +45 -8
- data/lib/puma/thread_pool.rb +140 -68
- data/lib/puma/util.rb +21 -4
- data/lib/puma.rb +54 -7
- data/lib/rack/handler/puma.rb +113 -87
- data/tools/{docker/Dockerfile → Dockerfile} +1 -1
- data/tools/trickletest.rb +0 -0
- metadata +33 -24
- data/docs/tcp_mode.md +0 -96
- data/ext/puma_http11/io_buffer.c +0 -155
- data/ext/puma_http11/org/jruby/puma/IOBuffer.java +0 -72
- data/lib/puma/accept_nonblock.rb +0 -29
- data/lib/puma/tcp_logger.rb +0 -41
- data/tools/jungle/README.md +0 -19
- data/tools/jungle/init.d/README.md +0 -61
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/server.rb
CHANGED
```diff
@@ -2,19 +2,19 @@
 
 require 'stringio'
 
-[… old lines 5–14 removed; their text is not shown in this view …]
-require 'puma/puma_http11'
+require_relative 'thread_pool'
+require_relative 'const'
+require_relative 'log_writer'
+require_relative 'events'
+require_relative 'null_io'
+require_relative 'reactor'
+require_relative 'client'
+require_relative 'binder'
+require_relative 'util'
+require_relative 'request'
 
 require 'socket'
+require 'io/wait' unless Puma::HAS_NATIVE_IO_WAIT
 require 'forwardable'
 
 module Puma
```
```diff
@@ -30,108 +30,168 @@ module Puma
   #
   # Each `Puma::Server` will have one reactor and one thread pool.
   class Server
-
     include Puma::Const
+    include Request
     extend Forwardable
 
     attr_reader :thread
+    attr_reader :log_writer
     attr_reader :events
+    attr_reader :min_threads, :max_threads # for #stats
+    attr_reader :requests_count # @version 5.0.0
+
+    # @todo the following may be deprecated in the future
+    attr_reader :auto_trim_time, :early_hints, :first_data_timeout,
+      :leak_stack_on_error,
+      :persistent_timeout, :reaping_time
+
     attr_accessor :app
+    attr_accessor :binder
 
-
-    attr_accessor :reaping_time
-    attr_accessor :first_data_timeout
+    def_delegators :@binder, :add_tcp_listener, :add_ssl_listener,
+      :add_unix_listener, :connected_ports
+
+    THREAD_LOCAL_KEY = :puma_server
 
     # Create a server for the rack app +app+.
     #
-    # +…
-    #
+    # +log_writer+ is a Puma::LogWriter object used to log info and error messages.
+    #
+    # +events+ is a Puma::Events object used to notify application status events.
     #
     # Server#run returns a thread that you can join on to wait for the server
     # to do its work.
     #
-
+    # @note Several instance variables exist so they are available for testing,
+    # and have default values set via +fetch+. Normally the values are set via
+    # `::Puma::Configuration.puma_default_options`.
+    #
+    # @note The `events` parameter is set to nil, and set to `Events.new` in code.
+    # Often `options` needs to be passed, but `events` does not. Using nil allows
+    # calling code to not require events.rb.
+    #
+    def initialize(app, events = nil, options = {})
       @app = app
-      @events = events
-
-      @check, @notify = Puma::Util.pipe
+      @events = events || Events.new
 
+      @check, @notify = nil
       @status = :stop
 
-      @min_threads = 0
-      @max_threads = 16
-      @auto_trim_time = 30
-      @reaping_time = 1
-
       @thread = nil
       @thread_pool = nil
-      @early_hints = nil
 
-      @…
-
+      @options = if options.is_a?(UserFileDefaultOptions)
+        options
+      else
+        UserFileDefaultOptions.new(options, Configuration::DEFAULTS)
+      end
+
+      @log_writer = @options.fetch :log_writer, LogWriter.stdio
+      @early_hints = @options[:early_hints]
+      @first_data_timeout = @options[:first_data_timeout]
+      @min_threads = @options[:min_threads]
+      @max_threads = @options[:max_threads]
+      @persistent_timeout = @options[:persistent_timeout]
+      @queue_requests = @options[:queue_requests]
+      @max_fast_inline = @options[:max_fast_inline]
+      @io_selector_backend = @options[:io_selector_backend]
+      @http_content_length_limit = @options[:http_content_length_limit]
+
+      # make this a hash, since we prefer `key?` over `include?`
+      @supported_http_methods =
+        if @options[:supported_http_methods] == :any
+          :any
+        else
+          if (ary = @options[:supported_http_methods])
+            ary
+          else
+            SUPPORTED_HTTP_METHODS
+          end.sort.product([nil]).to_h.freeze
+        end
 
-
+      temp = !!(@options[:environment] =~ /\A(development|test)\z/)
+      @leak_stack_on_error = @options[:environment] ? temp : true
 
-      @…
-      @queue_requests = options[:queue_requests].nil? ? true : options[:queue_requests]
+      @binder = Binder.new(log_writer)
 
       ENV['RACK_ENV'] ||= "development"
 
       @mode = :http
 
       @precheck_closing = true
-    end
-
-    attr_accessor :binder, :leak_stack_on_error, :early_hints
 
-
+      @requests_count = 0
+    end
 
     def inherit_binder(bind)
       @binder = bind
     end
 
-
-
+    class << self
+      # @!attribute [r] current
+      def current
+        Thread.current[THREAD_LOCAL_KEY]
+      end
+
+      # :nodoc:
+      # @version 5.0.0
+      def tcp_cork_supported?
+        Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP)
+      end
+
+      # :nodoc:
+      # @version 5.0.0
+      def closed_socket_supported?
+        Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP)
+      end
+      private :tcp_cork_supported?
+      private :closed_socket_supported?
     end
 
     # On Linux, use TCP_CORK to better control how the TCP stack
     # packetizes our stream. This improves both latency and throughput.
+    # socket parameter may be an MiniSSL::Socket, so use to_io
     #
-    if …
-    UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
-
+    if tcp_cork_supported?
       # 6 == Socket::IPPROTO_TCP
       # 3 == TCP_CORK
       # 1/0 == turn on/off
       def cork_socket(socket)
+        skt = socket.to_io
         begin
-          …
+          skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if skt.kind_of? TCPSocket
         rescue IOError, SystemCallError
-          …
+          Puma::Util.purge_interrupt_queue
         end
       end
 
       def uncork_socket(socket)
+        skt = socket.to_io
        begin
-          …
+          skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if skt.kind_of? TCPSocket
        rescue IOError, SystemCallError
-          …
+          Puma::Util.purge_interrupt_queue
        end
      end
+    else
+      def cork_socket(socket)
+      end
+
+      def uncork_socket(socket)
+      end
+    end
+
+    if closed_socket_supported?
+      UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
 
       def closed_socket?(socket)
-
-        return false unless @precheck_closing
+        skt = socket.to_io
+        return false unless skt.kind_of?(TCPSocket) && @precheck_closing
 
         begin
-          tcp_info = …
+          tcp_info = skt.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO)
         rescue IOError, SystemCallError
-          …
+          Puma::Util.purge_interrupt_queue
           @precheck_closing = false
           false
         else
```
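For orientation, here is a minimal sketch, not taken from the gem itself, of driving the 6.x constructor shown above: an options hash is wrapped in `UserFileDefaultOptions`, the log writer defaults to `LogWriter.stdio`, and listeners are added through the new `def_delegators` to the binder. The Rack app, port, and thread counts below are arbitrary placeholder values.

```ruby
# Hypothetical standalone use of Puma::Server 6.x, mirroring the initialize above.
require 'puma'
require 'puma/server'

app = ->(env) { [200, { 'content-type' => 'text/plain' }, ["hello\n"]] }

options = {
  min_threads: 0,                    # read into @min_threads via @options[:min_threads]
  max_threads: 4,
  log_writer: Puma::LogWriter.stdio  # same default as @options.fetch :log_writer, LogWriter.stdio
}

server = Puma::Server.new(app, nil, options)  # events: nil => Events.new is built internally
server.add_tcp_listener '127.0.0.1', 9292     # delegated to @binder
server.run.join                               # run returns the background server thread
```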
```diff
@@ -141,23 +201,19 @@ module Puma
         end
       end
     else
-      def cork_socket(socket)
-      end
-
-      def uncork_socket(socket)
-      end
-
       def closed_socket?(socket)
         false
       end
     end
 
+    # @!attribute [r] backlog
     def backlog
-      @thread_pool …
+      @thread_pool&.backlog
     end
 
+    # @!attribute [r] running
     def running
-      @thread_pool …
+      @thread_pool&.spawned
     end
 
 
```
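The `tcp_cork_supported?` and `closed_socket_supported?` guards above only probe `Socket` constants, so their effect on a given platform can be checked outside Puma with a couple of lines. This is just that probe, nothing Puma-specific:

```ruby
require 'socket'

tcp_cork_supported      = Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP)
closed_socket_supported = Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP)

puts "TCP_CORK available: #{tcp_cork_supported}"   # typically true on Linux, false on macOS/Windows
puts "TCP_INFO available: #{closed_socket_supported}"
```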
```diff
@@ -168,195 +224,42 @@ module Puma
     # there are 5 threads sitting idle ready to take
     # a request. If one request comes in, then the
     # value would be 4 until it finishes processing.
+    # @!attribute [r] pool_capacity
     def pool_capacity
-      @thread_pool …
-    end
-
-    # Lopez Mode == raw tcp apps
-
-    def run_lopez_mode(background=true)
-      [… roughly 38 removed lines: the raw-TCP ThreadPool block (peeraddr handling, `@app.call env, client.to_io`, `client.close unless env['detach']`), the :running state event and the background Thread that called handle_servers_lopez_mode …]
-
-    def handle_servers_lopez_mode
-      [… roughly 58 removed lines: the Lopez-mode accept loop (IO.select on @check plus @binder.ios, `Client.new io, nil`, SystemCallError / Errno::ECONNABORTED rescues, the state events and @check/@notify cleanup) …]
+      @thread_pool&.pool_capacity
     end
 
     # Runs the server.
     #
     # If +background+ is true (the default) then a thread is spun
     # up in the background to handle requests. Otherwise requests
     # are handled synchronously.
     #
-    def run(background=true)
+    def run(background=true, thread_name: 'srv')
       BasicSocket.do_not_reverse_lookup = true
 
       @events.fire :state, :booting
 
       @status = :run
 
-
-        return run_lopez_mode(background)
-      end
+      @thread_pool = ThreadPool.new(thread_name, @options) { |client| process_client client }
 
-      [… roughly 45 removed lines: the old `ThreadPool.new(…, @max_threads, IOBuffer) do |client, buffer|` block, which eagerly finished or queued each client inline and rescued MiniSSL::SSLError, HttpParserError, HttpParserError501, ConnectionError and EOFError itself …]
+      if @queue_requests
+        @reactor = Reactor.new(@io_selector_backend) { |c| reactor_wakeup c }
+        @reactor.run
       end
 
-      @thread_pool.clean_thread_locals = @options[:clean_thread_locals]
 
-      if …
-        @reactor.run_in_thread
-      end
+      @thread_pool.auto_reap! if @options[:reaping_time]
+      @thread_pool.auto_trim! if @options[:auto_trim_time]
 
-
-        @thread_pool.auto_reap!(@reaping_time)
-      end
-
-      if @auto_trim_time
-        @thread_pool.auto_trim!(@auto_trim_time)
-      end
+      @check, @notify = Puma::Util.pipe unless @notify
 
       @events.fire :state, :running
 
       if background
         @thread = Thread.new do
-          Puma.set_thread_name …
+          Puma.set_thread_name thread_name
           handle_servers
         end
         return @thread
```
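The new `ThreadPool.new(thread_name, @options) { |client| process_client client }` call replaces the old positional `(…, @max_threads, IOBuffer)` form. The sketch below is not Puma's `ThreadPool`; it is a deliberately tiny queue-plus-workers illustration of the pattern the block argument plugs into, with made-up class and job names.

```ruby
# A toy worker pool: a shared queue plus N threads, each running the block per job.
class TinyPool
  def initialize(size, name, &block)
    @jobs = Queue.new
    @workers = Array.new(size) do |i|
      Thread.new do
        Thread.current.name = "#{name}-#{i}"
        while (job = @jobs.pop) != :shutdown
          block.call(job)          # in Puma this would be `process_client client`
        end
      end
    end
  end

  def <<(job)
    @jobs << job
  end

  def shutdown
    @workers.size.times { @jobs << :shutdown }
    @workers.each(&:join)
  end
end

pool = TinyPool.new(4, 'srv') { |client| puts "processing #{client}" }
pool << "client-1"
pool << "client-2"
pool.shutdown
```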
```diff
@@ -365,75 +268,118 @@ module Puma
       end
     end
 
+    # This method is called from the Reactor thread when a queued Client receives data,
+    # times out, or when the Reactor is shutting down.
+    #
+    # It is responsible for ensuring that a request has been completely received
+    # before it starts to be processed by the ThreadPool. This may be known as read buffering.
+    # If read buffering is not done, and no other read buffering is performed (such as by an application server
+    # such as nginx) then the application would be subject to a slow client attack.
+    #
+    # For a graphical representation of how the request buffer works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
+    #
+    # The method checks to see if it has the full header and body with
+    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
+    # then the request is passed to the ThreadPool (`@thread_pool << client`)
+    # so that a "worker thread" can pick up the request and begin to execute application logic.
+    # The Client is then removed from the reactor (return `true`).
+    #
+    # If a client object times out, a 408 response is written, its connection is closed,
+    # and the object is removed from the reactor (return `true`).
+    #
+    # If the Reactor is shutting down, all Clients are either timed out or passed to the
+    # ThreadPool, depending on their current state (#can_close?).
+    #
+    # Otherwise, if the full request is not ready then the client will remain in the reactor
+    # (return `false`). When the client sends more data to the socket the `Puma::Client` object
+    # will wake up and again be checked to see if it's ready to be passed to the thread pool.
+    def reactor_wakeup(client)
+      shutdown = !@queue_requests
+      if client.try_to_finish || (shutdown && !client.can_close?)
+        @thread_pool << client
+      elsif shutdown || client.timeout == 0
+        client.timeout!
+      else
+        client.set_timeout(@first_data_timeout)
+        false
+      end
+    rescue StandardError => e
+      client_error(e, client)
+      client.close
+      true
+    end
+
     def handle_servers
       begin
         check = @check
         sockets = [check] + @binder.ios
         pool = @thread_pool
         queue_requests = @queue_requests
+        drain = @options[:drain_on_shutdown] ? 0 : nil
 
-
-        remote_addr_header = nil
-
-        case @options[:remote_address]
+        addr_send_name, addr_value = case @options[:remote_address]
         when :value
-          …
+          [:peerip=, @options[:remote_address_value]]
         when :header
-          remote_addr_header …
+          [:remote_addr_header=, @options[:remote_address_header]]
+        when :proxy_protocol
+          [:expect_proxy_proto=, @options[:remote_address_proxy_protocol]]
+        else
+          [nil, nil]
         end
 
-        while @status == :run
+        while @status == :run || (drain && shutting_down?)
           begin
-            ios = IO.select sockets
+            ios = IO.select sockets, nil, nil, (shutting_down? ? 0 : nil)
+            break unless ios
             ios.first.each do |sock|
              if sock == check
                break if handle_check
              else
-                [… roughly 24 removed lines: the old inline accept, `pool << client`, the busy_threads / @options[:out_of_band] hook and the SystemCallError / Errno::ECONNABORTED rescues …]
+                pool.wait_until_not_full
+                pool.wait_for_less_busy_worker(@options[:wait_for_less_busy_worker])
+
+                io = begin
+                  sock.accept_nonblock
+                rescue IO::WaitReadable
+                  next
                 end
+                drain += 1 if shutting_down?
+                pool << Client.new(io, @binder.env(sock)).tap { |c|
+                  c.listener = sock
+                  c.http_content_length_limit = @http_content_length_limit
+                  c.send(addr_send_name, addr_value) if addr_value
+                }
              end
            end
-          rescue …
-
+          rescue IOError, Errno::EBADF
+            # In the case that any of the sockets are unexpectedly close.
+            raise
+          rescue StandardError => e
+            @log_writer.unknown_error e, nil, "Listen loop"
           end
         end
 
+        @log_writer.debug "Drained #{drain} additional connections." if drain
         @events.fire :state, @status
 
-        graceful_shutdown if @status == :stop || @status == :restart
         if queue_requests
-          @…
+          @queue_requests = false
          @reactor.shutdown
        end
+        graceful_shutdown if @status == :stop || @status == :restart
       rescue Exception => e
-
-        STDERR.puts e.backtrace
+        @log_writer.unknown_error e, nil, "Exception handling servers"
       ensure
-
-        @notify.…
+        # Errno::EBADF is infrequently raised
+        [@check, @notify].each do |io|
+          begin
+            io.close unless io.closed?
+          rescue Errno::EBADF
+          end
+        end
+        @notify = nil
+        @check = nil
       end
 
       @events.fire :state, :done
```
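The `reactor_wakeup` comment above describes read buffering: a connection is only handed to a worker thread once a full request has arrived. The following is a much-simplified, hypothetical illustration of that decision, using a plain hash of buffers and a header-end check instead of Puma's `Client` and `Reactor` classes; the method name, chunk size, and timeout are invented for the sketch.

```ruby
require 'socket'

# Toy read-buffering accept loop: hand [socket, request_bytes] to +pool+ only once
# the end of the request headers ("\r\n\r\n") has been read; connections that never
# send a full request are timed out, like the 408 case described above.
def buffering_accept_loop(server, pool, first_data_timeout: 30)
  buffers   = {}   # socket => partial request bytes
  deadlines = {}   # socket => Time after which we give up

  loop do
    readable, = IO.select([server] + buffers.keys, nil, nil, 1)

    Array(readable).each do |io|
      if io.equal?(server)
        conn = server.accept_nonblock(exception: false)
        next if conn == :wait_readable
        buffers[conn]   = +""
        deadlines[conn] = Time.now + first_data_timeout
      else
        chunk = io.read_nonblock(16_384, exception: false)
        next if chunk == :wait_readable
        if chunk.nil?                                    # client went away
          io.close
        elsif (buffers[io] << chunk).include?("\r\n\r\n")
          pool << [io, buffers[io]]                      # full header: hand off to a worker
        else
          next                                           # not complete yet: keep buffering
        end
        buffers.delete(io)
        deadlines.delete(io)
      end
    end

    # time out clients that never finished sending a request
    expired = deadlines.select { |_, t| Time.now > t }.keys
    expired.each { |io| io.close; buffers.delete(io); deadlines.delete(io) }
  end
end
```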
```diff
@@ -455,522 +401,147 @@ module Puma
         return true
       end
 
-
+      false
     end
 
-    # Given a connection on +client+, handle the incoming requests
+    # Given a connection on +client+, handle the incoming requests,
+    # or queue the connection in the Reactor if no request is available.
+    #
+    # This method is called from a ThreadPool worker thread.
     #
-    # This method …
+    # This method supports HTTP Keep-Alive so it may, depending on if the client
     # indicates that it supports keep alive, wait for another request before
     # returning.
     #
-
+    # Return true if one or more requests were processed.
+    def process_client(client)
+      # Advertise this server into the thread
+      Thread.current[THREAD_LOCAL_KEY] = self
+
+      clean_thread_locals = @options[:clean_thread_locals]
+      close_socket = true
+
+      requests = 0
+
       begin
+        if @queue_requests &&
+            !client.eagerly_finish
 
-
+          client.set_timeout(@first_data_timeout)
+          if @reactor.add client
+            close_socket = false
+            return false
+          end
+        end
 
-
+        with_force_shutdown(client) do
+          client.finish(@first_data_timeout)
+        end
 
         while true
-
+          @requests_count += 1
+          case handle_request(client, requests + 1)
           when false
-
+            break
           when :async
             close_socket = false
-
+            break
           when true
-            return unless @queue_requests
-            buffer.reset
-
             ThreadPool.clean_thread_locals if clean_thread_locals
 
             requests += 1
 
-            [… roughly 16 removed comment and blank lines …]
-            unless client.reset(check_for_more_data)
-              close_socket = false
+            # As an optimization, try to read the next request from the
+            # socket for a short time before returning to the reactor.
+            fast_check = @status == :run
+
+            # Always pass the client back to the reactor after a reasonable
+            # number of inline requests if there are other requests pending.
+            fast_check = false if requests >= @max_fast_inline &&
+              @thread_pool.backlog > 0
+
+            next_request_ready = with_force_shutdown(client) do
+              client.reset(fast_check)
+            end
+
+            unless next_request_ready
+              break unless @queue_requests
               client.set_timeout @persistent_timeout
-              @reactor.add client
-
+              if @reactor.add client
+                close_socket = false
+                break
+              end
             end
           end
         end
-
-      # The client disconnected while we were reading data
-      rescue ConnectionError
-        # Swallow them. The ensure tries to close +client+ down
-      [… roughly 28 removed lines: per-exception handling for MiniSSL::SSLError, HttpParserError (400) and HttpParserError501 (501) via @events.ssl_error / @events.parse_error — now folded into #client_error below …]
+        true
       rescue StandardError => e
-        @events.unknown_error self, e, "Read"
-
+        client_error(e, client)
+        # The ensure tries to close +client+ down
+        requests > 0
       ensure
-
+        client.io_buffer.reset
 
         begin
           client.close if close_socket
         rescue IOError, SystemCallError
-
+          Puma::Util.purge_interrupt_queue
           # Already closed
         rescue StandardError => e
-          @…
+          @log_writer.unknown_error e, nil, "Client"
         end
       end
     end
 
-    [… roughly 60 removed lines: the tail of the old request-env normalization (SERVER_NAME / SERVER_PORT splitting, REQUEST_PATH / QUERY_STRING parsing, the RFC 3875 REMOTE_ADDR comment and peerip fallback) and the old #default_server_port HTTPS / X-Forwarded-* check …]
+    # Triggers a client timeout if the thread-pool shuts down
+    # during execution of the provided block.
+    def with_force_shutdown(client, &block)
+      @thread_pool.with_force_shutdown(&block)
+    rescue ThreadPool::ForceShutdown
+      client.timeout!
     end
 
-    [… several hundred removed lines: the old #handle_request(req, lines) (early-hints writing via #fast_write, "HTTP_…," header fix-ups, RACK_AFTER_REPLY, @app.call with ForceShutdown 503 handling, status / keep-alive / chunked response assembly and body streaming), #fetch_status_code and #read_body (Tempfile / StringIO body buffering) — server.rb no longer defines these; a new lib/puma/request.rb (+671 lines) appears in the file list above …]
+    # :nocov:
 
+    # Handle various error types thrown by Client I/O operations.
+    def client_error(e, client)
+      # Swallow, do not log
+      return if [ConnectionError, EOFError].include?(e.class)
+
+      lowlevel_error(e, client.env)
+      case e
+      when MiniSSL::SSLError
+        @log_writer.ssl_error e, client.io
+      when HttpParserError
+        client.write_error(400)
+        @log_writer.parse_error e, client
+      when HttpParserError501
+        client.write_error(501)
+        @log_writer.parse_error e, client
       else
+        client.write_error(500)
+        @log_writer.unknown_error e, nil, "Read"
       end
     end
 
     # A fallback rack response if +@app+ raises as exception.
     #
-    def lowlevel_error(e, env)
+    def lowlevel_error(e, env, status=500)
       if handler = @options[:lowlevel_error_handler]
         if handler.arity == 1
           return handler.call(e)
-        else
+        elsif handler.arity == 2
           return handler.call(e, env)
+        else
+          return handler.call(e, env, status)
         end
       end
 
       if @leak_stack_on_error
-        …
+        backtrace = e.backtrace.nil? ? '<no backtrace available>' : e.backtrace.join("\n")
+        [status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{backtrace}"]]
       else
-        [ …
+        [status, {}, ["An unhandled lowlevel error occurred. The application logs may have details.\n"]]
       end
     end
 
```
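`lowlevel_error` now dispatches on the handler's arity and passes a third `status` argument when the handler accepts one. A sketch of a config-file handler that uses all three arguments; the JSON response shape is made up, while `lowlevel_error_handler` is the standard DSL call that populates `@options[:lowlevel_error_handler]`:

```ruby
# config/puma.rb — hypothetical three-argument low-level error handler.
require 'json'

lowlevel_error_handler do |error, env, status|
  # Must return a Rack response triple, just like the built-in fallback above.
  [status,
   { 'content-type' => 'application/json' },
   [{ error: error.message, path: env && env['REQUEST_PATH'] }.to_json]]
end
```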
```diff
@@ -992,35 +563,13 @@ module Puma
       $stdout.syswrite "#{pid}: === End thread backtrace dump ===\n"
     end
 
-      if @options[:drain_on_shutdown]
-        count = 0
-
-        while true
-          ios = IO.select @binder.ios, nil, nil, 0
-          break unless ios
-
-          ios.first.each do |sock|
-            begin
-              if io = sock.accept_nonblock
-                count += 1
-                client = Client.new io, @binder.env(sock)
-                @thread_pool << client
-              end
-            rescue SystemCallError
-            end
-          end
-        end
-
-        @events.debug "Drained #{count} additional connections."
-      end
-
       if @status != :restart
         @binder.close
       end
 
       if @thread_pool
         if timeout = @options[:force_shutdown_after]
-          @thread_pool.shutdown timeout.…
+          @thread_pool.shutdown timeout.to_f
         else
           @thread_pool.shutdown
         end
```
```diff
@@ -1028,18 +577,16 @@ module Puma
     end
 
     def notify_safely(message)
-      [… roughly 10 removed lines of the old begin/rescue body; only its final `raise e` / `end` were captured in this view …]
+      @notify << message
+    rescue IOError, NoMethodError, Errno::EPIPE, Errno::EBADF
+      # The server, in another thread, is shutting down
+      Puma::Util.purge_interrupt_queue
+    rescue RuntimeError => e
+      # Temporary workaround for https://bugs.ruby-lang.org/issues/13239
+      if e.message.include?('IOError')
+        Puma::Util.purge_interrupt_queue
+      else
+        raise e
       end
     end
     private :notify_safely
```
```diff
@@ -1057,44 +604,24 @@ module Puma
       @thread.join if @thread && sync
     end
 
-    def begin_restart
+    def begin_restart(sync=false)
       notify_safely(RESTART_COMMAND)
-    end
-
-    def fast_write(io, str)
-      n = 0
-      while true
-        begin
-          n = io.syswrite str
-        rescue Errno::EAGAIN, Errno::EWOULDBLOCK
-          if !IO.select(nil, [io], nil, WRITE_TIMEOUT)
-            raise ConnectionError, "Socket timeout writing data"
-          end
-
-          retry
-        rescue Errno::EPIPE, SystemCallError, IOError
-          raise ConnectionError, "Socket timeout writing data"
-        end
-
-        return if n == str.bytesize
-        str = str.byteslice(n..-1)
-      end
-    end
-    private :fast_write
-
-    ThreadLocalKey = :puma_server
-
-    def self.current
-      Thread.current[ThreadLocalKey]
+      @thread.join if @thread && sync
     end
 
     def shutting_down?
       @status == :stop || @status == :restart
     end
 
-
-
+    # List of methods invoked by #stats.
+    # @version 5.0.0
+    STAT_METHODS = [:backlog, :running, :pool_capacity, :max_threads, :requests_count].freeze
+
+    # Returns a hash of stats about the running server for reporting purposes.
+    # @version 5.0.0
+    # @!attribute [r] stats
+    def stats
+      STAT_METHODS.map {|name| [name, send(name) || 0]}.to_h
     end
-    private :possible_header_injection?
   end
 end
```
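Continuing the constructor sketch shown earlier, the new `#stats` method simply maps `STAT_METHODS` to their current values; the numbers below are illustrative, not real output:

```ruby
server.stats
# => { backlog: 0, running: 2, pool_capacity: 4, max_threads: 4, requests_count: 17 }
server.stats[:requests_count]   # e.g. for a health check or metrics exporter
```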