puma 3.12.6 → 6.3.0
Potentially problematic release.
- checksums.yaml +4 -4
- data/History.md +1806 -451
- data/LICENSE +23 -20
- data/README.md +217 -65
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +59 -21
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +69 -58
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/{tools → docs}/jungle/rc.d/README.md +1 -1
- data/{tools → docs}/jungle/rc.d/puma +2 -2
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +22 -12
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +47 -22
- data/docs/signals.md +13 -11
- data/docs/stats.md +142 -0
- data/docs/systemd.md +94 -120
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +61 -3
- data/ext/puma_http11/http11_parser.c +103 -117
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +22 -38
- data/ext/puma_http11/http11_parser.rl +3 -3
- data/ext/puma_http11/http11_parser_common.rl +6 -6
- data/ext/puma_http11/mini_ssl.c +389 -99
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -116
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +84 -99
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +248 -92
- data/ext/puma_http11/puma_http11.c +49 -57
- data/lib/puma/app/status.rb +71 -49
- data/lib/puma/binder.rb +244 -150
- data/lib/puma/cli.rb +38 -34
- data/lib/puma/client.rb +388 -244
- data/lib/puma/cluster/worker.rb +180 -0
- data/lib/puma/cluster/worker_handle.rb +97 -0
- data/lib/puma/cluster.rb +261 -243
- data/lib/puma/commonlogger.rb +21 -14
- data/lib/puma/configuration.rb +116 -88
- data/lib/puma/const.rb +154 -104
- data/lib/puma/control_cli.rb +115 -70
- data/lib/puma/detect.rb +33 -2
- data/lib/puma/dsl.rb +764 -134
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +16 -112
- data/lib/puma/io_buffer.rb +42 -5
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +184 -133
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +93 -0
- data/lib/puma/minissl.rb +263 -70
- data/lib/puma/null_io.rb +18 -1
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +3 -1
- data/lib/puma/plugin.rb +7 -13
- data/lib/puma/rack/builder.rb +9 -11
- data/lib/puma/rack/urlmap.rb +2 -0
- data/lib/puma/rack_default.rb +21 -4
- data/lib/puma/reactor.rb +93 -315
- data/lib/puma/request.rb +671 -0
- data/lib/puma/runner.rb +94 -69
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +327 -772
- data/lib/puma/single.rb +20 -74
- data/lib/puma/state_file.rb +45 -8
- data/lib/puma/thread_pool.rb +146 -92
- data/lib/puma/util.rb +22 -10
- data/lib/puma.rb +60 -5
- data/lib/rack/handler/puma.rb +116 -90
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +0 -1
- metadata +54 -32
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/accept_nonblock.rb +0 -23
- data/lib/puma/compat.rb +0 -14
- data/lib/puma/convenient.rb +0 -25
- data/lib/puma/daemon_ext.rb +0 -33
- data/lib/puma/delegation.rb +0 -13
- data/lib/puma/java_io_buffer.rb +0 -47
- data/lib/puma/rack/backports/uri/common_193.rb +0 -33
- data/lib/puma/tcp_logger.rb +0 -41
- data/tools/jungle/README.md +0 -19
- data/tools/jungle/init.d/README.md +0 -61
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
- /data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
data/lib/puma/server.rb
CHANGED
@@ -2,25 +2,20 @@
 
 require 'stringio'
 
-
-
-
-
-
-
-
-
-
-
-require 'puma/util'
-
-require 'puma/puma_http11'
-
-unless Puma.const_defined? "IOBuffer"
-  require 'puma/io_buffer'
-end
+require_relative 'thread_pool'
+require_relative 'const'
+require_relative 'log_writer'
+require_relative 'events'
+require_relative 'null_io'
+require_relative 'reactor'
+require_relative 'client'
+require_relative 'binder'
+require_relative 'util'
+require_relative 'request'
 
 require 'socket'
+require 'io/wait' unless Puma::HAS_NATIVE_IO_WAIT
+require 'forwardable'
 
 module Puma
 
@@ -28,120 +23,175 @@ module Puma
   #
   # This class is used by the `Puma::Single` and `Puma::Cluster` classes
   # to generate one or more `Puma::Server` instances capable of handling requests.
-  # Each Puma process will contain one `Puma::Server`
+  # Each Puma process will contain one `Puma::Server` instance.
   #
   # The `Puma::Server` instance pulls requests from the socket, adds them to a
   # `Puma::Reactor` where they get eventually passed to a `Puma::ThreadPool`.
   #
   # Each `Puma::Server` will have one reactor and one thread pool.
   class Server
-
     include Puma::Const
-
+    include Request
+    extend Forwardable
 
     attr_reader :thread
+    attr_reader :log_writer
     attr_reader :events
+    attr_reader :min_threads, :max_threads # for #stats
+    attr_reader :requests_count # @version 5.0.0
+
+    # @todo the following may be deprecated in the future
+    attr_reader :auto_trim_time, :early_hints, :first_data_timeout,
+      :leak_stack_on_error,
+      :persistent_timeout, :reaping_time
+
     attr_accessor :app
+    attr_accessor :binder
+
+    def_delegators :@binder, :add_tcp_listener, :add_ssl_listener,
+      :add_unix_listener, :connected_ports
 
-
-    attr_accessor :max_threads
-    attr_accessor :persistent_timeout
-    attr_accessor :auto_trim_time
-    attr_accessor :reaping_time
-    attr_accessor :first_data_timeout
+    THREAD_LOCAL_KEY = :puma_server
 
     # Create a server for the rack app +app+.
     #
-    # +
-    #
+    # +log_writer+ is a Puma::LogWriter object used to log info and error messages.
+    #
+    # +events+ is a Puma::Events object used to notify application status events.
     #
     # Server#run returns a thread that you can join on to wait for the server
     # to do its work.
     #
-
+    # @note Several instance variables exist so they are available for testing,
+    #   and have default values set via +fetch+. Normally the values are set via
+    #   `::Puma::Configuration.puma_default_options`.
+    #
+    # @note The `events` parameter is set to nil, and set to `Events.new` in code.
+    #   Often `options` needs to be passed, but `events` does not. Using nil allows
+    #   calling code to not require events.rb.
+    #
+    def initialize(app, events = nil, options = {})
       @app = app
-      @events = events
-
-      @check, @notify = Puma::Util.pipe
+      @events = events || Events.new
 
+      @check, @notify = nil
       @status = :stop
 
-      @min_threads = 0
-      @max_threads = 16
-      @auto_trim_time = 30
-      @reaping_time = 1
-
       @thread = nil
      @thread_pool = nil
-      @early_hints = nil
 
-      @
-
-
-
-
+      @options = if options.is_a?(UserFileDefaultOptions)
+        options
+      else
+        UserFileDefaultOptions.new(options, Configuration::DEFAULTS)
+      end
+
+      @log_writer = @options.fetch :log_writer, LogWriter.stdio
+      @early_hints = @options[:early_hints]
+      @first_data_timeout = @options[:first_data_timeout]
+      @min_threads = @options[:min_threads]
+      @max_threads = @options[:max_threads]
+      @persistent_timeout = @options[:persistent_timeout]
+      @queue_requests = @options[:queue_requests]
+      @max_fast_inline = @options[:max_fast_inline]
+      @io_selector_backend = @options[:io_selector_backend]
+      @http_content_length_limit = @options[:http_content_length_limit]
+
+      # make this a hash, since we prefer `key?` over `include?`
+      @supported_http_methods =
+        if @options[:supported_http_methods] == :any
+          :any
+        else
+          if (ary = @options[:supported_http_methods])
+            ary
+          else
+            SUPPORTED_HTTP_METHODS
+          end.sort.product([nil]).to_h.freeze
+        end
 
-
+      temp = !!(@options[:environment] =~ /\A(development|test)\z/)
+      @leak_stack_on_error = @options[:environment] ? temp : true
 
-      @
-      @queue_requests = options[:queue_requests].nil? ? true : options[:queue_requests]
+      @binder = Binder.new(log_writer)
 
       ENV['RACK_ENV'] ||= "development"
 
       @mode = :http
 
       @precheck_closing = true
-    end
-
-    attr_accessor :binder, :leak_stack_on_error, :early_hints
 
-
-
-    forward :add_unix_listener, :@binder
-    forward :connected_port, :@binder
+      @requests_count = 0
+    end
 
     def inherit_binder(bind)
       @binder = bind
-      @own_binder = false
     end
 
-
-
+    class << self
+      # @!attribute [r] current
+      def current
+        Thread.current[THREAD_LOCAL_KEY]
+      end
+
+      # :nodoc:
+      # @version 5.0.0
+      def tcp_cork_supported?
+        Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP)
+      end
+
+      # :nodoc:
+      # @version 5.0.0
+      def closed_socket_supported?
+        Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP)
+      end
+      private :tcp_cork_supported?
+      private :closed_socket_supported?
     end
 
     # On Linux, use TCP_CORK to better control how the TCP stack
     # packetizes our stream. This improves both latency and throughput.
+    # socket parameter may be an MiniSSL::Socket, so use to_io
     #
-    if
-    UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
-
+    if tcp_cork_supported?
      # 6 == Socket::IPPROTO_TCP
      # 3 == TCP_CORK
      # 1/0 == turn on/off
      def cork_socket(socket)
+        skt = socket.to_io
        begin
-
+          skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if skt.kind_of? TCPSocket
        rescue IOError, SystemCallError
-
+          Puma::Util.purge_interrupt_queue
        end
      end
 
      def uncork_socket(socket)
+        skt = socket.to_io
        begin
-
+          skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if skt.kind_of? TCPSocket
        rescue IOError, SystemCallError
-
+          Puma::Util.purge_interrupt_queue
        end
      end
+    else
+      def cork_socket(socket)
+      end
+
+      def uncork_socket(socket)
+      end
+    end
+
+    if closed_socket_supported?
+      UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze
 
      def closed_socket?(socket)
-
-        return false unless @precheck_closing
+        skt = socket.to_io
+        return false unless skt.kind_of?(TCPSocket) && @precheck_closing
 
        begin
-          tcp_info =
+          tcp_info = skt.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO)
        rescue IOError, SystemCallError
-
+          Puma::Util.purge_interrupt_queue
          @precheck_closing = false
          false
        else
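The constructor rewrite in the hunk above moves every tunable into the options hash and introduces `Puma::LogWriter` for logging. For orientation, here is a minimal sketch of driving the 6.x `Puma::Server` directly; only the names visible in this hunk (`initialize(app, events = nil, options = {})`, `LogWriter.stdio`, the `def_delegators` to the binder, `run`) come from the diff — the inline Rack app, the option values, and the use of `stop` are illustrative assumptions, not gem documentation.

```ruby
require 'puma'
require 'puma/server'

# Hypothetical inline Rack app, for illustration only.
app = ->(env) { [200, { 'content-type' => 'text/plain' }, ['ok']] }

options = {
  min_threads: 0,
  max_threads: 4,
  log_writer: Puma::LogWriter.stdio  # replaces the 3.x pattern of logging through Events
}

server = Puma::Server.new(app, nil, options)  # events is nil, so Events.new is built internally
server.add_tcp_listener '127.0.0.1', 9292     # delegated to the Binder via def_delegators
thread = server.run                           # spins up the 'srv' thread and returns it
# ... serve some requests ...
server.stop(true)                             # assumed standard Server API: synchronous stop
thread.join
```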
@@ -151,23 +201,19 @@ module Puma
        end
      end
    else
-      def cork_socket(socket)
-      end
-
-      def uncork_socket(socket)
-      end
-
      def closed_socket?(socket)
        false
      end
    end
 
+    # @!attribute [r] backlog
    def backlog
-      @thread_pool
+      @thread_pool&.backlog
    end
 
+    # @!attribute [r] running
    def running
-      @thread_pool
+      @thread_pool&.spawned
    end
 
 
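The `tcp_cork_supported?` / `closed_socket_supported?` checks above (with the no-op fallbacks in this hunk) gate the Linux-only socket options at class-definition time instead of rescuing failures on every call. The same check can be run standalone; a small sketch in plain Ruby, nothing Puma-specific:

```ruby
require 'socket'

# Mirrors the capability checks that decide which cork/closed-socket methods get defined.
tcp_cork_supported      = Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP)
closed_socket_supported = Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP)

puts "TCP_CORK:  #{tcp_cork_supported}"       # true on Linux, false on macOS/Windows
puts "TCP_INFO:  #{closed_socket_supported}"  # likewise Linux-specific
```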
@@ -178,262 +224,162 @@ module Puma
    # there are 5 threads sitting idle ready to take
    # a request. If one request comes in, then the
    # value would be 4 until it finishes processing.
+    # @!attribute [r] pool_capacity
    def pool_capacity
-      @thread_pool
+      @thread_pool&.pool_capacity
    end
 
-    # Lopez Mode == raw tcp apps
-
-    def run_lopez_mode(background=true)
-      @thread_pool = ThreadPool.new(@min_threads,
-        @max_threads,
-        Hash) do |client, tl|
-
-        io = client.to_io
-        addr = io.peeraddr.last
-
-        if addr.empty?
-          # Set unix socket addrs to localhost
-          addr = "127.0.0.1:0"
-        else
-          addr = "#{addr}:#{io.peeraddr[1]}"
-        end
-
-        env = { 'thread' => tl, REMOTE_ADDR => addr }
-
-        begin
-          @app.call env, client.to_io
-        rescue Object => e
-          STDERR.puts "! Detected exception at toplevel: #{e.message} (#{e.class})"
-          STDERR.puts e.backtrace
-        end
-
-        client.close unless env['detach']
-      end
-
-      @events.fire :state, :running
-
-      if background
-        @thread = Thread.new { handle_servers_lopez_mode }
-        return @thread
-      else
-        handle_servers_lopez_mode
-      end
-    end
-
-    def handle_servers_lopez_mode
-      begin
-        check = @check
-        sockets = [check] + @binder.ios
-        pool = @thread_pool
-
-        while @status == :run
-          begin
-            ios = IO.select sockets
-            ios.first.each do |sock|
-              if sock == check
-                break if handle_check
-              else
-                begin
-                  if io = sock.accept_nonblock
-                    client = Client.new io, nil
-                    pool << client
-                  end
-                rescue SystemCallError
-                  # nothing
-                rescue Errno::ECONNABORTED
-                  # client closed the socket even before accept
-                  begin
-                    io.close
-                  rescue
-                    Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-                  end
-                end
-              end
-            end
-          rescue Object => e
-            @events.unknown_error self, e, "Listen loop"
-          end
-        end
-
-        @events.fire :state, @status
-
-        graceful_shutdown if @status == :stop || @status == :restart
-
-      rescue Exception => e
-        STDERR.puts "Exception handling servers: #{e.message} (#{e.class})"
-        STDERR.puts e.backtrace
-      ensure
-        begin
-          @check.close
-        rescue
-          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-        end
-
-        @notify.close
-
-        if @status != :restart and @own_binder
-          @binder.close
-        end
-      end
-
-      @events.fire :state, :done
-    end
    # Runs the server.
    #
    # If +background+ is true (the default) then a thread is spun
    # up in the background to handle requests. Otherwise requests
    # are handled synchronously.
    #
-    def run(background=true)
+    def run(background=true, thread_name: 'srv')
      BasicSocket.do_not_reverse_lookup = true
 
      @events.fire :state, :booting
 
      @status = :run
 
-
-        return run_lopez_mode(background)
-      end
-
-      queue_requests = @queue_requests
+      @thread_pool = ThreadPool.new(thread_name, @options) { |client| process_client client }
 
-
-
-
-
-        # Advertise this server into the thread
-        Thread.current[ThreadLocalKey] = self
-
-        process_now = false
-
-        begin
-          if queue_requests
-            process_now = client.eagerly_finish
-          else
-            client.finish
-            process_now = true
-          end
-        rescue MiniSSL::SSLError => e
-          ssl_socket = client.io
-          addr = ssl_socket.peeraddr.last
-          cert = ssl_socket.peercert
-
-          client.close
-
-          @events.ssl_error self, addr, cert, e
-        rescue HttpParserError => e
-          client.write_400
-          client.close
-
-          @events.parse_error self, client.env, e
-        rescue ConnectionError, EOFError
-          client.close
-        else
-          if process_now
-            process_client client, buffer
-          else
-            client.set_timeout @first_data_timeout
-            @reactor.add client
-          end
-        end
+      if @queue_requests
+        @reactor = Reactor.new(@io_selector_backend) { |c| reactor_wakeup c }
+        @reactor.run
      end
 
-      @thread_pool.clean_thread_locals = @options[:clean_thread_locals]
 
-      if
-
-        @reactor.run_in_thread
-      end
+      @thread_pool.auto_reap! if @options[:reaping_time]
+      @thread_pool.auto_trim! if @options[:auto_trim_time]
 
-
-        @thread_pool.auto_reap!(@reaping_time)
-      end
-
-      if @auto_trim_time
-        @thread_pool.auto_trim!(@auto_trim_time)
-      end
+      @check, @notify = Puma::Util.pipe unless @notify
 
      @events.fire :state, :running
 
      if background
-        @thread = Thread.new
+        @thread = Thread.new do
+          Puma.set_thread_name thread_name
+          handle_servers
+        end
        return @thread
      else
        handle_servers
      end
    end
 
+    # This method is called from the Reactor thread when a queued Client receives data,
+    # times out, or when the Reactor is shutting down.
+    #
+    # It is responsible for ensuring that a request has been completely received
+    # before it starts to be processed by the ThreadPool. This may be known as read buffering.
+    # If read buffering is not done, and no other read buffering is performed (such as by an application server
+    # such as nginx) then the application would be subject to a slow client attack.
+    #
+    # For a graphical representation of how the request buffer works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
+    #
+    # The method checks to see if it has the full header and body with
+    # the `Puma::Client#try_to_finish` method. If the full request has been sent,
+    # then the request is passed to the ThreadPool (`@thread_pool << client`)
+    # so that a "worker thread" can pick up the request and begin to execute application logic.
+    # The Client is then removed from the reactor (return `true`).
+    #
+    # If a client object times out, a 408 response is written, its connection is closed,
+    # and the object is removed from the reactor (return `true`).
+    #
+    # If the Reactor is shutting down, all Clients are either timed out or passed to the
+    # ThreadPool, depending on their current state (#can_close?).
+    #
+    # Otherwise, if the full request is not ready then the client will remain in the reactor
+    # (return `false`). When the client sends more data to the socket the `Puma::Client` object
+    # will wake up and again be checked to see if it's ready to be passed to the thread pool.
+    def reactor_wakeup(client)
+      shutdown = !@queue_requests
+      if client.try_to_finish || (shutdown && !client.can_close?)
+        @thread_pool << client
+      elsif shutdown || client.timeout == 0
+        client.timeout!
+      else
+        client.set_timeout(@first_data_timeout)
+        false
+      end
+    rescue StandardError => e
+      client_error(e, client)
+      client.close
+      true
+    end
+
    def handle_servers
      begin
        check = @check
        sockets = [check] + @binder.ios
        pool = @thread_pool
        queue_requests = @queue_requests
+        drain = @options[:drain_on_shutdown] ? 0 : nil
 
-
-        remote_addr_header = nil
-
-        case @options[:remote_address]
+        addr_send_name, addr_value = case @options[:remote_address]
        when :value
-
+          [:peerip=, @options[:remote_address_value]]
        when :header
-          remote_addr_header
+          [:remote_addr_header=, @options[:remote_address_header]]
+        when :proxy_protocol
+          [:expect_proxy_proto=, @options[:remote_address_proxy_protocol]]
+        else
+          [nil, nil]
        end
 
-        while @status == :run
+        while @status == :run || (drain && shutting_down?)
          begin
-            ios = IO.select sockets
+            ios = IO.select sockets, nil, nil, (shutting_down? ? 0 : nil)
+            break unless ios
            ios.first.each do |sock|
              if sock == check
                break if handle_check
              else
-
-
-
-
-
-
-
-                    end
-
-                    pool << client
-                    pool.wait_until_not_full
-                  end
-                rescue SystemCallError
-                  # nothing
-                rescue Errno::ECONNABORTED
-                  # client closed the socket even before accept
-                  begin
-                    io.close
-                  rescue
-                    Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-                  end
+                pool.wait_until_not_full
+                pool.wait_for_less_busy_worker(@options[:wait_for_less_busy_worker])
+
+                io = begin
+                  sock.accept_nonblock
+                rescue IO::WaitReadable
+                  next
                end
+                drain += 1 if shutting_down?
+                pool << Client.new(io, @binder.env(sock)).tap { |c|
+                  c.listener = sock
+                  c.http_content_length_limit = @http_content_length_limit
+                  c.send(addr_send_name, addr_value) if addr_value
+                }
              end
            end
-          rescue
-
+          rescue IOError, Errno::EBADF
+            # In the case that any of the sockets are unexpectedly close.
+            raise
+          rescue StandardError => e
+            @log_writer.unknown_error e, nil, "Listen loop"
          end
        end
 
+        @log_writer.debug "Drained #{drain} additional connections." if drain
        @events.fire :state, @status
 
-        graceful_shutdown if @status == :stop || @status == :restart
        if queue_requests
-          @
+          @queue_requests = false
          @reactor.shutdown
        end
+        graceful_shutdown if @status == :stop || @status == :restart
      rescue Exception => e
-
-        STDERR.puts e.backtrace
+        @log_writer.unknown_error e, nil, "Exception handling servers"
      ensure
-
-        @notify.
-
-
-
+        # Errno::EBADF is infrequently raised
+        [@check, @notify].each do |io|
+          begin
+            io.close unless io.closed?
+          rescue Errno::EBADF
+          end
        end
+        @notify = nil
+        @check = nil
      end
 
      @events.fire :state, :done
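The rebuilt listen loop above accepts with `accept_nonblock` and treats `IO::WaitReadable` as "nothing to accept right now", rather than the old blanket `SystemCallError` / `Errno::ECONNABORTED` rescues. A self-contained sketch of that accept pattern outside Puma (the throwaway listener and timeout value are illustrative):

```ruby
require 'socket'

listener = TCPServer.new('127.0.0.1', 0)

readable, = IO.select([listener], nil, nil, 0.1)
(readable || []).each do |sock|
  client = begin
    sock.accept_nonblock
  rescue IO::WaitReadable
    next # select woke us up, but another thread already took the connection
  end
  client.close
end

listener.close
```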
@@ -455,498 +401,147 @@ module Puma
        return true
      end
 
-
+      false
    end
 
-    # Given a connection on +client+, handle the incoming requests
+    # Given a connection on +client+, handle the incoming requests,
+    # or queue the connection in the Reactor if no request is available.
    #
-    # This method
+    # This method is called from a ThreadPool worker thread.
+    #
+    # This method supports HTTP Keep-Alive so it may, depending on if the client
    # indicates that it supports keep alive, wait for another request before
    # returning.
    #
-
+    # Return true if one or more requests were processed.
+    def process_client(client)
+      # Advertise this server into the thread
+      Thread.current[THREAD_LOCAL_KEY] = self
+
+      clean_thread_locals = @options[:clean_thread_locals]
+      close_socket = true
+
+      requests = 0
+
      begin
+        if @queue_requests &&
+            !client.eagerly_finish
 
-
-
+          client.set_timeout(@first_data_timeout)
+          if @reactor.add client
+            close_socket = false
+            return false
+          end
+        end
 
-
+        with_force_shutdown(client) do
+          client.finish(@first_data_timeout)
+        end
 
        while true
-
+          @requests_count += 1
+          case handle_request(client, requests + 1)
          when false
-
+            break
          when :async
            close_socket = false
-
+            break
          when true
-            return unless @queue_requests
-            buffer.reset
-
            ThreadPool.clean_thread_locals if clean_thread_locals
 
            requests += 1
 
-
+            # As an optimization, try to read the next request from the
+            # socket for a short time before returning to the reactor.
+            fast_check = @status == :run
+
+            # Always pass the client back to the reactor after a reasonable
+            # number of inline requests if there are other requests pending.
+            fast_check = false if requests >= @max_fast_inline &&
+              @thread_pool.backlog > 0
 
-
-
-            # has buffered and won't try to read more data. What this means is that
-            # every client, independent of their request speed, gets treated like a slow
-            # one once every MAX_FAST_INLINE requests.
-              check_for_more_data = false
+            next_request_ready = with_force_shutdown(client) do
+              client.reset(fast_check)
            end
 
-            unless
-
+            unless next_request_ready
+              break unless @queue_requests
              client.set_timeout @persistent_timeout
-              @reactor.add client
-
+              if @reactor.add client
+                close_socket = false
+                break
+              end
            end
          end
        end
-
-      # The client disconnected while we were reading data
-      rescue ConnectionError
-        # Swallow them. The ensure tries to close +client+ down
-
-      # SSL handshake error
-      rescue MiniSSL::SSLError => e
-        lowlevel_error(e, client.env)
-
-        ssl_socket = client.io
-        addr = ssl_socket.peeraddr.last
-        cert = ssl_socket.peercert
-
-        close_socket = true
-
-        @events.ssl_error self, addr, cert, e
-
-      # The client doesn't know HTTP well
-      rescue HttpParserError => e
-        lowlevel_error(e, client.env)
-
-        client.write_400
-
-        @events.parse_error self, client.env, e
-
-      # Server error
+        true
      rescue StandardError => e
-
-
-
-
-        @events.unknown_error self, e, "Read"
-
+        client_error(e, client)
+        # The ensure tries to close +client+ down
+        requests > 0
      ensure
-
+        client.io_buffer.reset
 
        begin
          client.close if close_socket
        rescue IOError, SystemCallError
-
+          Puma::Util.purge_interrupt_queue
          # Already closed
        rescue StandardError => e
-          @
+          @log_writer.unknown_error e, nil, "Client"
        end
      end
    end
 
-    #
-    #
-
-
-
-
-          env[SERVER_NAME] = host[0, colon]
-          env[SERVER_PORT] = host[colon+1, host.bytesize]
-        else
-          env[SERVER_NAME] = host
-          env[SERVER_PORT] = default_server_port(env)
-        end
-      else
-        env[SERVER_NAME] = LOCALHOST
-        env[SERVER_PORT] = default_server_port(env)
-      end
-
-      unless env[REQUEST_PATH]
-        # it might be a dumbass full host request header
-        uri = URI.parse(env[REQUEST_URI])
-        env[REQUEST_PATH] = uri.path
-
-        raise "No REQUEST PATH" unless env[REQUEST_PATH]
-
-        # A nil env value will cause a LintError (and fatal errors elsewhere),
-        # so only set the env value if there actually is a value.
-        env[QUERY_STRING] = uri.query if uri.query
-      end
-
-      env[PATH_INFO] = env[REQUEST_PATH]
-
-      # From http://www.ietf.org/rfc/rfc3875 :
-      # "Script authors should be aware that the REMOTE_ADDR and
-      # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
-      # may not identify the ultimate source of the request.
-      # They identify the client for the immediate request to the
-      # server; that client may be a proxy, gateway, or other
-      # intermediary acting on behalf of the actual source client."
-      #
-
-      unless env.key?(REMOTE_ADDR)
-        begin
-          addr = client.peerip
-        rescue Errno::ENOTCONN
-          # Client disconnects can result in an inability to get the
-          # peeraddr from the socket; default to localhost.
-          addr = LOCALHOST_IP
-        end
-
-        # Set unix socket addrs to localhost
-        addr = LOCALHOST_IP if addr.empty?
-
-        env[REMOTE_ADDR] = addr
-      end
-    end
-
-    def default_server_port(env)
-      return PORT_443 if env[HTTPS_KEY] == 'on' || env[HTTPS_KEY] == 'https'
-      env['HTTP_X_FORWARDED_PROTO'] == 'https' ? PORT_443 : PORT_80
+    # Triggers a client timeout if the thread-pool shuts down
+    # during execution of the provided block.
+    def with_force_shutdown(client, &block)
+      @thread_pool.with_force_shutdown(&block)
+    rescue ThreadPool::ForceShutdown
+      client.timeout!
    end
 
-    #
-
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      env[PUMA_SOCKET] = client
-
-      if env[HTTPS_KEY] && client.peercert
-        env[PUMA_PEERCERT] = client.peercert
-      end
-
-      env[HIJACK_P] = true
-      env[HIJACK] = req
-
-      body = req.body
-
-      head = env[REQUEST_METHOD] == HEAD
-
-      env[RACK_INPUT] = body
-      env[RACK_URL_SCHEME] = env[HTTPS_KEY] ? HTTPS : HTTP
-
-      if @early_hints
-        env[EARLY_HINTS] = lambda { |headers|
-          fast_write client, "HTTP/1.1 103 Early Hints\r\n".freeze
-
-          headers.each_pair do |k, vs|
-            if vs.respond_to?(:to_s) && !vs.to_s.empty?
-              vs.to_s.split(NEWLINE).each do |v|
-                next if possible_header_injection?(v)
-                fast_write client, "#{k}: #{v}\r\n"
-              end
-            else
-              fast_write client, "#{k}: #{vs}\r\n"
-            end
-          end
-
-          fast_write client, "\r\n".freeze
-        }
-      end
-
-      # Fixup any headers with , in the name to have _ now. We emit
-      # headers with , in them during the parse phase to avoid ambiguity
-      # with the - to _ conversion for critical headers. But here for
-      # compatibility, we'll convert them back. This code is written to
-      # avoid allocation in the common case (ie there are no headers
-      # with , in their names), that's why it has the extra conditionals.
-
-      to_delete = nil
-      to_add = nil
-
-      env.each do |k,v|
-        if k.start_with?("HTTP_") and k.include?(",") and k != "HTTP_TRANSFER,ENCODING"
-          if to_delete
-            to_delete << k
-          else
-            to_delete = [k]
-          end
-
-          unless to_add
-            to_add = {}
-          end
-
-          to_add[k.gsub(",", "_")] = v
-        end
-      end
-
-      if to_delete
-        to_delete.each { |k| env.delete(k) }
-        env.merge! to_add
-      end
-
-      # A rack extension. If the app writes #call'ables to this
-      # array, we will invoke them when the request is done.
-      #
-      after_reply = env[RACK_AFTER_REPLY] = []
-
-      begin
-        begin
-          status, headers, res_body = @app.call(env)
-
-          return :async if req.hijacked
-
-          status = status.to_i
-
-          if status == -1
-            unless headers.empty? and res_body == []
-              raise "async response must have empty headers and body"
-            end
-
-            return :async
-          end
-        rescue ThreadPool::ForceShutdown => e
-          @events.log "Detected force shutdown of a thread, returning 503"
-          @events.unknown_error self, e, "Rack app"
-
-          status = 503
-          headers = {}
-          res_body = ["Request was internally terminated early\n"]
-
-        rescue Exception => e
-          @events.unknown_error self, e, "Rack app", env
-
-          status, headers, res_body = lowlevel_error(e, env)
-        end
-
-        content_length = nil
-        no_body = head
-
-        if res_body.kind_of? Array and res_body.size == 1
-          content_length = res_body[0].bytesize
-        end
-
-        cork_socket client
-
-        line_ending = LINE_END
-        colon = COLON
-
-        http_11 = if env[HTTP_VERSION] == HTTP_11
-          allow_chunked = true
-          keep_alive = env.fetch(HTTP_CONNECTION, "").downcase != CLOSE
-          include_keepalive_header = false
-
-          # An optimization. The most common response is 200, so we can
-          # reply with the proper 200 status without having to compute
-          # the response header.
-          #
-          if status == 200
-            lines << HTTP_11_200
-          else
-            lines.append "HTTP/1.1 ", status.to_s, " ",
-                         fetch_status_code(status), line_ending
-
-            no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
-          end
-          true
-        else
-          allow_chunked = false
-          keep_alive = env.fetch(HTTP_CONNECTION, "").downcase == KEEP_ALIVE
-          include_keepalive_header = keep_alive
-
-          # Same optimization as above for HTTP/1.1
-          #
-          if status == 200
-            lines << HTTP_10_200
-          else
-            lines.append "HTTP/1.0 ", status.to_s, " ",
-                         fetch_status_code(status), line_ending
-
-            no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
-          end
-          false
-        end
-
-        response_hijack = nil
-
-        headers.each do |k, vs|
-          case k.downcase
-          when CONTENT_LENGTH2
-            next if possible_header_injection?(vs)
-            content_length = vs
-            next
-          when TRANSFER_ENCODING
-            allow_chunked = false
-            content_length = nil
-          when HIJACK
-            response_hijack = vs
-            next
-          end
-
-          if vs.respond_to?(:to_s) && !vs.to_s.empty?
-            vs.to_s.split(NEWLINE).each do |v|
-              next if possible_header_injection?(v)
-              lines.append k, colon, v, line_ending
-            end
-          else
-            lines.append k, colon, line_ending
-          end
-        end
-
-        if include_keepalive_header
-          lines << CONNECTION_KEEP_ALIVE
-        elsif http_11 && !keep_alive
-          lines << CONNECTION_CLOSE
-        end
-
-        if no_body
-          if content_length and status != 204
-            lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
-          end
-
-          lines << line_ending
-          fast_write client, lines.to_s
-          return keep_alive
-        end
-
-        if content_length
-          lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
-          chunked = false
-        elsif !response_hijack and allow_chunked
-          lines << TRANSFER_ENCODING_CHUNKED
-          chunked = true
-        end
-
-        lines << line_ending
-
-        fast_write client, lines.to_s
-
-        if response_hijack
-          response_hijack.call client
-          return :async
-        end
-
-        begin
-          res_body.each do |part|
-            next if part.bytesize.zero?
-            if chunked
-              fast_write client, part.bytesize.to_s(16)
-              fast_write client, line_ending
-              fast_write client, part
-              fast_write client, line_ending
-            else
-              fast_write client, part
-            end
-
-            client.flush
-          end
-
-          if chunked
-            fast_write client, CLOSE_CHUNKED
-            client.flush
-          end
-        rescue SystemCallError, IOError
-          raise ConnectionError, "Connection error detected during write"
-        end
-
-      ensure
-        uncork_socket client
-
-        body.close
-        req.tempfile.unlink if req.tempfile
-        res_body.close if res_body.respond_to? :close
-
-        after_reply.each { |o| o.call }
-      end
-
-      return keep_alive
-    end
-
-    def fetch_status_code(status)
-      HTTP_STATUS_CODES.fetch(status) { 'CUSTOM' }
-    end
-    private :fetch_status_code
-
-    # Given the request +env+ from +client+ and the partial body +body+
-    # plus a potential Content-Length value +cl+, finish reading
-    # the body and return it.
-    #
-    # If the body is larger than MAX_BODY, a Tempfile object is used
-    # for the body, otherwise a StringIO is used.
-    #
-    def read_body(env, client, body, cl)
-      content_length = cl.to_i
-
-      remain = content_length - body.bytesize
-
-      return StringIO.new(body) if remain <= 0
-
-      # Use a Tempfile if there is a lot of data left
-      if remain > MAX_BODY
-        stream = Tempfile.new(Const::PUMA_TMP_BASE)
-        stream.binmode
+    # :nocov:
+
+    # Handle various error types thrown by Client I/O operations.
+    def client_error(e, client)
+      # Swallow, do not log
+      return if [ConnectionError, EOFError].include?(e.class)
+
+      lowlevel_error(e, client.env)
+      case e
+      when MiniSSL::SSLError
+        @log_writer.ssl_error e, client.io
+      when HttpParserError
+        client.write_error(400)
+        @log_writer.parse_error e, client
+      when HttpParserError501
+        client.write_error(501)
+        @log_writer.parse_error e, client
      else
-
-
-        stream = StringIO.new body[0,0]
-      end
-
-      stream.write body
-
-      # Read an odd sized chunk so we can read even sized ones
-      # after this
-      chunk = client.readpartial(remain % CHUNK_SIZE)
-
-      # No chunk means a closed socket
-      unless chunk
-        stream.close
-        return nil
-      end
-
-      remain -= stream.write(chunk)
-
-      # Raed the rest of the chunks
-      while remain > 0
-        chunk = client.readpartial(CHUNK_SIZE)
-        unless chunk
-          stream.close
-          return nil
-        end
-
-        remain -= stream.write(chunk)
+        client.write_error(500)
+        @log_writer.unknown_error e, nil, "Read"
      end
-
-      stream.rewind
-
-      return stream
    end
 
    # A fallback rack response if +@app+ raises as exception.
    #
-    def lowlevel_error(e, env)
+    def lowlevel_error(e, env, status=500)
      if handler = @options[:lowlevel_error_handler]
        if handler.arity == 1
          return handler.call(e)
-
+        elsif handler.arity == 2
          return handler.call(e, env)
+        else
+          return handler.call(e, env, status)
        end
      end
 
      if @leak_stack_on_error
-
+        backtrace = e.backtrace.nil? ? '<no backtrace available>' : e.backtrace.join("\n")
+        [status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{backtrace}"]]
      else
-        [
+        [status, {}, ["An unhandled lowlevel error occurred. The application logs may have details.\n"]]
      end
    end
 
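`lowlevel_error` above now takes an HTTP status and forwards it to a three-argument `lowlevel_error_handler`. A hedged configuration sketch: `lowlevel_error_handler` is puma's existing config hook, while the handler body and message are illustrative.

```ruby
# config/puma.rb
lowlevel_error_handler do |exception, env, status|
  # Must return a Rack response triple, as the built-in fallback in the diff does.
  [status, {}, ["Low-level error (#{status}): #{exception.class}\n"]]
end
```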
@@ -968,31 +563,13 @@ module Puma
        $stdout.syswrite "#{pid}: === End thread backtrace dump ===\n"
      end
 
-      if @
-
-
-        while true
-          ios = IO.select @binder.ios, nil, nil, 0
-          break unless ios
-
-          ios.first.each do |sock|
-            begin
-              if io = sock.accept_nonblock
-                count += 1
-                client = Client.new io, @binder.env(sock)
-                @thread_pool << client
-              end
-            rescue SystemCallError
-            end
-          end
-        end
-
-        @events.debug "Drained #{count} additional connections."
+      if @status != :restart
+        @binder.close
      end
 
      if @thread_pool
        if timeout = @options[:force_shutdown_after]
-          @thread_pool.shutdown timeout.
+          @thread_pool.shutdown timeout.to_f
        else
          @thread_pool.shutdown
        end
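`graceful_shutdown` above now passes `force_shutdown_after` through `to_f`, so fractional timeouts reach the thread pool. An illustrative config line (the DSL method is puma's standard one; the value is an assumption):

```ruby
# config/puma.rb
force_shutdown_after 5.5 # drain in-flight requests for up to 5.5s, then force-shut the pool
```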
@@ -1000,18 +577,16 @@ module Puma
    end
 
    def notify_safely(message)
-
-
-
-
-
-
-
-
-
-
-        raise e
-      end
+      @notify << message
+    rescue IOError, NoMethodError, Errno::EPIPE, Errno::EBADF
+      # The server, in another thread, is shutting down
+      Puma::Util.purge_interrupt_queue
+    rescue RuntimeError => e
+      # Temporary workaround for https://bugs.ruby-lang.org/issues/13239
+      if e.message.include?('IOError')
+        Puma::Util.purge_interrupt_queue
+      else
+        raise e
      end
    end
    private :notify_safely
@@ -1029,44 +604,24 @@ module Puma
      @thread.join if @thread && sync
    end
 
-    def begin_restart
+    def begin_restart(sync=false)
      notify_safely(RESTART_COMMAND)
-
-
-    def fast_write(io, str)
-      n = 0
-      while true
-        begin
-          n = io.syswrite str
-        rescue Errno::EAGAIN, Errno::EWOULDBLOCK
-          if !IO.select(nil, [io], nil, WRITE_TIMEOUT)
-            raise ConnectionError, "Socket timeout writing data"
-          end
-
-          retry
-        rescue Errno::EPIPE, SystemCallError, IOError
-          raise ConnectionError, "Socket timeout writing data"
-        end
-
-        return if n == str.bytesize
-        str = str.byteslice(n..-1)
-      end
-    end
-    private :fast_write
-
-    ThreadLocalKey = :puma_server
-
-    def self.current
-      Thread.current[ThreadLocalKey]
+      @thread.join if @thread && sync
    end
 
    def shutting_down?
      @status == :stop || @status == :restart
    end
 
-
-
+    # List of methods invoked by #stats.
+    # @version 5.0.0
+    STAT_METHODS = [:backlog, :running, :pool_capacity, :max_threads, :requests_count].freeze
+
+    # Returns a hash of stats about the running server for reporting purposes.
+    # @version 5.0.0
+    # @!attribute [r] stats
+    def stats
+      STAT_METHODS.map {|name| [name, send(name) || 0]}.to_h
    end
-    private :possible_header_injection?
  end
 end