piesync-puma 3.12.6
- checksums.yaml +7 -0
- data/History.md +1429 -0
- data/LICENSE +26 -0
- data/README.md +280 -0
- data/bin/puma +10 -0
- data/bin/puma-wild +31 -0
- data/bin/pumactl +12 -0
- data/docs/architecture.md +36 -0
- data/docs/deployment.md +91 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/nginx.md +80 -0
- data/docs/plugins.md +28 -0
- data/docs/restart.md +39 -0
- data/docs/signals.md +96 -0
- data/docs/systemd.md +272 -0
- data/ext/puma_http11/PumaHttp11Service.java +17 -0
- data/ext/puma_http11/ext_help.h +15 -0
- data/ext/puma_http11/extconf.rb +15 -0
- data/ext/puma_http11/http11_parser.c +1071 -0
- data/ext/puma_http11/http11_parser.h +65 -0
- data/ext/puma_http11/http11_parser.java.rl +161 -0
- data/ext/puma_http11/http11_parser.rl +149 -0
- data/ext/puma_http11/http11_parser_common.rl +54 -0
- data/ext/puma_http11/io_buffer.c +155 -0
- data/ext/puma_http11/mini_ssl.c +494 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +234 -0
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +470 -0
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +352 -0
- data/ext/puma_http11/puma_http11.c +500 -0
- data/lib/puma.rb +23 -0
- data/lib/puma/accept_nonblock.rb +23 -0
- data/lib/puma/app/status.rb +74 -0
- data/lib/puma/binder.rb +413 -0
- data/lib/puma/cli.rb +235 -0
- data/lib/puma/client.rb +480 -0
- data/lib/puma/cluster.rb +531 -0
- data/lib/puma/commonlogger.rb +108 -0
- data/lib/puma/compat.rb +14 -0
- data/lib/puma/configuration.rb +361 -0
- data/lib/puma/const.rb +239 -0
- data/lib/puma/control_cli.rb +264 -0
- data/lib/puma/convenient.rb +25 -0
- data/lib/puma/daemon_ext.rb +33 -0
- data/lib/puma/delegation.rb +13 -0
- data/lib/puma/detect.rb +15 -0
- data/lib/puma/dsl.rb +518 -0
- data/lib/puma/events.rb +153 -0
- data/lib/puma/io_buffer.rb +9 -0
- data/lib/puma/java_io_buffer.rb +47 -0
- data/lib/puma/jruby_restart.rb +84 -0
- data/lib/puma/launcher.rb +433 -0
- data/lib/puma/minissl.rb +285 -0
- data/lib/puma/null_io.rb +44 -0
- data/lib/puma/plugin.rb +117 -0
- data/lib/puma/plugin/tmp_restart.rb +34 -0
- data/lib/puma/rack/backports/uri/common_193.rb +33 -0
- data/lib/puma/rack/builder.rb +299 -0
- data/lib/puma/rack/urlmap.rb +91 -0
- data/lib/puma/rack_default.rb +7 -0
- data/lib/puma/reactor.rb +347 -0
- data/lib/puma/runner.rb +184 -0
- data/lib/puma/server.rb +1072 -0
- data/lib/puma/single.rb +123 -0
- data/lib/puma/state_file.rb +31 -0
- data/lib/puma/tcp_logger.rb +41 -0
- data/lib/puma/thread_pool.rb +346 -0
- data/lib/puma/util.rb +129 -0
- data/lib/rack/handler/puma.rb +115 -0
- data/tools/jungle/README.md +19 -0
- data/tools/jungle/init.d/README.md +61 -0
- data/tools/jungle/init.d/puma +421 -0
- data/tools/jungle/init.d/run-puma +18 -0
- data/tools/jungle/rc.d/README.md +74 -0
- data/tools/jungle/rc.d/puma +61 -0
- data/tools/jungle/rc.d/puma.conf +10 -0
- data/tools/jungle/upstart/README.md +61 -0
- data/tools/jungle/upstart/puma-manager.conf +31 -0
- data/tools/jungle/upstart/puma.conf +69 -0
- data/tools/trickletest.rb +45 -0
- metadata +131 -0
data/lib/puma/runner.rb
ADDED
@@ -0,0 +1,184 @@
# frozen_string_literal: true

require 'puma/server'
require 'puma/const'

module Puma
  # Generic class that is used by `Puma::Cluster` and `Puma::Single` to
  # serve requests. This class spawns a new instance of `Puma::Server` via
  # a call to `start_server`.
  class Runner
    def initialize(cli, events)
      @launcher = cli
      @events = events
      @options = cli.options
      @app = nil
      @control = nil
    end

    def daemon?
      @options[:daemon]
    end

    def development?
      @options[:environment] == "development"
    end

    def test?
      @options[:environment] == "test"
    end

    def log(str)
      @events.log str
    end

    def before_restart
      @control.stop(true) if @control
    end

    def error(str)
      @events.error str
    end

    def debug(str)
      @events.log "- #{str}" if @options[:debug]
    end

    def start_control
      str = @options[:control_url]
      return unless str

      require 'puma/app/status'

      uri = URI.parse str

      app = Puma::App::Status.new @launcher

      if token = @options[:control_auth_token]
        app.auth_token = token unless token.empty? or token == :none
      end

      control = Puma::Server.new app, @launcher.events
      control.min_threads = 0
      control.max_threads = 1

      case uri.scheme
      when "tcp"
        log "* Starting control server on #{str}"
        control.add_tcp_listener uri.host, uri.port
      when "unix"
        log "* Starting control server on #{str}"
        path = "#{uri.host}#{uri.path}"
        mask = @options[:control_url_umask]

        control.add_unix_listener path, mask
      else
        error "Invalid control URI: #{str}"
      end

      control.run
      @control = control
    end

    def ruby_engine
      if !defined?(RUBY_ENGINE) || RUBY_ENGINE == "ruby"
        "ruby #{RUBY_VERSION}-p#{RUBY_PATCHLEVEL}"
      else
        if defined?(RUBY_ENGINE_VERSION)
          "#{RUBY_ENGINE} #{RUBY_ENGINE_VERSION} - ruby #{RUBY_VERSION}"
        else
          "#{RUBY_ENGINE} #{RUBY_VERSION}"
        end
      end
    end

    def output_header(mode)
      min_t = @options[:min_threads]
      max_t = @options[:max_threads]

      log "Puma starting in #{mode} mode..."
      log "* Version #{Puma::Const::PUMA_VERSION} (#{ruby_engine}), codename: #{Puma::Const::CODE_NAME}"
      log "* Min threads: #{min_t}, max threads: #{max_t}"
      log "* Environment: #{ENV['RACK_ENV']}"

      if @options[:mode] == :tcp
        log "* Mode: Lopez Express (tcp)"
      end
    end

    def redirected_io?
      @options[:redirect_stdout] || @options[:redirect_stderr]
    end

    def redirect_io
      stdout = @options[:redirect_stdout]
      stderr = @options[:redirect_stderr]
      append = @options[:redirect_append]

      if stdout
        unless Dir.exist?(File.dirname(stdout))
          raise "Cannot redirect STDOUT to #{stdout}"
        end

        STDOUT.reopen stdout, (append ? "a" : "w")
        STDOUT.sync = true
        STDOUT.puts "=== puma startup: #{Time.now} ==="
      end

      if stderr
        unless Dir.exist?(File.dirname(stderr))
          raise "Cannot redirect STDERR to #{stderr}"
        end

        STDERR.reopen stderr, (append ? "a" : "w")
        STDERR.sync = true
        STDERR.puts "=== puma startup: #{Time.now} ==="
      end
    end

    def load_and_bind
      unless @launcher.config.app_configured?
        error "No application configured, nothing to run"
        exit 1
      end

      # Load the app before we daemonize.
      begin
        @app = @launcher.config.app
      rescue Exception => e
        log "! Unable to load application: #{e.class}: #{e.message}"
        raise e
      end

      @launcher.binder.parse @options[:binds], self
    end

    def app
      @app ||= @launcher.config.app
    end

    def start_server
      min_t = @options[:min_threads]
      max_t = @options[:max_threads]

      server = Puma::Server.new app, @launcher.events, @options
      server.min_threads = min_t
      server.max_threads = max_t
      server.inherit_binder @launcher.binder

      if @options[:mode] == :tcp
        server.tcp_mode!
      end

      if @options[:early_hints]
        server.early_hints = true
      end

      unless development? || test?
        server.leak_stack_on_error = false
      end

      server
    end
  end
end
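The runner only orchestrates: `start_server` builds a `Puma::Server`, copies the thread limits out of the options hash, hands it the launcher's binder, and `start_control` boots a second single-threaded server for the control app. A minimal hand-driven sketch of the same wiring, using only the APIs visible above (the inline Rack app and the listen address are illustrative, not taken from the gem):

# sketch.rb -- illustrative only; a launcher/runner normally does this wiring
require 'puma/server'
require 'puma/events'

app = ->(env) { [200, { 'Content-Type' => 'text/plain' }, ["hello\n"]] }

server = Puma::Server.new app, Puma::Events.stdio
server.min_threads = 0
server.max_threads = 16

# The runner inherits the launcher's binder; binding a listener directly is
# the standalone equivalent (add_tcp_listener is forwarded to the binder).
server.add_tcp_listener '127.0.0.1', 9292

# run(background = true) returns the acceptor thread; join it to block.
server.run.join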
data/lib/puma/server.rb
ADDED
@@ -0,0 +1,1072 @@
# frozen_string_literal: true

require 'stringio'

require 'puma/thread_pool'
require 'puma/const'
require 'puma/events'
require 'puma/null_io'
require 'puma/compat'
require 'puma/reactor'
require 'puma/client'
require 'puma/binder'
require 'puma/delegation'
require 'puma/accept_nonblock'
require 'puma/util'

require 'puma/puma_http11'

unless Puma.const_defined? "IOBuffer"
  require 'puma/io_buffer'
end

require 'socket'

module Puma

  # The HTTP Server itself. Serves out a single Rack app.
  #
  # This class is used by the `Puma::Single` and `Puma::Cluster` classes
  # to generate one or more `Puma::Server` instances capable of handling requests.
  # Each Puma process will contain one `Puma::Server` instance.
  #
  # The `Puma::Server` instance pulls requests from the socket, adds them to a
  # `Puma::Reactor` where they get eventually passed to a `Puma::ThreadPool`.
  #
  # Each `Puma::Server` will have one reactor and one thread pool.
  class Server

    include Puma::Const
    extend Puma::Delegation

    attr_reader :thread
    attr_reader :events
    attr_accessor :app

    attr_accessor :min_threads
    attr_accessor :max_threads
    attr_accessor :persistent_timeout
    attr_accessor :auto_trim_time
    attr_accessor :reaping_time
    attr_accessor :first_data_timeout

    # Create a server for the rack app +app+.
    #
    # +events+ is an object which will be called when certain error events occur
    # to be handled. See Puma::Events for the list of current methods to implement.
    #
    # Server#run returns a thread that you can join on to wait for the server
    # to do its work.
    #
    def initialize(app, events=Events.stdio, options={})
      @app = app
      @events = events

      @check, @notify = Puma::Util.pipe

      @status = :stop

      @min_threads = 0
      @max_threads = 16
      @auto_trim_time = 30
      @reaping_time = 1

      @thread = nil
      @thread_pool = nil
      @early_hints = nil

      @persistent_timeout = options.fetch(:persistent_timeout, PERSISTENT_TIMEOUT)
      @first_data_timeout = options.fetch(:first_data_timeout, FIRST_DATA_TIMEOUT)

      @binder = Binder.new(events)
      @own_binder = true

      @leak_stack_on_error = true

      @options = options
      @queue_requests = options[:queue_requests].nil? ? true : options[:queue_requests]

      ENV['RACK_ENV'] ||= "development"

      @mode = :http

      @precheck_closing = true
    end

    attr_accessor :binder, :leak_stack_on_error, :early_hints

    forward :add_tcp_listener, :@binder
    forward :add_ssl_listener, :@binder
    forward :add_unix_listener, :@binder
    forward :connected_port, :@binder

    def inherit_binder(bind)
      @binder = bind
      @own_binder = false
    end

    def tcp_mode!
      @mode = :tcp
    end

    # On Linux, use TCP_CORK to better control how the TCP stack
    # packetizes our stream. This improves both latency and throughput.
    #
    if RUBY_PLATFORM =~ /linux/
      UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze

      # 6 == Socket::IPPROTO_TCP
      # 3 == TCP_CORK
      # 1/0 == turn on/off
      def cork_socket(socket)
        begin
          socket.setsockopt(6, 3, 1) if socket.kind_of? TCPSocket
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        end
      end

      def uncork_socket(socket)
        begin
          socket.setsockopt(6, 3, 0) if socket.kind_of? TCPSocket
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        end
      end

      def closed_socket?(socket)
        return false unless socket.kind_of? TCPSocket
        return false unless @precheck_closing

        begin
          tcp_info = socket.getsockopt(Socket::SOL_TCP, Socket::TCP_INFO)
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
          @precheck_closing = false
          false
        else
          state = tcp_info.unpack(UNPACK_TCP_STATE_FROM_TCP_INFO)[0]
          # TIME_WAIT: 6, CLOSE: 7, CLOSE_WAIT: 8, LAST_ACK: 9, CLOSING: 11
          (state >= 6 && state <= 9) || state == 11
        end
      end
    else
      def cork_socket(socket)
      end

      def uncork_socket(socket)
      end

      def closed_socket?(socket)
        false
      end
    end

    def backlog
      @thread_pool and @thread_pool.backlog
    end

    def running
      @thread_pool and @thread_pool.spawned
    end


    # This number represents the number of requests that
    # the server is capable of taking right now.
    #
    # For example if the number is 5 then it means
    # there are 5 threads sitting idle ready to take
    # a request. If one request comes in, then the
    # value would be 4 until it finishes processing.
    def pool_capacity
      @thread_pool and @thread_pool.pool_capacity
    end

    # Lopez Mode == raw tcp apps

    def run_lopez_mode(background=true)
      @thread_pool = ThreadPool.new(@min_threads,
                                    @max_threads,
                                    Hash) do |client, tl|

        io = client.to_io
        addr = io.peeraddr.last

        if addr.empty?
          # Set unix socket addrs to localhost
          addr = "127.0.0.1:0"
        else
          addr = "#{addr}:#{io.peeraddr[1]}"
        end

        env = { 'thread' => tl, REMOTE_ADDR => addr }

        begin
          @app.call env, client.to_io
        rescue Object => e
          STDERR.puts "! Detected exception at toplevel: #{e.message} (#{e.class})"
          STDERR.puts e.backtrace
        end

        client.close unless env['detach']
      end

      @events.fire :state, :running

      if background
        @thread = Thread.new { handle_servers_lopez_mode }
        return @thread
      else
        handle_servers_lopez_mode
      end
    end

    def handle_servers_lopez_mode
      begin
        check = @check
        sockets = [check] + @binder.ios
        pool = @thread_pool

        while @status == :run
          begin
            ios = IO.select sockets
            ios.first.each do |sock|
              if sock == check
                break if handle_check
              else
                begin
                  if io = sock.accept_nonblock
                    client = Client.new io, nil
                    pool << client
                  end
                rescue SystemCallError
                  # nothing
                rescue Errno::ECONNABORTED
                  # client closed the socket even before accept
                  begin
                    io.close
                  rescue
                    Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
                  end
                end
              end
            end
          rescue Object => e
            @events.unknown_error self, e, "Listen loop"
          end
        end

        @events.fire :state, @status

        graceful_shutdown if @status == :stop || @status == :restart

      rescue Exception => e
        STDERR.puts "Exception handling servers: #{e.message} (#{e.class})"
        STDERR.puts e.backtrace
      ensure
        begin
          @check.close
        rescue
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        end

        @notify.close

        if @status != :restart and @own_binder
          @binder.close
        end
      end

      @events.fire :state, :done
    end
    # Runs the server.
    #
    # If +background+ is true (the default) then a thread is spun
    # up in the background to handle requests. Otherwise requests
    # are handled synchronously.
    #
    def run(background=true)
      BasicSocket.do_not_reverse_lookup = true

      @events.fire :state, :booting

      @status = :run

      if @mode == :tcp
        return run_lopez_mode(background)
      end

      queue_requests = @queue_requests

      @thread_pool = ThreadPool.new(@min_threads,
                                    @max_threads,
                                    IOBuffer) do |client, buffer|

        # Advertise this server into the thread
        Thread.current[ThreadLocalKey] = self

        process_now = false

        begin
          if queue_requests
            process_now = client.eagerly_finish
          else
            client.finish
            process_now = true
          end
        rescue MiniSSL::SSLError => e
          ssl_socket = client.io
          addr = ssl_socket.peeraddr.last
          cert = ssl_socket.peercert

          client.close

          @events.ssl_error self, addr, cert, e
        rescue HttpParserError => e
          client.write_400
          client.close

          @events.parse_error self, client.env, e
        rescue ConnectionError, EOFError
          client.close
        else
          if process_now
            process_client client, buffer
          else
            client.set_timeout @first_data_timeout
            @reactor.add client
          end
        end
      end

      @thread_pool.clean_thread_locals = @options[:clean_thread_locals]

      if queue_requests
        @reactor = Reactor.new self, @thread_pool
        @reactor.run_in_thread
      end

      if @reaping_time
        @thread_pool.auto_reap!(@reaping_time)
      end

      if @auto_trim_time
        @thread_pool.auto_trim!(@auto_trim_time)
      end

      @events.fire :state, :running

      if background
        @thread = Thread.new { handle_servers }
        return @thread
      else
        handle_servers
      end
    end

    def handle_servers
      begin
        check = @check
        sockets = [check] + @binder.ios
        pool = @thread_pool
        queue_requests = @queue_requests

        remote_addr_value = nil
        remote_addr_header = nil

        case @options[:remote_address]
        when :value
          remote_addr_value = @options[:remote_address_value]
        when :header
          remote_addr_header = @options[:remote_address_header]
        end

        while @status == :run
          begin
            ios = IO.select sockets
            ios.first.each do |sock|
              if sock == check
                break if handle_check
              else
                begin
                  if io = sock.accept_nonblock
                    client = Client.new io, @binder.env(sock)
                    if remote_addr_value
                      client.peerip = remote_addr_value
                    elsif remote_addr_header
                      client.remote_addr_header = remote_addr_header
                    end

                    pool << client
                    pool.wait_until_not_full
                  end
                rescue SystemCallError
                  # nothing
                rescue Errno::ECONNABORTED
                  # client closed the socket even before accept
                  begin
                    io.close
                  rescue
                    Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
                  end
                end
              end
            end
          rescue Object => e
            @events.unknown_error self, e, "Listen loop"
          end
        end

        @events.fire :state, @status

        graceful_shutdown if @status == :stop || @status == :restart
        if queue_requests
          @reactor.clear!
          @reactor.shutdown
        end
      rescue Exception => e
        STDERR.puts "Exception handling servers: #{e.message} (#{e.class})"
        STDERR.puts e.backtrace
      ensure
        @check.close
        @notify.close

        if @status != :restart and @own_binder
          @binder.close
        end
      end

      @events.fire :state, :done
    end

    # :nodoc:
    def handle_check
      cmd = @check.read(1)

      case cmd
      when STOP_COMMAND
        @status = :stop
        return true
      when HALT_COMMAND
        @status = :halt
        return true
      when RESTART_COMMAND
        @status = :restart
        return true
      end

      return false
    end

    # Given a connection on +client+, handle the incoming requests.
    #
    # This method supports HTTP Keep-Alive so it may, depending on if the client
    # indicates that it supports keep alive, wait for another request before
    # returning.
    #
    def process_client(client, buffer)
      begin

        clean_thread_locals = @options[:clean_thread_locals]
        close_socket = true

        requests = 0

        while true
          case handle_request(client, buffer)
          when false
            return
          when :async
            close_socket = false
            return
          when true
            return unless @queue_requests
            buffer.reset

            ThreadPool.clean_thread_locals if clean_thread_locals

            requests += 1

            check_for_more_data = @status == :run

            if requests >= MAX_FAST_INLINE
              # This will mean that reset will only try to use the data it already
              # has buffered and won't try to read more data. What this means is that
              # every client, independent of their request speed, gets treated like a slow
              # one once every MAX_FAST_INLINE requests.
              check_for_more_data = false
            end

            unless client.reset(check_for_more_data)
              close_socket = false
              client.set_timeout @persistent_timeout
              @reactor.add client
              return
            end
          end
        end

      # The client disconnected while we were reading data
      rescue ConnectionError
        # Swallow them. The ensure tries to close +client+ down

      # SSL handshake error
      rescue MiniSSL::SSLError => e
        lowlevel_error(e, client.env)

        ssl_socket = client.io
        addr = ssl_socket.peeraddr.last
        cert = ssl_socket.peercert

        close_socket = true

        @events.ssl_error self, addr, cert, e

      # The client doesn't know HTTP well
      rescue HttpParserError => e
        lowlevel_error(e, client.env)

        client.write_400

        @events.parse_error self, client.env, e

      # Server error
      rescue StandardError => e
        lowlevel_error(e, client.env)

        client.write_500

        @events.unknown_error self, e, "Read"

      ensure
        buffer.reset

        begin
          client.close if close_socket
        rescue IOError, SystemCallError
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
          # Already closed
        rescue StandardError => e
          @events.unknown_error self, e, "Client"
        end
      end
    end

    # Given a Hash +env+ for the request read from +client+, add
    # and fixup keys to comply with Rack's env guidelines.
    #
    def normalize_env(env, client)
      if host = env[HTTP_HOST]
        if colon = host.index(":")
          env[SERVER_NAME] = host[0, colon]
          env[SERVER_PORT] = host[colon+1, host.bytesize]
        else
          env[SERVER_NAME] = host
          env[SERVER_PORT] = default_server_port(env)
        end
      else
        env[SERVER_NAME] = LOCALHOST
        env[SERVER_PORT] = default_server_port(env)
      end

      unless env[REQUEST_PATH]
        # it might be a dumbass full host request header
        uri = URI.parse(env[REQUEST_URI])
        env[REQUEST_PATH] = uri.path

        raise "No REQUEST PATH" unless env[REQUEST_PATH]

        # A nil env value will cause a LintError (and fatal errors elsewhere),
        # so only set the env value if there actually is a value.
        env[QUERY_STRING] = uri.query if uri.query
      end

      env[PATH_INFO] = env[REQUEST_PATH]

      # From http://www.ietf.org/rfc/rfc3875 :
      # "Script authors should be aware that the REMOTE_ADDR and
      # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
      # may not identify the ultimate source of the request.
      # They identify the client for the immediate request to the
      # server; that client may be a proxy, gateway, or other
      # intermediary acting on behalf of the actual source client."
      #

      unless env.key?(REMOTE_ADDR)
        begin
          addr = client.peerip
        rescue Errno::ENOTCONN
          # Client disconnects can result in an inability to get the
          # peeraddr from the socket; default to localhost.
          addr = LOCALHOST_IP
        end

        # Set unix socket addrs to localhost
        addr = LOCALHOST_IP if addr.empty?

        env[REMOTE_ADDR] = addr
      end
    end

    def default_server_port(env)
      return PORT_443 if env[HTTPS_KEY] == 'on' || env[HTTPS_KEY] == 'https'
      env['HTTP_X_FORWARDED_PROTO'] == 'https' ? PORT_443 : PORT_80
    end

    # Given the request +env+ from +client+ and a partial request body
    # in +body+, finish reading the body if there is one and invoke
    # the rack app. Then construct the response and write it back to
    # +client+
    #
    # +cl+ is the previously fetched Content-Length header if there
    # was one. This is an optimization to keep from having to look
    # it up again.
    #
    def handle_request(req, lines)
      env = req.env
      client = req.io

      return false if closed_socket?(client)

      normalize_env env, req

      env[PUMA_SOCKET] = client

      if env[HTTPS_KEY] && client.peercert
        env[PUMA_PEERCERT] = client.peercert
      end

      env[HIJACK_P] = true
      env[HIJACK] = req

      body = req.body

      head = env[REQUEST_METHOD] == HEAD

      env[RACK_INPUT] = body
      env[RACK_URL_SCHEME] = env[HTTPS_KEY] ? HTTPS : HTTP

      if @early_hints
        env[EARLY_HINTS] = lambda { |headers|
          fast_write client, "HTTP/1.1 103 Early Hints\r\n".freeze

          headers.each_pair do |k, vs|
            if vs.respond_to?(:to_s) && !vs.to_s.empty?
              vs.to_s.split(NEWLINE).each do |v|
                next if possible_header_injection?(v)
                fast_write client, "#{k}: #{v}\r\n"
              end
            else
              fast_write client, "#{k}: #{vs}\r\n"
            end
          end

          fast_write client, "\r\n".freeze
        }
      end

      # Fixup any headers with , in the name to have _ now. We emit
      # headers with , in them during the parse phase to avoid ambiguity
      # with the - to _ conversion for critical headers. But here for
      # compatibility, we'll convert them back. This code is written to
      # avoid allocation in the common case (ie there are no headers
      # with , in their names), that's why it has the extra conditionals.

      to_delete = nil
      to_add = nil

      env.each do |k,v|
        if k.start_with?("HTTP_") and k.include?(",") and k != "HTTP_TRANSFER,ENCODING"
          if to_delete
            to_delete << k
          else
            to_delete = [k]
          end

          unless to_add
            to_add = {}
          end

          to_add[k.gsub(",", "_")] = v
        end
      end

      if to_delete
        to_delete.each { |k| env.delete(k) }
        env.merge! to_add
      end

      # A rack extension. If the app writes #call'ables to this
      # array, we will invoke them when the request is done.
      #
      after_reply = env[RACK_AFTER_REPLY] = []

      begin
        begin
          status, headers, res_body = @app.call(env)

          return :async if req.hijacked

          status = status.to_i

          if status == -1
            unless headers.empty? and res_body == []
              raise "async response must have empty headers and body"
            end

            return :async
          end
        rescue ThreadPool::ForceShutdown => e
          @events.log "Detected force shutdown of a thread, returning 503"
          @events.unknown_error self, e, "Rack app"

          status = 503
          headers = {}
          res_body = ["Request was internally terminated early\n"]

        rescue Exception => e
          @events.unknown_error self, e, "Rack app", env

          status, headers, res_body = lowlevel_error(e, env)
        end

        content_length = nil
        no_body = head

        if res_body.kind_of? Array and res_body.size == 1
          content_length = res_body[0].bytesize
        end

        cork_socket client

        line_ending = LINE_END
        colon = COLON

        http_11 = if env[HTTP_VERSION] == HTTP_11
          allow_chunked = true
          keep_alive = env.fetch(HTTP_CONNECTION, "").downcase != CLOSE
          include_keepalive_header = false

          # An optimization. The most common response is 200, so we can
          # reply with the proper 200 status without having to compute
          # the response header.
          #
          if status == 200
            lines << HTTP_11_200
          else
            lines.append "HTTP/1.1 ", status.to_s, " ",
                         fetch_status_code(status), line_ending

            no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
          end
          true
        else
          allow_chunked = false
          keep_alive = env.fetch(HTTP_CONNECTION, "").downcase == KEEP_ALIVE
          include_keepalive_header = keep_alive

          # Same optimization as above for HTTP/1.1
          #
          if status == 200
            lines << HTTP_10_200
          else
            lines.append "HTTP/1.0 ", status.to_s, " ",
                         fetch_status_code(status), line_ending

            no_body ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status]
          end
          false
        end

        response_hijack = nil

        headers.each do |k, vs|
          case k.downcase
          when CONTENT_LENGTH2
            next if possible_header_injection?(vs)
            content_length = vs
            next
          when TRANSFER_ENCODING
            allow_chunked = false
            content_length = nil
          when HIJACK
            response_hijack = vs
            next
          end

          if vs.respond_to?(:to_s) && !vs.to_s.empty?
            vs.to_s.split(NEWLINE).each do |v|
              next if possible_header_injection?(v)
              lines.append k, colon, v, line_ending
            end
          else
            lines.append k, colon, line_ending
          end
        end

        if include_keepalive_header
          lines << CONNECTION_KEEP_ALIVE
        elsif http_11 && !keep_alive
          lines << CONNECTION_CLOSE
        end

        if no_body
          if content_length and status != 204
            lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
          end

          lines << line_ending
          fast_write client, lines.to_s
          return keep_alive
        end

        if content_length
          lines.append CONTENT_LENGTH_S, content_length.to_s, line_ending
          chunked = false
        elsif !response_hijack and allow_chunked
          lines << TRANSFER_ENCODING_CHUNKED
          chunked = true
        end

        lines << line_ending

        fast_write client, lines.to_s

        if response_hijack
          response_hijack.call client
          return :async
        end

        begin
          res_body.each do |part|
            next if part.bytesize.zero?
            if chunked
              fast_write client, part.bytesize.to_s(16)
              fast_write client, line_ending
              fast_write client, part
              fast_write client, line_ending
            else
              fast_write client, part
            end

            client.flush
          end

          if chunked
            fast_write client, CLOSE_CHUNKED
            client.flush
          end
        rescue SystemCallError, IOError
          raise ConnectionError, "Connection error detected during write"
        end

      ensure
        uncork_socket client

        body.close
        req.tempfile.unlink if req.tempfile
        res_body.close if res_body.respond_to? :close

        after_reply.each { |o| o.call }
      end

      return keep_alive
    end

    def fetch_status_code(status)
      HTTP_STATUS_CODES.fetch(status) { 'CUSTOM' }
    end
    private :fetch_status_code

    # Given the request +env+ from +client+ and the partial body +body+
    # plus a potential Content-Length value +cl+, finish reading
    # the body and return it.
    #
    # If the body is larger than MAX_BODY, a Tempfile object is used
    # for the body, otherwise a StringIO is used.
    #
    def read_body(env, client, body, cl)
      content_length = cl.to_i

      remain = content_length - body.bytesize

      return StringIO.new(body) if remain <= 0

      # Use a Tempfile if there is a lot of data left
      if remain > MAX_BODY
        stream = Tempfile.new(Const::PUMA_TMP_BASE)
        stream.binmode
      else
        # The body[0,0] trick is to get an empty string in the same
        # encoding as body.
        stream = StringIO.new body[0,0]
      end

      stream.write body

      # Read an odd sized chunk so we can read even sized ones
      # after this
      chunk = client.readpartial(remain % CHUNK_SIZE)

      # No chunk means a closed socket
      unless chunk
        stream.close
        return nil
      end

      remain -= stream.write(chunk)

      # Read the rest of the chunks
      while remain > 0
        chunk = client.readpartial(CHUNK_SIZE)
        unless chunk
          stream.close
          return nil
        end

        remain -= stream.write(chunk)
      end

      stream.rewind

      return stream
    end

    # A fallback rack response if +@app+ raises an exception.
    #
    def lowlevel_error(e, env)
      if handler = @options[:lowlevel_error_handler]
        if handler.arity == 1
          return handler.call(e)
        else
          return handler.call(e, env)
        end
      end

      if @leak_stack_on_error
        [500, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{e.backtrace.join("\n")}"]]
      else
        [500, {}, ["An unhandled lowlevel error occurred. The application logs may have details.\n"]]
      end
    end

    # Wait for all outstanding requests to finish.
    #
    def graceful_shutdown
      if @options[:shutdown_debug]
        threads = Thread.list
        total = threads.size

        pid = Process.pid

        $stdout.syswrite "#{pid}: === Begin thread backtrace dump ===\n"

        threads.each_with_index do |t,i|
          $stdout.syswrite "#{pid}: Thread #{i+1}/#{total}: #{t.inspect}\n"
          $stdout.syswrite "#{pid}: #{t.backtrace.join("\n#{pid}: ")}\n\n"
        end
        $stdout.syswrite "#{pid}: === End thread backtrace dump ===\n"
      end

      if @options[:drain_on_shutdown]
        count = 0

        while true
          ios = IO.select @binder.ios, nil, nil, 0
          break unless ios

          ios.first.each do |sock|
            begin
              if io = sock.accept_nonblock
                count += 1
                client = Client.new io, @binder.env(sock)
                @thread_pool << client
              end
            rescue SystemCallError
            end
          end
        end

        @events.debug "Drained #{count} additional connections."
      end

      if @thread_pool
        if timeout = @options[:force_shutdown_after]
          @thread_pool.shutdown timeout.to_i
        else
          @thread_pool.shutdown
        end
      end
    end

    def notify_safely(message)
      begin
        @notify << message
      rescue IOError
        # The server, in another thread, is shutting down
        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
      rescue RuntimeError => e
        # Temporary workaround for https://bugs.ruby-lang.org/issues/13239
        if e.message.include?('IOError')
          Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        else
          raise e
        end
      end
    end
    private :notify_safely

    # Stops the acceptor thread and then causes the worker threads to finish
    # off the request queue before finally exiting.

    def stop(sync=false)
      notify_safely(STOP_COMMAND)
      @thread.join if @thread && sync
    end

    def halt(sync=false)
      notify_safely(HALT_COMMAND)
      @thread.join if @thread && sync
    end

    def begin_restart
      notify_safely(RESTART_COMMAND)
    end

    def fast_write(io, str)
      n = 0
      while true
        begin
          n = io.syswrite str
        rescue Errno::EAGAIN, Errno::EWOULDBLOCK
          if !IO.select(nil, [io], nil, WRITE_TIMEOUT)
            raise ConnectionError, "Socket timeout writing data"
          end

          retry
        rescue Errno::EPIPE, SystemCallError, IOError
          raise ConnectionError, "Socket timeout writing data"
        end

        return if n == str.bytesize
        str = str.byteslice(n..-1)
      end
    end
    private :fast_write

    ThreadLocalKey = :puma_server

    def self.current
      Thread.current[ThreadLocalKey]
    end

    def shutting_down?
      @status == :stop || @status == :restart
    end

    def possible_header_injection?(header_value)
      HTTP_INJECTION_REGEX =~ header_value.to_s
    end
    private :possible_header_injection?
  end
end
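`lowlevel_error` above consults `options[:lowlevel_error_handler]` before falling back to its built-in 500 response: a handler of arity 1 is called with just the exception, anything else with the exception and the Rack env, and its return value is used directly as the `[status, headers, body]` triple. A hedged sketch of supplying one through the options hash accepted by `Puma::Server.new` (the handler body, app, and port are illustrative, not from the gem):

# illustrative only
require 'puma/server'
require 'puma/events'

app = ->(env) { raise 'boom' }

# Arity 2, so Server#lowlevel_error calls handler.call(e, env).
handler = lambda do |e, env|
  [500, { 'Content-Type' => 'text/plain' }, ["custom error page: #{e.class}\n"]]
end

server = Puma::Server.new app, Puma::Events.stdio,
                          lowlevel_error_handler: handler
server.add_tcp_listener '127.0.0.1', 9293
server.run.join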