unicorn 1.1.7 → 2.0.0pre1

Files changed (45)
  1. data/GIT-VERSION-GEN +1 -1
  2. data/GNUmakefile +14 -5
  3. data/Rakefile +3 -28
  4. data/TODO +7 -0
  5. data/bin/unicorn +9 -13
  6. data/bin/unicorn_rails +12 -14
  7. data/examples/big_app_gc.rb +33 -2
  8. data/ext/unicorn_http/global_variables.h +3 -1
  9. data/ext/unicorn_http/unicorn_http.rl +15 -6
  10. data/lib/unicorn.rb +67 -820
  11. data/lib/unicorn/app/exec_cgi.rb +3 -4
  12. data/lib/unicorn/configurator.rb +20 -25
  13. data/lib/unicorn/const.rb +26 -25
  14. data/lib/unicorn/http_request.rb +64 -57
  15. data/lib/unicorn/http_response.rb +16 -35
  16. data/lib/unicorn/http_server.rb +700 -0
  17. data/lib/unicorn/launcher.rb +4 -3
  18. data/lib/unicorn/oob_gc.rb +50 -61
  19. data/lib/unicorn/socket_helper.rb +4 -4
  20. data/lib/unicorn/tee_input.rb +18 -26
  21. data/lib/unicorn/tmpio.rb +29 -0
  22. data/lib/unicorn/util.rb +51 -85
  23. data/lib/unicorn/worker.rb +40 -0
  24. data/local.mk.sample +0 -9
  25. data/script/isolate_for_tests +43 -0
  26. data/t/GNUmakefile +8 -1
  27. data/t/t0003-working_directory.sh +0 -5
  28. data/t/t0010-reap-logging.sh +55 -0
  29. data/t/t0303-rails3-alt-working_directory_config.ru.sh +0 -5
  30. data/t/test-rails3.sh +1 -1
  31. data/test/exec/test_exec.rb +1 -1
  32. data/test/unit/test_http_parser_ng.rb +11 -0
  33. data/test/unit/test_request.rb +12 -0
  34. data/test/unit/test_response.rb +23 -21
  35. data/test/unit/test_signals.rb +1 -1
  36. data/test/unit/test_tee_input.rb +21 -19
  37. data/unicorn.gemspec +3 -2
  38. metadata +47 -25
  39. data/t/oob_gc.ru +0 -21
  40. data/t/oob_gc_path.ru +0 -21
  41. data/t/t0012-reload-empty-config.sh +0 -82
  42. data/t/t0018-write-on-close.sh +0 -23
  43. data/t/t9001-oob_gc.sh +0 -47
  44. data/t/t9002-oob_gc-path.sh +0 -75
  45. data/t/write-on-close.ru +0 -11
data/GIT-VERSION-GEN CHANGED
@@ -1,7 +1,7 @@
  #!/bin/sh
 
  GVF=GIT-VERSION-FILE
- DEF_VER=v1.1.7.GIT
+ DEF_VER=v2.0.0pre1.GIT
 
  LF='
  '
data/GNUmakefile CHANGED
@@ -24,6 +24,15 @@ endif
 
  RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
 
+ isolate_libs := tmp/isolate/.$(RUBY_ENGINE)-$(RUBY_VERSION).libs
+ MYLIBS = $(RUBYLIB):$(shell cat $(isolate_libs) 2>/dev/null || \
+ (test -f ./script/isolate_for_tests && \
+ $(RUBY) ./script/isolate_for_tests >/dev/null && \
+ cat $(isolate_libs) 2>/dev/null))
+
+ echo:
+ @echo $(MYLIBS)
+
  # dunno how to implement this as concisely in Ruby, and hell, I love awk
  awk_slow := awk '/def test_/{print FILENAME"--"$$2".n"}' 2>/dev/null
 
@@ -117,14 +126,14 @@ run_test = $(quiet_pre) \
  %.n: arg = $(subst .n,,$(subst --, -n ,$@))
  %.n: t = $(subst .n,$(log_suffix),$@)
  %.n: export PATH := $(test_prefix)/bin:$(PATH)
- %.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+ %.n: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
  %.n: $(test_prefix)/.stamp
  $(run_test)
 
  $(T): arg = $@
  $(T): t = $(subst .rb,$(log_suffix),$@)
  $(T): export PATH := $(test_prefix)/bin:$(PATH)
- $(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+ $(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
  $(T): $(test_prefix)/.stamp
  $(run_test)
 
@@ -169,7 +178,7 @@ NEWS: GIT-VERSION-FILE .manifest
  $(RAKE) -s news_rdoc > $@+
  mv $@+ $@
 
- SINCE = 1.0.0
+ SINCE = 1.1.4
  ChangeLog: LOG_VERSION = \
  $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
  echo $(GIT_VERSION) || git describe)
@@ -189,7 +198,7 @@ atom = <link rel="alternate" title="Atom feed" href="$(1)" \
  doc: .document $(ext)/unicorn_http.c NEWS ChangeLog
  for i in $(man1_rdoc); do echo > $$i; done
  find bin lib -type f -name '*.rbc' -exec rm -f '{}' ';'
- rdoc -t "$(shell sed -ne '1s/^= //p' README)"
+ rdoc -a -t "$(shell sed -ne '1s/^= //p' README)"
  install -m644 COPYING doc/COPYING
  install -m644 $(shell grep '^[A-Z]' .document) doc/
  $(MAKE) -C Documentation install-html install-man
@@ -251,7 +260,7 @@ $(T_r).%.r: rv = $(subst .r,,$(subst $(T_r).,,$@))
  $(T_r).%.r: extra = ' 'v$(rv)
  $(T_r).%.r: arg = $(T_r)
  $(T_r).%.r: export PATH := $(test_prefix)/bin:$(PATH)
- $(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
+ $(T_r).%.r: export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(MYLIBS)
  $(T_r).%.r: export UNICORN_RAILS_TEST_VERSION = $(rv)
  $(T_r).%.r: export RAILS_GIT_REPO = $(CURDIR)/$(rails_git)
  $(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/v2.3.8-stamp
data/Rakefile CHANGED
@@ -15,7 +15,7 @@ def tags
  timefmt = '%Y-%m-%dT%H:%M:%SZ'
  @tags ||= `git tag -l`.split(/\n/).map do |tag|
  next if tag == "v0.0.0"
- if %r{\Av[\d\.]+} =~ tag
+ if %r{\Av[\d\.]+\z} =~ tag
  header, subject, body = `git cat-file tag #{tag}`.split(/\n\n/, 3)
  header = header.split(/\n/)
  tagger = header.grep(/\Atagger /).first
@@ -163,11 +163,12 @@ task :fm_update do
  req = {
  "auth_code" => api_token,
  "release" => {
- "tag_list" => "Stable",
+ "tag_list" => "Experimental",
  "version" => version,
  "changelog" => changelog,
  },
  }.to_json
+
  if ! changelog.strip.empty? && version =~ %r{\A[\d\.]+\d+\z}
  Net::HTTP.start(uri.host, uri.port) do |http|
  p http.post(uri.path, req, {'Content-Type'=>'application/json'})
@@ -193,29 +194,3 @@ begin
  end
  rescue LoadError
  end
-
- task :isolate do
- require 'isolate'
- ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
- opts = {
- :system => false,
- :path => "tmp/isolate/#{ruby_engine}-#{RUBY_VERSION}",
- :multiruby => false, # we want "1.8.7" instead of "1.8"
- }
- fp = File.open(__FILE__, "rb")
- fp.flock(File::LOCK_EX)
-
- # C extensions aren't binary-compatible across Ruby versions
- pid = fork { Isolate.now!(opts) { gem 'sqlite3-ruby', '1.2.5' } }
- _, status = Process.waitpid2(pid)
- status.success? or abort status.inspect
-
- # pure Ruby gems can be shared across all Rubies
- %w(3.0.0).each do |rails_ver|
- opts[:path] = "tmp/isolate/rails-#{rails_ver}"
- pid = fork { Isolate.now!(opts) { gem 'rails', rails_ver } }
- _, status = Process.waitpid2(pid)
- status.success? or abort status.inspect
- end
- fp.flock(File::LOCK_UN)
- end
data/TODO CHANGED
@@ -3,3 +3,10 @@
  * performance validation (esp. TeeInput)
 
  * improve test suite
+
+ * scalability to >= 1024 worker processes for crazy NUMA systems
+
+ * Rack 2.x support (when Rack 2.x exists)
+
+ * allow disabling "rack.input" rewindability for performance
+ (but violate the Rack 1.x SPEC)
data/bin/unicorn CHANGED
@@ -4,16 +4,13 @@ require 'unicorn/launcher'
  require 'optparse'
 
  ENV["RACK_ENV"] ||= "development"
- daemonize = false
- options = { :listeners => [] }
- host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
- set_listener = false
+ rackup_opts = Unicorn::Configurator::RACKUP
+ options = rackup_opts[:options]
 
  opts = OptionParser.new("", 24, ' ') do |opts|
  cmd = File.basename($0)
  opts.banner = "Usage: #{cmd} " \
  "[ruby options] [#{cmd} options] [rackup config file]"
-
  opts.separator "Ruby options:"
 
  lineno = 1
@@ -46,14 +43,14 @@ opts = OptionParser.new("", 24, ' ') do |opts|
 
  opts.on("-o", "--host HOST",
  "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
- host = h
- set_listener = true
+ rackup_opts[:host] = h
+ rackup_opts[:set_listener] = true
  end
 
  opts.on("-p", "--port PORT",
  "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
- port = p.to_i
- set_listener = true
+ rackup_opts[:port] = p.to_i
+ rackup_opts[:set_listener] = true
  end
 
  opts.on("-E", "--env ENVIRONMENT",
@@ -62,7 +59,7 @@ opts = OptionParser.new("", 24, ' ') do |opts|
  end
 
  opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
- daemonize = d ? true : false
+ rackup_opts[:daemonize] = !!d
  end
 
  opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
@@ -109,16 +106,15 @@ opts = OptionParser.new("", 24, ' ') do |opts|
  end
 
  app = Unicorn.builder(ARGV[0] || 'config.ru', opts)
- options[:listeners] << "#{host}:#{port}" if set_listener
 
  if $DEBUG
  require 'pp'
  pp({
  :unicorn_options => options,
  :app => app,
- :daemonize => daemonize,
+ :daemonize => rackup_opts[:daemonize],
  })
  end
 
- Unicorn::Launcher.daemonize!(options) if daemonize
+ Unicorn::Launcher.daemonize!(options) if rackup_opts[:daemonize]
  Unicorn.run(app, options)
data/bin/unicorn_rails CHANGED
@@ -4,11 +4,9 @@ require 'unicorn/launcher'
  require 'optparse'
  require 'fileutils'
 
- daemonize = false
- options = { :listeners => [] }
- host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
- set_listener = false
  ENV['RAILS_ENV'] ||= "development"
+ rackup_opts = Unicorn::Configurator::RACKUP
+ options = rackup_opts[:options]
 
  opts = OptionParser.new("", 24, ' ') do |opts|
  cmd = File.basename($0)
@@ -46,13 +44,14 @@ opts = OptionParser.new("", 24, ' ') do |opts|
 
  opts.on("-o", "--host HOST",
  "listen on HOST (default: #{Unicorn::Const::DEFAULT_HOST})") do |h|
- host = h
- set_listener = true
+ rackup_opts[:host] = h
+ rackup_opts[:set_listener] = true
  end
 
- opts.on("-p", "--port PORT", "use PORT (default: #{port})") do |p|
- port = p.to_i
- set_listener = true
+ opts.on("-p", "--port PORT",
+ "use PORT (default: #{Unicorn::Const::DEFAULT_PORT})") do |p|
+ rackup_opts[:port] = p.to_i
+ rackup_opts[:set_listener] = true
  end
 
  opts.on("-E", "--env RAILS_ENV",
@@ -61,7 +60,7 @@ opts = OptionParser.new("", 24, ' ') do |opts|
  end
 
  opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
- daemonize = d ? true : false
+ rackup_opts[:daemonize] = !!d
  end
 
  # Unicorn-specific stuff
@@ -186,15 +185,14 @@ def rails_builder(ru, opts, daemonize)
  end
  end
 
- app = rails_builder(ARGV[0], opts, daemonize)
- options[:listeners] << "#{host}:#{port}" if set_listener
+ app = rails_builder(ARGV[0], opts, rackup_opts[:daemonize])
 
  if $DEBUG
  require 'pp'
  pp({
  :unicorn_options => options,
  :app => app,
- :daemonize => daemonize,
+ :daemonize => rackup_opts[:daemonize],
  })
  end
 
@@ -203,7 +201,7 @@ options[:after_reload] = lambda do
  FileUtils.mkdir_p(%w(cache pids sessions sockets).map! { |d| "tmp/#{d}" })
  end
 
- if daemonize
+ if rackup_opts[:daemonize]
  options[:pid] = "tmp/pids/unicorn.pid"
  Unicorn::Launcher.daemonize!(options)
  end
data/examples/big_app_gc.rb CHANGED
@@ -1,2 +1,33 @@
- # see {Unicorn::OobGC}[http://unicorn.bogomips.org/Unicorn/OobGC.html]
- # Unicorn::OobGC was broken in Unicorn v3.3.1 - v3.6.1 and fixed in v3.6.2
+ # Run GC after every request, before attempting to accept more connections.
+ #
+ # You could customize this patch to read REQ["PATH_INFO"] and only
+ # call GC.start after expensive requests.
+ #
+ # We could have this wrap the response body.close as middleware, but the
+ # scannable stack is would still be bigger than it would be here.
+ #
+ # This shouldn't hurt overall performance as long as the server cluster
+ # is at <=50% CPU capacity, and improves the performance of most memory
+ # intensive requests. This serves to improve _client-visible_
+ # performance (possibly at the cost of overall performance).
+ #
+ # We'll call GC after each request is been written out to the socket, so
+ # the client never sees the extra GC hit it. It's ideal to call the GC
+ # inside the HTTP server (vs middleware or hooks) since the stack is
+ # smaller at this point, so the GC will both be faster and more
+ # effective at releasing unused memory.
+ #
+ # This monkey patch is _only_ effective for applications that use a lot
+ # of memory, and will hurt simpler apps/endpoints that can process
+ # multiple requests before incurring GC.
+
+ class Unicorn::HttpServer
+ REQ = Unicorn::HttpRequest::REQ
+ alias _process_client process_client
+ undef_method :process_client
+ def process_client(client)
+ _process_client(client)
+ REQ.clear
+ GC.start
+ end
+ end if defined?(Unicorn)
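One way to use the patch above (a sketch of mine, not part of this diff) is to copy examples/big_app_gc.rb next to your Unicorn config file and require it from there, since the config file is evaluated inside the unicorn process where the Unicorn constant is already defined. The path, worker count and preload setting below are assumptions for illustration:

    # config/unicorn.rb -- hypothetical config loading the GC patch above
    require ::File.expand_path("../big_app_gc", __FILE__)  # assumed location of the copied example
    worker_processes 4
    preload_app true

Then start the server as usual, e.g. unicorn -c config/unicorn.rb config.ru.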
data/ext/unicorn_http/global_variables.h CHANGED
@@ -35,13 +35,15 @@ static VALUE g_HEAD;
  static const char * const MAX_##N##_LENGTH_ERR = \
  "HTTP element " # N " is longer than the " # length " allowed length."
 
+ NORETURN(static void parser_error(const char *));
+
  /**
  * Validates the max length of given input and throws an HttpParserError
  * exception if over.
  */
  #define VALIDATE_MAX_LENGTH(len, N) do { \
  if (len > MAX_##N##_LENGTH) \
- rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \
+ parser_error(MAX_##N##_LENGTH_ERR); \
  } while (0)
 
  /** Defines global strings in the init method. */
data/ext/unicorn_http/unicorn_http.rl CHANGED
@@ -48,6 +48,15 @@ struct http_parser {
 
  static void finalize_header(struct http_parser *hp, VALUE req);
 
+ static void parser_error(const char *msg)
+ {
+ VALUE exc = rb_exc_new2(eHttpParserError, msg);
+ VALUE bt = rb_ary_new();
+
+ rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
+ rb_exc_raise(exc);
+ }
+
  #define REMAINING (unsigned long)(pe - p)
  #define LEN(AT, FPC) (FPC - buffer - hp->AT)
  #define MARK(M,FPC) (hp->M = (FPC) - buffer)
@@ -132,7 +141,7 @@ http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
  static inline void hp_invalid_if_trailer(struct http_parser *hp)
  {
  if (HP_FL_TEST(hp, INTRAILER))
- rb_raise(eHttpParserError, "invalid Trailer");
+ parser_error("invalid Trailer");
  }
 
  static void write_cont_value(struct http_parser *hp,
@@ -141,7 +150,7 @@ static void write_cont_value(struct http_parser *hp,
  char *vptr;
 
  if (hp->cont == Qfalse)
- rb_raise(eHttpParserError, "invalid continuation line");
+ parser_error("invalid continuation line");
  if (NIL_P(hp->cont))
  return; /* we're ignoring this header (probably Host:) */
 
@@ -192,7 +201,7 @@ static void write_value(VALUE req, struct http_parser *hp,
  } else if (f == g_content_length) {
  hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v));
  if (hp->len.content < 0)
- rb_raise(eHttpParserError, "invalid Content-Length");
+ parser_error("invalid Content-Length");
  HP_FL_SET(hp, HASBODY);
  hp_invalid_if_trailer(hp);
  } else if (f == g_http_transfer_encoding) {
@@ -285,7 +294,7 @@ static void write_value(VALUE req, struct http_parser *hp,
  action add_to_chunk_size {
  hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
  if (hp->len.chunk < 0)
- rb_raise(eHttpParserError, "invalid chunk size");
+ parser_error("invalid chunk size");
  }
  action header_done {
  finalize_header(hp, req);
@@ -550,7 +559,7 @@ static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data)
  }
 
  if (hp->cs == http_parser_error)
- rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+ parser_error("Invalid HTTP format, parsing fails.");
 
  return Qnil;
  }
@@ -643,7 +652,7 @@ static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
  hp->s.dest_offset = 0;
  http_parser_execute(hp, buf, dptr, dlen);
  if (hp->cs == http_parser_error)
- rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+ parser_error("Invalid HTTP format, parsing fails.");
 
  assert(hp->s.dest_offset <= hp->offset &&
  "destination buffer overflow");
data/lib/unicorn.rb CHANGED
@@ -1,836 +1,83 @@
  # -*- encoding: binary -*-
-
  require 'fcntl'
  require 'etc'
  require 'stringio'
  require 'rack'
- require 'unicorn/socket_helper'
- require 'unicorn/const'
- require 'unicorn/http_request'
- require 'unicorn/configurator'
- require 'unicorn/util'
- require 'unicorn/tee_input'
- require 'unicorn/http_response'
+ require 'kgio'
 
- # Unicorn module containing all of the classes (include C extensions) for running
- # a Unicorn web server. It contains a minimalist HTTP server with just enough
- # functionality to service web application requests fast as possible.
+ # Unicorn module containing all of the classes (include C extensions) for
+ # running a Unicorn web server. It contains a minimalist HTTP server with just
+ # enough functionality to service web application requests fast as possible.
  module Unicorn
-
- # raised inside TeeInput when a client closes the socket inside the
- # application dispatch. This is always raised with an empty backtrace
- # since there is nothing in the application stack that is responsible
- # for client shutdowns/disconnects.
- class ClientShutdown < EOFError
- end
-
- class << self
- def run(app, options = {})
- HttpServer.new(app, options).start.join
- end
-
- # This returns a lambda to pass in as the app, this does not "build" the
- # app (which we defer based on the outcome of "preload_app" in the
- # Unicorn config). The returned lambda will be called when it is
- # time to build the app.
- def builder(ru, opts)
- # allow Configurator to parse cli switches embedded in the ru file
- Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
-
- # always called after config file parsing, may be called after forking
- lambda do ||
- inner_app = case ru
- when /\.ru$/
- raw = File.read(ru)
- raw.sub!(/^__END__\n.*/, '')
- eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
- else
- require ru
- Object.const_get(File.basename(ru, '.rb').capitalize)
- end
-
- pp({ :inner_app => inner_app }) if $DEBUG
-
- # return value, matches rackup defaults based on env
- case ENV["RACK_ENV"]
- when "development"
- Rack::Builder.new do
- use Rack::CommonLogger, $stderr
- use Rack::ShowExceptions
- use Rack::Lint
- run inner_app
- end.to_app
- when "deployment"
- Rack::Builder.new do
- use Rack::CommonLogger, $stderr
- run inner_app
- end.to_app
- else
- inner_app
- end
- end
- end
-
- # returns an array of strings representing TCP listen socket addresses
- # and Unix domain socket paths. This is useful for use with
- # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
- def listener_names
- HttpServer::LISTENERS.map { |io| SocketHelper.sock_name(io) }
- end
+ def self.run(app, options = {})
+ Unicorn::HttpServer.new(app, options).start.join
  end
 
- # This is the process manager of Unicorn. This manages worker
- # processes which in turn handle the I/O and application process.
- # Listener sockets are started in the master process and shared with
- # forked worker children.
-
- class HttpServer < Struct.new(:app, :timeout, :worker_processes,
- :before_fork, :after_fork, :before_exec,
- :logger, :pid, :listener_opts, :preload_app,
- :reexec_pid, :orig_app, :init_listeners,
- :master_pid, :config, :ready_pipe, :user)
- include ::Unicorn::SocketHelper
-
- # prevents IO objects in here from being GC-ed
- IO_PURGATORY = []
-
- # all bound listener sockets
- LISTENERS = []
-
- # This hash maps PIDs to Workers
- WORKERS = {}
-
- # We use SELF_PIPE differently in the master and worker processes:
- #
- # * The master process never closes or reinitializes this once
- # initialized. Signal handlers in the master process will write to
- # it to wake up the master from IO.select in exactly the same manner
- # djb describes in http://cr.yp.to/docs/selfpipe.html
- #
- # * The workers immediately close the pipe they inherit from the
- # master and replace it with a new pipe after forking. This new
- # pipe is also used to wakeup from IO.select from inside (worker)
- # signal handlers. However, workers *close* the pipe descriptors in
- # the signal handlers to raise EBADF in IO.select instead of writing
- # like we do in the master. We cannot easily use the reader set for
- # IO.select because LISTENERS is already that set, and it's extra
- # work (and cycles) to distinguish the pipe FD from the reader set
- # once IO.select returns. So we're lazy and just close the pipe when
- # a (rare) signal arrives in the worker and reinitialize the pipe later.
- SELF_PIPE = []
-
- # signal queue used for self-piping
- SIG_QUEUE = []
-
- # constant lookups are faster and we're single-threaded/non-reentrant
- REQUEST = HttpRequest.new
-
- # We populate this at startup so we can figure out how to reexecute
- # and upgrade the currently running instance of Unicorn
- # This Hash is considered a stable interface and changing its contents
- # will allow you to switch between different installations of Unicorn
- # or even different installations of the same applications without
- # downtime. Keys of this constant Hash are described as follows:
- #
- # * 0 - the path to the unicorn/unicorn_rails executable
- # * :argv - a deep copy of the ARGV array the executable originally saw
- # * :cwd - the working directory of the application, this is where
- # you originally started Unicorn.
- #
- # To change your unicorn executable to a different path without downtime,
- # you can set the following in your Unicorn config file, HUP and then
- # continue with the traditional USR2 + QUIT upgrade steps:
- #
- # Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
- START_CTX = {
- :argv => ARGV.map { |arg| arg.dup },
- :cwd => lambda {
- # favor ENV['PWD'] since it is (usually) symlink aware for
- # Capistrano and like systems
- begin
- a = File.stat(pwd = ENV['PWD'])
- b = File.stat(Dir.pwd)
- a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
- rescue
- Dir.pwd
- end
- }.call,
- 0 => $0.dup,
- }
-
- # This class and its members can be considered a stable interface
- # and will not change in a backwards-incompatible fashion between
- # releases of Unicorn. You may need to access it in the
- # before_fork/after_fork hooks. See the Unicorn::Configurator RDoc
- # for examples.
- class Worker < Struct.new(:nr, :tmp, :switched)
-
- # worker objects may be compared to just plain numbers
- def ==(other_nr)
- self.nr == other_nr
- end
-
- # Changes the worker process to the specified +user+ and +group+
- # This is only intended to be called from within the worker
- # process from the +after_fork+ hook. This should be called in
- # the +after_fork+ hook after any priviledged functions need to be
- # run (e.g. to set per-worker CPU affinity, niceness, etc)
- #
- # Any and all errors raised within this method will be propagated
- # directly back to the caller (usually the +after_fork+ hook.
- # These errors commonly include ArgumentError for specifying an
- # invalid user/group and Errno::EPERM for insufficient priviledges
- def user(user, group = nil)
- # we do not protect the caller, checking Process.euid == 0 is
- # insufficient because modern systems have fine-grained
- # capabilities. Let the caller handle any and all errors.
- uid = Etc.getpwnam(user).uid
- gid = Etc.getgrnam(group).gid if group
- Unicorn::Util.chown_logs(uid, gid)
- tmp.chown(uid, gid)
- if gid && Process.egid != gid
- Process.initgroups(user, gid)
- Process::GID.change_privilege(gid)
- end
- Process.euid != uid and Process::UID.change_privilege(uid)
- self.switched = true
- end
-
- end
-
- # Creates a working server on host:port (strange things happen if
- # port isn't a Number). Use HttpServer::run to start the server and
- # HttpServer.run.join to join the thread that's processing
- # incoming requests on the socket.
- def initialize(app, options = {})
- self.app = app
- self.reexec_pid = 0
- self.ready_pipe = options.delete(:ready_pipe)
- self.init_listeners = options[:listeners] ? options[:listeners].dup : []
- self.config = Configurator.new(options.merge(:use_defaults => true))
- self.listener_opts = {}
-
- # we try inheriting listeners first, so we bind them later.
- # we don't write the pid file until we've bound listeners in case
- # unicorn was started twice by mistake. Even though our #pid= method
- # checks for stale/existing pid files, race conditions are still
- # possible (and difficult/non-portable to avoid) and can be likely
- # to clobber the pid if the second start was in quick succession
- # after the first, so we rely on the listener binding to fail in
- # that case. Some tests (in and outside of this source tree) and
- # monitoring tools may also rely on pid files existing before we
- # attempt to connect to the listener(s)
- config.commit!(self, :skip => [:listeners, :pid])
- self.orig_app = app
- end
-
- # Runs the thing. Returns self so you can run join on it
- def start
- BasicSocket.do_not_reverse_lookup = true
-
- # inherit sockets from parents, they need to be plain Socket objects
- # before they become UNIXServer or TCPServer
- inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
- io = Socket.for_fd(fd.to_i)
- set_server_sockopt(io, listener_opts[sock_name(io)])
- IO_PURGATORY << io
- logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
- server_cast(io)
- end
-
- config_listeners = config[:listeners].dup
- LISTENERS.replace(inherited)
-
- # we start out with generic Socket objects that get cast to either
- # TCPServer or UNIXServer objects; but since the Socket objects
- # share the same OS-level file descriptor as the higher-level *Server
- # objects; we need to prevent Socket objects from being garbage-collected
- config_listeners -= listener_names
- if config_listeners.empty? && LISTENERS.empty?
- config_listeners << Unicorn::Const::DEFAULT_LISTEN
- init_listeners << Unicorn::Const::DEFAULT_LISTEN
- START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
- end
- config_listeners.each { |addr| listen(addr) }
- raise ArgumentError, "no listeners" if LISTENERS.empty?
-
- # this pipe is used to wake us up from select(2) in #join when signals
- # are trapped. See trap_deferred.
- init_self_pipe!
-
- # setup signal handlers before writing pid file in case people get
- # trigger happy and send signals as soon as the pid file exists.
- # Note that signals don't actually get handled until the #join method
- QUEUE_SIGS.each { |sig| trap_deferred(sig) }
- trap(:CHLD) { |_| awaken_master }
- self.pid = config[:pid]
-
- self.master_pid = $$
- build_app! if preload_app
- maintain_worker_count
- self
- end
-
- # replaces current listener set with +listeners+. This will
- # close the socket if it will not exist in the new listener set
- def listeners=(listeners)
- cur_names, dead_names = [], []
- listener_names.each do |name|
- if ?/ == name[0]
- # mark unlinked sockets as dead so we can rebind them
- (File.socket?(name) ? cur_names : dead_names) << name
- else
- cur_names << name
- end
- end
- set_names = listener_names(listeners)
- dead_names.concat(cur_names - set_names).uniq!
-
- LISTENERS.delete_if do |io|
- if dead_names.include?(sock_name(io))
- IO_PURGATORY.delete_if do |pio|
- pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
- end
- (io.close rescue nil).nil? # true
- else
- set_server_sockopt(io, listener_opts[sock_name(io)])
- false
- end
- end
-
- (set_names - cur_names).each { |addr| listen(addr) }
- end
-
- def stdout_path=(path); redirect_io($stdout, path); end
- def stderr_path=(path); redirect_io($stderr, path); end
-
- def logger=(obj)
- HttpRequest::DEFAULTS["rack.logger"] = super
- end
-
- # sets the path for the PID file of the master process
- def pid=(path)
- if path
- if x = valid_pid?(path)
- return path if pid && path == pid && x == $$
- if x == reexec_pid && pid =~ /\.oldbin\z/
- logger.warn("will not set pid=#{path} while reexec-ed "\
- "child is running PID:#{x}")
- return
- end
- raise ArgumentError, "Already running on PID:#{x} " \
- "(or pid=#{path} is stale)"
- end
- end
- unlink_pid_safe(pid) if pid
-
- if path
- fp = begin
- tmp = "#{File.dirname(path)}/#{rand}.#$$"
- File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
- rescue Errno::EEXIST
- retry
- end
- fp.syswrite("#$$\n")
- File.rename(fp.path, path)
- fp.close
- end
- super(path)
- end
-
- # add a given address to the +listeners+ set, idempotently
- # Allows workers to add a private, per-process listener via the
- # after_fork hook. Very useful for debugging and testing.
- # +:tries+ may be specified as an option for the number of times
- # to retry, and +:delay+ may be specified as the time in seconds
- # to delay between retries.
- # A negative value for +:tries+ indicates the listen will be
- # retried indefinitely, this is useful when workers belonging to
- # different masters are spawned during a transparent upgrade.
- def listen(address, opt = {}.merge(listener_opts[address] || {}))
- address = config.expand_addr(address)
- return if String === address && listener_names.include?(address)
-
- delay = opt[:delay] || 0.5
- tries = opt[:tries] || 5
- begin
- io = bind_listen(address, opt)
- unless TCPServer === io || UNIXServer === io
- IO_PURGATORY << io
- io = server_cast(io)
- end
- logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
- LISTENERS << io
- io
- rescue Errno::EADDRINUSE => err
- logger.error "adding listener failed addr=#{address} (in use)"
- raise err if tries == 0
- tries -= 1
- logger.error "retrying in #{delay} seconds " \
- "(#{tries < 0 ? 'infinite' : tries} tries left)"
- sleep(delay)
- retry
- rescue => err
- logger.fatal "error adding listener addr=#{address}"
- raise err
- end
- end
-
- # monitors children and receives signals forever
- # (or until a termination signal is sent). This handles signals
- # one-at-a-time time and we'll happily drop signals in case somebody
- # is signalling us too often.
- def join
- respawn = true
- last_check = Time.now
-
- proc_name 'master'
- logger.info "master process ready" # test_exec.rb relies on this message
- if ready_pipe
- ready_pipe.syswrite($$.to_s)
- ready_pipe.close rescue nil
- self.ready_pipe = nil
- end
- begin
- loop do
- reap_all_workers
- case SIG_QUEUE.shift
- when nil
- # avoid murdering workers after our master process (or the
- # machine) comes out of suspend/hibernation
- if (last_check + timeout) >= (last_check = Time.now)
- murder_lazy_workers
- else
- # wait for workers to wakeup on suspend
- master_sleep(timeout/2.0 + 1)
- end
- maintain_worker_count if respawn
- master_sleep(1)
- when :QUIT # graceful shutdown
- break
- when :TERM, :INT # immediate shutdown
- stop(false)
- break
- when :USR1 # rotate logs
- logger.info "master reopening logs..."
- Unicorn::Util.reopen_logs
- logger.info "master done reopening logs"
- kill_each_worker(:USR1)
- when :USR2 # exec binary, stay alive in case something went wrong
- reexec
- when :WINCH
- if Process.ppid == 1 || Process.getpgrp != $$
- respawn = false
- logger.info "gracefully stopping all workers"
- kill_each_worker(:QUIT)
- self.worker_processes = 0
- else
- logger.info "SIGWINCH ignored because we're not daemonized"
- end
- when :TTIN
- respawn = true
- self.worker_processes += 1
- when :TTOU
- self.worker_processes -= 1 if self.worker_processes > 0
- when :HUP
- respawn = true
- if config.config_file
- load_config!
- redo # immediate reaping since we may have QUIT workers
- else # exec binary and exit if there's no config file
- logger.info "config_file not present, reexecuting binary"
- reexec
- break
- end
- end
- end
- rescue Errno::EINTR
- retry
- rescue => e
- logger.error "Unhandled master loop exception #{e.inspect}."
- logger.error e.backtrace.join("\n")
- retry
- end
- stop # gracefully shutdown all workers on our way out
- logger.info "master complete"
- unlink_pid_safe(pid) if pid
- end
-
- # Terminates all workers, but does not exit master process
- def stop(graceful = true)
- self.listeners = []
- limit = Time.now + timeout
- until WORKERS.empty? || Time.now > limit
- kill_each_worker(graceful ? :QUIT : :TERM)
- sleep(0.1)
- reap_all_workers
- end
- kill_each_worker(:KILL)
- end
-
- private
-
- # list of signals we care about and trap in master.
- QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
- :TTIN, :TTOU ]
-
- # defer a signal for later processing in #join (master process)
- def trap_deferred(signal)
- trap(signal) do |sig_nr|
- if SIG_QUEUE.size < 5
- SIG_QUEUE << signal
- awaken_master
- else
- logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
- end
- end
- end
-
- # wait for a signal hander to wake us up and then consume the pipe
- # Wake up every second anyways to run murder_lazy_workers
- def master_sleep(sec)
- IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
- SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
- rescue Errno::EAGAIN, Errno::EINTR
- end
-
- def awaken_master
- begin
- SELF_PIPE[1].write_nonblock('.') # wakeup master process from select
- rescue Errno::EAGAIN, Errno::EINTR
- # pipe is full, master should wake up anyways
- retry
- end
- end
-
- # reaps all unreaped workers
- def reap_all_workers
- begin
- loop do
- wpid, status = Process.waitpid2(-1, Process::WNOHANG)
- wpid or break
- if reexec_pid == wpid
- logger.error "reaped #{status.inspect} exec()-ed"
- self.reexec_pid = 0
- self.pid = pid.chomp('.oldbin') if pid
- proc_name 'master'
- else
- worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
- logger.info "reaped #{status.inspect} " \
- "worker=#{worker.nr rescue 'unknown'}"
- end
- end
- rescue Errno::ECHILD
- end
- end
-
- # reexecutes the START_CTX with a new binary
- def reexec
- if reexec_pid > 0
- begin
- Process.kill(0, reexec_pid)
- logger.error "reexec-ed child already running PID:#{reexec_pid}"
- return
- rescue Errno::ESRCH
- self.reexec_pid = 0
- end
- end
-
- if pid
- old_pid = "#{pid}.oldbin"
- prev_pid = pid.dup
- begin
- self.pid = old_pid # clear the path for a new pid file
- rescue ArgumentError
- logger.error "old PID:#{valid_pid?(old_pid)} running with " \
- "existing pid=#{old_pid}, refusing rexec"
- return
- rescue => e
- logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
- return
- end
- end
-
- self.reexec_pid = fork do
- listener_fds = LISTENERS.map { |sock| sock.fileno }
- ENV['UNICORN_FD'] = listener_fds.join(',')
- Dir.chdir(START_CTX[:cwd])
- cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
-
- # avoid leaking FDs we don't know about, but let before_exec
- # unset FD_CLOEXEC, if anything else in the app eventually
- # relies on FD inheritence.
- (3..1024).each do |io|
- next if listener_fds.include?(io)
- io = IO.for_fd(io) rescue nil
- io or next
- IO_PURGATORY << io
- io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- end
- logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
- before_exec.call(self)
- exec(*cmd)
- end
- proc_name 'master (old)'
- end
-
- # forcibly terminate all workers that haven't checked in in timeout
- # seconds. The timeout is implemented using an unlinked File
- # shared between the parent process and each worker. The worker
- # runs File#chmod to modify the ctime of the File. If the ctime
- # is stale for >timeout seconds, then we'll kill the corresponding
- # worker.
- def murder_lazy_workers
- WORKERS.dup.each_pair do |wpid, worker|
- stat = worker.tmp.stat
- # skip workers that disable fchmod or have never fchmod-ed
- stat.mode == 0100600 and next
- (diff = (Time.now - stat.ctime)) <= timeout and next
- logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
- "(#{diff}s > #{timeout}s), killing"
- kill_worker(:KILL, wpid) # take no prisoners for timeout violations
- end
- end
-
- def spawn_missing_workers
- (0...worker_processes).each do |worker_nr|
- WORKERS.values.include?(worker_nr) and next
- worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
- before_fork.call(self, worker)
- WORKERS[fork {
- ready_pipe.close if ready_pipe
- self.ready_pipe = nil
- worker_loop(worker)
- }] = worker
- end
- end
-
- def maintain_worker_count
- (off = WORKERS.size - worker_processes) == 0 and return
- off < 0 and return spawn_missing_workers
- WORKERS.dup.each_pair { |wpid,w|
- w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
- }
- end
-
- # if we get any error, try to write something back to the client
- # assuming we haven't closed the socket, but don't get hung up
- # if the socket is already closed or broken. We'll always ensure
- # the socket is closed at the end of this function
- def handle_error(client, e)
- msg = case e
- when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
- Const::ERROR_500_RESPONSE
- when HttpParserError # try to tell the client they're bad
- Const::ERROR_400_RESPONSE
+ # This returns a lambda to pass in as the app, this does not "build" the
+ # app (which we defer based on the outcome of "preload_app" in the
+ # Unicorn config). The returned lambda will be called when it is
+ # time to build the app.
+ def self.builder(ru, opts)
+ # allow Configurator to parse cli switches embedded in the ru file
+ Unicorn::Configurator::RACKUP.update(:file => ru, :optparse => opts)
+
+ # always called after config file parsing, may be called after forking
+ lambda do ||
+ inner_app = case ru
+ when /\.ru$/
+ raw = File.read(ru)
+ raw.sub!(/^__END__\n.*/, '')
+ eval("Rack::Builder.new {(#{raw}\n)}.to_app", TOPLEVEL_BINDING, ru)
  else
- logger.error "Read error: #{e.inspect}"
- logger.error e.backtrace.join("\n")
- Const::ERROR_500_RESPONSE
- end
- client.write_nonblock(msg)
- client.close
- rescue
- nil
- end
-
- # once a client is accepted, it is processed in its entirety here
- # in 3 easy steps: read request, call app, write app response
- def process_client(client)
- client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- response = app.call(env = REQUEST.read(client))
-
- if 100 == response[0].to_i
- client.write(Const::EXPECT_100_RESPONSE)
- env.delete(Const::HTTP_EXPECT)
- response = app.call(env)
- end
- HttpResponse.write(client, response, HttpRequest::PARSER.headers?)
- client.close # flushes and uncorks the socket immediately, no keepalive
- rescue => e
- handle_error(client, e)
- end
-
- # gets rid of stuff the worker has no business keeping track of
- # to free some resources and drops all sig handlers.
- # traps for USR1, USR2, and HUP may be set in the after_fork Proc
- # by the user.
- def init_worker_process(worker)
- QUEUE_SIGS.each { |sig| trap(sig, nil) }
- trap(:CHLD, 'DEFAULT')
- SIG_QUEUE.clear
- proc_name "worker[#{worker.nr}]"
- START_CTX.clear
- init_self_pipe!
- WORKERS.values.each { |other| other.tmp.close rescue nil }
- WORKERS.clear
- LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
- worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- after_fork.call(self, worker) # can drop perms
- worker.user(*user) if user.kind_of?(Array) && ! worker.switched
- self.timeout /= 2.0 # halve it for select()
- build_app! unless preload_app
- end
-
- def reopen_worker_logs(worker_nr)
- logger.info "worker=#{worker_nr} reopening logs..."
- Unicorn::Util.reopen_logs
- logger.info "worker=#{worker_nr} done reopening logs"
- init_self_pipe!
- end
-
- # runs inside each forked worker, this sits around and waits
- # for connections and doesn't die until the parent dies (or is
- # given a INT, QUIT, or TERM signal)
- def worker_loop(worker)
- ppid = master_pid
- init_worker_process(worker)
- nr = 0 # this becomes negative if we need to reopen logs
- alive = worker.tmp # tmp is our lifeline to the master process
- ready = LISTENERS
-
- # closing anything we IO.select on will raise EBADF
- trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
- trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
- [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
- logger.info "worker=#{worker.nr} ready"
- m = 0
-
- begin
- nr < 0 and reopen_worker_logs(worker.nr)
- nr = 0
-
- # we're a goner in timeout seconds anyways if alive.chmod
- # breaks, so don't trap the exception. Using fchmod() since
- # futimes() is not available in base Ruby and I very strongly
- # prefer temporary files to be unlinked for security,
- # performance and reliability reasons, so utime is out. No-op
- # changes with chmod doesn't update ctime on all filesystems; so
- # we change our counter each and every time (after process_client
- # and before IO.select).
- alive.chmod(m = 0 == m ? 1 : 0)
-
- ready.each do |sock|
- begin
- process_client(sock.accept_nonblock)
- nr += 1
- alive.chmod(m = 0 == m ? 1 : 0)
- rescue Errno::EAGAIN, Errno::ECONNABORTED
- end
- break if nr < 0
- end
-
- # make the following bet: if we accepted clients this round,
- # we're probably reasonably busy, so avoid calling select()
- # and do a speculative accept_nonblock on ready listeners
- # before we sleep again in select().
- redo unless nr == 0 # (nr < 0) => reopen logs
-
- ppid == Process.ppid or return
- alive.chmod(m = 0 == m ? 1 : 0)
- begin
- # timeout used so we can detect parent death:
- ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
- ready = ret[0]
- rescue Errno::EINTR
- ready = LISTENERS
- rescue Errno::EBADF
- nr < 0 or return
- end
- rescue => e
- if alive
- logger.error "Unhandled listen loop exception #{e.inspect}."
- logger.error e.backtrace.join("\n")
- end
- end while alive
- end
-
- # delivers a signal to a worker and fails gracefully if the worker
- # is no longer running.
- def kill_worker(signal, wpid)
- begin
- Process.kill(signal, wpid)
- rescue Errno::ESRCH
- worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
- end
- end
-
- # delivers a signal to each worker
- def kill_each_worker(signal)
- WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
- end
-
- # unlinks a PID file at given +path+ if it contains the current PID
- # still potentially racy without locking the directory (which is
- # non-portable and may interact badly with other programs), but the
- # window for hitting the race condition is small
- def unlink_pid_safe(path)
- (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
- end
-
- # returns a PID if a given path contains a non-stale PID file,
- # nil otherwise.
- def valid_pid?(path)
- wpid = File.read(path).to_i
- wpid <= 0 and return nil
- begin
- Process.kill(0, wpid)
- wpid
- rescue Errno::ESRCH
- # don't unlink stale pid files, racy without non-portable locking...
- end
- rescue Errno::ENOENT
- end
-
- def load_config!
- loaded_app = app
- begin
- logger.info "reloading config_file=#{config.config_file}"
- config[:listeners].replace(init_listeners)
- config.reload
- config.commit!(self)
- kill_each_worker(:QUIT)
- Unicorn::Util.reopen_logs
- self.app = orig_app
- build_app! if preload_app
- logger.info "done reloading config_file=#{config.config_file}"
- rescue StandardError, LoadError, SyntaxError => e
- logger.error "error reloading config_file=#{config.config_file}: " \
- "#{e.class} #{e.message} #{e.backtrace}"
- self.app = loaded_app
- end
- end
-
- # returns an array of string names for the given listener array
- def listener_names(listeners = LISTENERS)
- listeners.map { |io| sock_name(io) }
- end
-
- def build_app!
- if app.respond_to?(:arity) && app.arity == 0
- if defined?(Gem) && Gem.respond_to?(:refresh)
- logger.info "Refreshing Gem list"
- Gem.refresh
- end
- self.app = app.call
+ require ru
+ Object.const_get(File.basename(ru, '.rb').capitalize)
+ end
+
+ pp({ :inner_app => inner_app }) if $DEBUG
+
+ # return value, matches rackup defaults based on env
+ case ENV["RACK_ENV"]
+ when "development"
+ Rack::Builder.new do
+ use Rack::CommonLogger, $stderr
+ use Rack::ShowExceptions
+ use Rack::Lint
+ run inner_app
+ end.to_app
+ when "deployment"
+ Rack::Builder.new do
+ use Rack::CommonLogger, $stderr
+ run inner_app
+ end.to_app
+ else
+ inner_app
  end
  end
+ end
 
- def proc_name(tag)
- $0 = ([ File.basename(START_CTX[0]), tag
- ]).concat(START_CTX[:argv]).join(' ')
- end
-
- def redirect_io(io, path)
- File.open(path, 'ab') { |fp| io.reopen(fp) } if path
- io.sync = true
- end
-
- def init_self_pipe!
- SELF_PIPE.each { |io| io.close rescue nil }
- SELF_PIPE.replace(IO.pipe)
- SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+ # returns an array of strings representing TCP listen socket addresses
+ # and Unix domain socket paths. This is useful for use with
+ # Raindrops::Middleware under Linux: http://raindrops.bogomips.org/
+ def self.listener_names
+ Unicorn::HttpServer::LISTENERS.map do |io|
+ Unicorn::SocketHelper.sock_name(io)
  end
-
  end
  end
+
+ # raised inside TeeInput when a client closes the socket inside the
+ # application dispatch. This is always raised with an empty backtrace
+ # since there is nothing in the application stack that is responsible
+ # for client shutdowns/disconnects.
+ class Unicorn::ClientShutdown < EOFError; end
+
+ require 'unicorn/const'
+ require 'unicorn/socket_helper'
+ require 'unicorn/http_request'
+ require 'unicorn/configurator'
+ require 'unicorn/tmpio'
+ require 'unicorn/util'
+ require 'unicorn/tee_input'
+ require 'unicorn/http_response'
+ require 'unicorn/worker'
+ require 'unicorn/http_server'
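With this change lib/unicorn.rb shrinks to a thin namespace holding Unicorn.run, Unicorn.builder and Unicorn.listener_names, while the process-management code moves into the new unicorn/http_server.rb (plus unicorn/worker.rb and unicorn/tmpio.rb listed above). A minimal sketch of the surviving embedding API; the Rack app and listener address here are made up for illustration:

    require 'unicorn'

    app = lambda { |env| [200, { 'Content-Type' => 'text/plain' }, ["hi\n"]] }
    Unicorn.run(app, :listeners => ['127.0.0.1:8080'])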