unicorn 1.0.2 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -36,6 +36,9 @@ with rackup(1) but strongly discouraged.
36
36
  implemented as a Ruby DSL, so Ruby code may be executed.
37
37
  See the RDoc/ri for the *Unicorn::Configurator* class for the full
38
38
  list of directives available from the DSL.
39
+ Using an absolute path for CONFIG_FILE is recommended as it
40
+ makes multiple instances of Unicorn easily distinguishable when
41
+ viewing ps(1) output.
39
42
 
40
43
  -D, \--daemonize
41
44
  : Run daemonized in the background. The process is detached from
@@ -34,8 +34,11 @@ as much as possible.
34
34
  -c, \--config-file CONFIG_FILE
35
35
  : Path to the Unicorn-specific config file. The config file is
36
36
  implemented as a Ruby DSL, so Ruby code may be executed.
37
- See the RDoc/ri for the *Unicorn::Configurator* class for the
38
- full list of directives available from the DSL.
37
+ See the RDoc/ri for the *Unicorn::Configurator* class for the full
38
+ list of directives available from the DSL.
39
+ Using an absolute path for CONFIG_FILE is recommended as it
40
+ makes multiple instances of Unicorn easily distinguishable when
41
+ viewing ps(1) output.
39
42
 
40
43
  -D, \--daemonize
41
44
  : Run daemonized in the background. The process is detached from
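
Both manpage hunks above describe the -c/--config-file option: the config file is a Ruby DSL evaluated by Unicorn::Configurator, and an absolute CONFIG_FILE path keeps separate masters distinguishable in ps(1). A minimal sketch of that setup; the path, socket and values below are hypothetical, not taken from this package:

    # /srv/myapp/unicorn.conf.rb -- hypothetical config file, written in the Ruby DSL
    worker_processes 2
    listen "/tmp/myapp.sock"
    pid "/srv/myapp/tmp/unicorn.pid"

    # started with an absolute path, e.g.:
    #   unicorn -D -c /srv/myapp/unicorn.conf.rb
    # ps(1) output then carries the full path, so several masters on one host
    # remain easy to tell apart
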
@@ -1,7 +1,7 @@
1
1
  #!/bin/sh
2
2
 
3
3
  GVF=GIT-VERSION-FILE
4
- DEF_VER=v1.0.2.GIT
4
+ DEF_VER=v1.0.0.GIT
5
5
 
6
6
  LF='
7
7
  '
@@ -169,7 +169,7 @@ NEWS: GIT-VERSION-FILE .manifest
169
169
  $(RAKE) -s news_rdoc > $@+
170
170
  mv $@+ $@
171
171
 
172
- SINCE = 1.0.0
172
+ SINCE = 0.991.0
173
173
  ChangeLog: LOG_VERSION = \
174
174
  $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
175
175
  echo $(GIT_VERSION) || git describe)
@@ -189,7 +189,7 @@ atom = <link rel="alternate" title="Atom feed" href="$(1)" \
189
189
  doc: .document $(ext)/unicorn_http.c NEWS ChangeLog
190
190
  for i in $(man1_rdoc); do echo > $$i; done
191
191
  find bin lib -type f -name '*.rbc' -exec rm -f '{}' ';'
192
- rdoc -t "$(shell sed -ne '1s/^= //p' README)"
192
+ rdoc -a -t "$(shell sed -ne '1s/^= //p' README)"
193
193
  install -m644 COPYING doc/COPYING
194
194
  install -m644 $(shell grep '^[A-Z]' .document) doc/
195
195
  $(MAKE) -C Documentation install-html install-man
data/Rakefile CHANGED
@@ -15,7 +15,7 @@ def tags
15
15
  timefmt = '%Y-%m-%dT%H:%M:%SZ'
16
16
  @tags ||= `git tag -l`.split(/\n/).map do |tag|
17
17
  next if tag == "v0.0.0"
18
- if %r{\Av[\d\.]+} =~ tag
18
+ if %r{\Av[\d\.]+\z} =~ tag
19
19
  header, subject, body = `git cat-file tag #{tag}`.split(/\n\n/, 3)
20
20
  header = header.split(/\n/)
21
21
  tagger = header.grep(/\Atagger /).first
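
For context on the regex change above: without the \z anchor, any tag that merely starts with a version-like prefix is treated as a release; with the anchor, the whole tag name must be "v" plus digits and dots. A small self-contained check, with tag names made up for illustration:

    # old vs. new pattern from the Rakefile, applied to sample tag names
    old_re = %r{\Av[\d\.]+}    # prefix match only
    new_re = %r{\Av[\d\.]+\z}  # must match the entire tag name
    %w(v1.1.0 v1.1.0pre v0.990.0 release-1.0).each do |tag|
      puts "#{tag}: old=#{!!(old_re =~ tag)} new=#{!!(new_re =~ tag)}"
    end
    # v1.1.0pre matches only the old pattern, so before this change it would
    # have been picked up as if it were a release tag
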
@@ -168,12 +168,8 @@ task :fm_update do
168
168
  "changelog" => changelog,
169
169
  },
170
170
  }.to_json
171
- if ! changelog.strip.empty? && version =~ %r{\A[\d\.]+\d+\z}
172
- Net::HTTP.start(uri.host, uri.port) do |http|
173
- p http.post(uri.path, req, {'Content-Type'=>'application/json'})
174
- end
175
- else
176
- warn "not updating freshmeat for v#{version}"
171
+ Net::HTTP.start(uri.host, uri.port) do |http|
172
+ p http.post(uri.path, req, {'Content-Type'=>'application/json'})
177
173
  end
178
174
  end
179
175
 
@@ -684,7 +684,7 @@ void Init_unicorn_http(void)
684
684
  {
685
685
  VALUE mUnicorn, cHttpParser;
686
686
 
687
- mUnicorn = rb_define_module("Unicorn");
687
+ mUnicorn = rb_const_get(rb_cObject, rb_intern("Unicorn"));
688
688
  cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject);
689
689
  eHttpParserError =
690
690
  rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError);
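
The C change above stops defining the Unicorn module inside the extension and instead looks up the constant, which assumes lib/unicorn.rb has already created the module by the time the extension loads. A rough Ruby analogy of the two calls, illustrative only and not part of the diff:

    # rb_define_module("Unicorn")  -- create the module, or reopen it if present
    module Unicorn; end

    # rb_const_get(rb_cObject, rb_intern("Unicorn"))  -- only look it up;
    # raises NameError if nothing has defined Unicorn yet
    Object.const_get(:Unicorn)
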
@@ -2,6 +2,7 @@
2
2
 
3
3
  require 'fcntl'
4
4
  require 'etc'
5
+ require 'stringio'
5
6
  require 'rack'
6
7
  require 'unicorn/socket_helper'
7
8
  require 'unicorn/const'
@@ -312,11 +313,6 @@ module Unicorn
312
313
  if path
313
314
  if x = valid_pid?(path)
314
315
  return path if pid && path == pid && x == $$
315
- if x == reexec_pid && pid =~ /\.oldbin\z/
316
- logger.warn("will not set pid=#{path} while reexec-ed "\
317
- "child is running PID:#{x}")
318
- return
319
- end
320
316
  raise ArgumentError, "Already running on PID:#{x} " \
321
317
  "(or pid=#{path} is stale)"
322
318
  end
@@ -422,12 +418,10 @@ module Unicorn
422
418
  respawn = false
423
419
  logger.info "gracefully stopping all workers"
424
420
  kill_each_worker(:QUIT)
425
- self.worker_processes = 0
426
421
  else
427
422
  logger.info "SIGWINCH ignored because we're not daemonized"
428
423
  end
429
424
  when :TTIN
430
- respawn = true
431
425
  self.worker_processes += 1
432
426
  when :TTOU
433
427
  self.worker_processes -= 1 if self.worker_processes > 0
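
The hunk above is part of the master's signal loop: SIGWINCH gracefully stops workers when daemonized, and SIGTTIN/SIGTTOU grow or shrink worker_processes at runtime. A hedged sketch of driving that from another process; the PID file path is a placeholder:

    # resize a running unicorn's worker pool by signaling the master process
    master_pid = File.read("/srv/myapp/tmp/unicorn.pid").to_i  # hypothetical path
    Process.kill(:TTIN, master_pid)  # worker_processes += 1
    Process.kill(:TTOU, master_pid)  # worker_processes -= 1 (only while > 0)
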
@@ -488,14 +482,17 @@ module Unicorn
488
482
  # wait for a signal handler to wake us up and then consume the pipe
489
483
  # Wake up every second anyways to run murder_lazy_workers
490
484
  def master_sleep(sec)
491
- IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
492
- SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
485
+ begin
486
+ IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return
487
+ SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF)
493
488
  rescue Errno::EAGAIN, Errno::EINTR
489
+ break
490
+ end while true
494
491
  end
495
492
 
496
493
  def awaken_master
497
494
  begin
498
- SELF_PIPE.last.write_nonblock('.') # wakeup master process from select
495
+ SELF_PIPE[1].write_nonblock('.') # wakeup master process from select
499
496
  rescue Errno::EAGAIN, Errno::EINTR
500
497
  # pipe is full, master should wake up anyways
501
498
  retry
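
master_sleep and awaken_master above rely on the self-pipe trick: the master blocks in IO.select on the read end of SELF_PIPE, a signal-driven writer pushes a byte into the write end to cut the sleep short, and the new loop drains the pipe until read_nonblock would block. A standalone sketch of the same pattern, using illustrative names rather than Unicorn's:

    reader, writer = IO.pipe

    # "awaken_master": a single byte on the write end interrupts the select below
    waker = Thread.new do
      sleep 0.2
      writer.write_nonblock(".") rescue nil  # a full pipe is fine; one byte is enough
    end

    # "master_sleep": wait up to 5 seconds, then drain whatever was written so the
    # next select starts from an empty pipe
    if IO.select([reader], nil, nil, 5)
      begin
        loop { reader.read_nonblock(1024) }
      rescue Errno::EAGAIN, Errno::EINTR
        # pipe drained (or read interrupted); nothing left to do
      end
    end
    waker.join
    puts "woken early by the self-pipe write"
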
@@ -639,7 +636,7 @@ module Unicorn
639
636
  client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
640
637
  response = app.call(env = REQUEST.read(client))
641
638
 
642
- if 100 == response.first.to_i
639
+ if 100 == response[0].to_i
643
640
  client.write(Const::EXPECT_100_RESPONSE)
644
641
  env.delete(Const::HTTP_EXPECT)
645
642
  response = app.call(env)
@@ -688,7 +685,7 @@ module Unicorn
688
685
  ready = LISTENERS
689
686
 
690
687
  # closing anything we IO.select on will raise EBADF
691
- trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
688
+ trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
692
689
  trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
693
690
  [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
694
691
  logger.info "worker=#{worker.nr} ready"
@@ -729,7 +726,7 @@ module Unicorn
729
726
  begin
730
727
  # timeout used so we can detect parent death:
731
728
  ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
732
- ready = ret.first
729
+ ready = ret[0]
733
730
  rescue Errno::EINTR
734
731
  ready = LISTENERS
735
732
  rescue Errno::EBADF
@@ -1,489 +1,512 @@
1
1
  # -*- encoding: binary -*-
2
-
3
- require 'socket'
4
2
  require 'logger'
5
3
 
6
- module Unicorn
7
-
8
- # Implements a simple DSL for configuring a Unicorn server.
9
- #
10
- # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
11
- # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
12
- # example configuration files. An example config file for use with
13
- # nginx is also available at
14
- # http://unicorn.bogomips.org/examples/nginx.conf
15
- class Configurator < Struct.new(:set, :config_file, :after_reload)
16
- # :stopdoc:
17
- # used to stash stuff for deferred processing of cli options in
18
- # config.ru after "working_directory" is bound. Do not rely on
19
- # this being around later on...
20
- RACKUP = {}
21
- # :startdoc:
22
-
23
- # Default settings for Unicorn
24
- DEFAULTS = {
25
- :timeout => 60,
26
- :logger => Logger.new($stderr),
27
- :worker_processes => 1,
28
- :after_fork => lambda { |server, worker|
29
- server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
30
- },
31
- :before_fork => lambda { |server, worker|
32
- server.logger.info("worker=#{worker.nr} spawning...")
33
- },
34
- :before_exec => lambda { |server|
35
- server.logger.info("forked child re-executing...")
36
- },
37
- :pid => nil,
38
- :preload_app => false,
39
- }
40
-
41
- def initialize(defaults = {}) #:nodoc:
42
- self.set = Hash.new(:unset)
43
- @use_defaults = defaults.delete(:use_defaults)
44
- self.config_file = defaults.delete(:config_file)
45
-
46
- # after_reload is only used by unicorn_rails, unsupported otherwise
47
- self.after_reload = defaults.delete(:after_reload)
48
-
49
- set.merge!(DEFAULTS) if @use_defaults
50
- defaults.each { |key, value| self.__send__(key, value) }
51
- Hash === set[:listener_opts] or
52
- set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
53
- Array === set[:listeners] or set[:listeners] = []
54
- reload(false)
55
- end
4
+ # Implements a simple DSL for configuring a \Unicorn server.
5
+ #
6
+ # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
7
+ # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
8
+ # example configuration files. An example config file for use with
9
+ # nginx is also available at
10
+ # http://unicorn.bogomips.org/examples/nginx.conf
11
+ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
12
+ # used to stash stuff for deferred processing of cli options in
13
+ # config.ru after "working_directory" is bound. Do not rely on
14
+ # this being around later on...
15
+ RACKUP = {} # :nodoc:
16
+
17
+ # Default settings for Unicorn
18
+ # :stopdoc:
19
+ DEFAULTS = {
20
+ :timeout => 60,
21
+ :logger => Logger.new($stderr),
22
+ :worker_processes => 1,
23
+ :after_fork => lambda { |server, worker|
24
+ server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
25
+ },
26
+ :before_fork => lambda { |server, worker|
27
+ server.logger.info("worker=#{worker.nr} spawning...")
28
+ },
29
+ :before_exec => lambda { |server|
30
+ server.logger.info("forked child re-executing...")
31
+ },
32
+ :pid => nil,
33
+ :preload_app => false,
34
+ }
35
+ #:startdoc:
36
+
37
+ def initialize(defaults = {}) #:nodoc:
38
+ self.set = Hash.new(:unset)
39
+ use_defaults = defaults.delete(:use_defaults)
40
+ self.config_file = defaults.delete(:config_file)
41
+
42
+ # after_reload is only used by unicorn_rails, unsupported otherwise
43
+ self.after_reload = defaults.delete(:after_reload)
44
+
45
+ set.merge!(DEFAULTS) if use_defaults
46
+ defaults.each { |key, value| self.send(key, value) }
47
+ Hash === set[:listener_opts] or
48
+ set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
49
+ Array === set[:listeners] or set[:listeners] = []
50
+ reload
51
+ end
56
52
 
57
- def reload(merge_defaults = true) #:nodoc:
58
- if merge_defaults && @use_defaults
59
- set.merge!(DEFAULTS) if @use_defaults
60
- end
61
- instance_eval(File.read(config_file), config_file) if config_file
53
+ def reload #:nodoc:
54
+ instance_eval(File.read(config_file), config_file) if config_file
62
55
 
63
- parse_rackup_file
56
+ parse_rackup_file
64
57
 
65
- # unicorn_rails creates dirs here after working_directory is bound
66
- after_reload.call if after_reload
58
+ # unicorn_rails creates dirs here after working_directory is bound
59
+ after_reload.call if after_reload
67
60
 
68
- # working_directory binds immediately (easier error checking that way),
69
- # now ensure any paths we changed are correctly set.
70
- [ :pid, :stderr_path, :stdout_path ].each do |var|
71
- String === (path = set[var]) or next
72
- path = File.expand_path(path)
73
- File.writable?(path) || File.writable?(File.dirname(path)) or \
74
- raise ArgumentError, "directory for #{var}=#{path} not writable"
75
- end
61
+ # working_directory binds immediately (easier error checking that way),
62
+ # now ensure any paths we changed are correctly set.
63
+ [ :pid, :stderr_path, :stdout_path ].each do |var|
64
+ String === (path = set[var]) or next
65
+ path = File.expand_path(path)
66
+ File.writable?(path) || File.writable?(File.dirname(path)) or \
67
+ raise ArgumentError, "directory for #{var}=#{path} not writable"
76
68
  end
69
+ end
77
70
 
78
- def commit!(server, options = {}) #:nodoc:
79
- skip = options[:skip] || []
80
- if ready_pipe = RACKUP.delete(:ready_pipe)
81
- server.ready_pipe = ready_pipe
82
- end
83
- set.each do |key, value|
84
- value == :unset and next
85
- skip.include?(key) and next
86
- server.__send__("#{key}=", value)
87
- end
71
+ def commit!(server, options = {}) #:nodoc:
72
+ skip = options[:skip] || []
73
+ if ready_pipe = RACKUP.delete(:ready_pipe)
74
+ server.ready_pipe = ready_pipe
88
75
  end
89
-
90
- def [](key) # :nodoc:
91
- set[key]
76
+ set.each do |key, value|
77
+ value == :unset and next
78
+ skip.include?(key) and next
79
+ server.__send__("#{key}=", value)
92
80
  end
81
+ end
93
82
 
94
- # sets object to the +new+ Logger-like object. The new logger-like
95
- # object must respond to the following methods:
96
- # +debug+, +info+, +warn+, +error+, +fatal+
97
- # The default Logger will log its output to the path specified
98
- # by +stderr_path+. If you're running Unicorn daemonized, then
99
- # you must specify a path to prevent error messages from going
100
- # to /dev/null.
101
- def logger(new)
102
- %w(debug info warn error fatal).each do |m|
103
- new.respond_to?(m) and next
104
- raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
105
- end
83
+ def [](key) # :nodoc:
84
+ set[key]
85
+ end
106
86
 
107
- set[:logger] = new
87
+ # sets object to the +new+ Logger-like object. The new logger-like
88
+ # object must respond to the following methods:
89
+ # +debug+, +info+, +warn+, +error+, +fatal+
90
+ # The default Logger will log its output to the path specified
91
+ # by +stderr_path+. If you're running Unicorn daemonized, then
92
+ # you must specify a path to prevent error messages from going
93
+ # to /dev/null.
94
+ def logger(new)
95
+ %w(debug info warn error fatal).each do |m|
96
+ new.respond_to?(m) and next
97
+ raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
108
98
  end
109
99
 
110
- # sets after_fork hook to a given block. This block will be called by
111
- # the worker after forking. The following is an example hook which adds
112
- # a per-process listener to every worker:
113
- #
114
- # after_fork do |server,worker|
115
- # # per-process listener ports for debugging/admin:
116
- # addr = "127.0.0.1:#{9293 + worker.nr}"
117
- #
118
- # # the negative :tries parameter indicates we will retry forever
119
- # # waiting on the existing process to exit with a 5 second :delay
120
- # # Existing options for Unicorn::Configurator#listen such as
121
- # # :backlog, :rcvbuf, :sndbuf are available here as well.
122
- # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
123
- #
124
- # # drop permissions to "www-data" in the worker
125
- # # generally there's no reason to start Unicorn as a privileged user
126
- # # as it is not recommended to expose Unicorn to public clients.
127
- # worker.user('www-data', 'www-data') if Process.euid == 0
128
- # end
129
- def after_fork(*args, &block)
130
- set_hook(:after_fork, block_given? ? block : args[0])
131
- end
100
+ set[:logger] = new
101
+ end
132
102
 
133
- # sets before_fork to be a given Proc object. This Proc
134
- # object will be called by the master process before forking
135
- # each worker.
136
- def before_fork(*args, &block)
137
- set_hook(:before_fork, block_given? ? block : args[0])
138
- end
103
+ # sets after_fork hook to a given block. This block will be called by
104
+ # the worker after forking. The following is an example hook which adds
105
+ # a per-process listener to every worker:
106
+ #
107
+ # after_fork do |server,worker|
108
+ # # per-process listener ports for debugging/admin:
109
+ # addr = "127.0.0.1:#{9293 + worker.nr}"
110
+ #
111
+ # # the negative :tries parameter indicates we will retry forever
112
+ # # waiting on the existing process to exit with a 5 second :delay
113
+ # # Existing options for Unicorn::Configurator#listen such as
114
+ # # :backlog, :rcvbuf, :sndbuf are available here as well.
115
+ # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
116
+ #
117
+ # # drop permissions to "www-data" in the worker
118
+ # # generally there's no reason to start Unicorn as a privileged user
119
+ # # as it is not recommended to expose Unicorn to public clients.
120
+ # worker.user('www-data', 'www-data') if Process.euid == 0
121
+ # end
122
+ def after_fork(*args, &block)
123
+ set_hook(:after_fork, block_given? ? block : args[0])
124
+ end
139
125
 
140
- # sets the before_exec hook to a given Proc object. This
141
- # Proc object will be called by the master process right
142
- # before exec()-ing the new unicorn binary. This is useful
143
- # for freeing certain OS resources that you do NOT wish to
144
- # share with the reexeced child process.
145
- # There is no corresponding after_exec hook (for obvious reasons).
146
- def before_exec(*args, &block)
147
- set_hook(:before_exec, block_given? ? block : args[0], 1)
148
- end
126
+ # sets before_fork to be a given Proc object. This Proc
127
+ # object will be called by the master process before forking
128
+ # each worker.
129
+ def before_fork(*args, &block)
130
+ set_hook(:before_fork, block_given? ? block : args[0])
131
+ end
149
132
 
150
- # sets the timeout of worker processes to +seconds+. Workers
151
- # handling the request/app.call/response cycle taking longer than
152
- # this time period will be forcibly killed (via SIGKILL). This
153
- # timeout is enforced by the master process itself and not subject
154
- # to the scheduling limitations by the worker process. Due to the
155
- # low-complexity, low-overhead implementation, timeouts of less
156
- # than 3.0 seconds can be considered inaccurate and unsafe.
157
- #
158
- # For running Unicorn behind nginx, it is recommended to set
159
- # "fail_timeout=0" for in your nginx configuration like this
160
- # to have nginx always retry backends that may have had workers
161
- # SIGKILL-ed due to timeouts.
162
- #
163
- # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
164
- # # on nginx upstream configuration:
165
- # upstream unicorn_backend {
166
- # # for UNIX domain socket setups:
167
- # server unix:/path/to/unicorn.sock fail_timeout=0;
168
- #
169
- # # for TCP setups
170
- # server 192.168.0.7:8080 fail_timeout=0;
171
- # server 192.168.0.8:8080 fail_timeout=0;
172
- # server 192.168.0.9:8080 fail_timeout=0;
173
- # }
174
- def timeout(seconds)
175
- Numeric === seconds or raise ArgumentError,
176
- "not numeric: timeout=#{seconds.inspect}"
177
- seconds >= 3 or raise ArgumentError,
178
- "too low: timeout=#{seconds.inspect}"
179
- set[:timeout] = seconds
180
- end
133
+ # sets the before_exec hook to a given Proc object. This
134
+ # Proc object will be called by the master process right
135
+ # before exec()-ing the new unicorn binary. This is useful
136
+ # for freeing certain OS resources that you do NOT wish to
137
+ # share with the reexeced child process.
138
+ # There is no corresponding after_exec hook (for obvious reasons).
139
+ def before_exec(*args, &block)
140
+ set_hook(:before_exec, block_given? ? block : args[0], 1)
141
+ end
181
142
 
182
- # sets the current number of worker_processes to +nr+. Each worker
183
- # process will serve exactly one client at a time. You can
184
- # increment or decrement this value at runtime by sending SIGTTIN
185
- # or SIGTTOU respectively to the master process without reloading
186
- # the rest of your Unicorn configuration. See the SIGNALS document
187
- # for more information.
188
- def worker_processes(nr)
189
- Integer === nr or raise ArgumentError,
190
- "not an integer: worker_processes=#{nr.inspect}"
191
- nr >= 0 or raise ArgumentError,
192
- "not non-negative: worker_processes=#{nr.inspect}"
193
- set[:worker_processes] = nr
194
- end
143
+ # sets the timeout of worker processes to +seconds+. Workers
144
+ # handling the request/app.call/response cycle taking longer than
145
+ # this time period will be forcibly killed (via SIGKILL). This
146
+ # timeout is enforced by the master process itself and not subject
147
+ # to the scheduling limitations by the worker process. Due to the
148
+ # low-complexity, low-overhead implementation, timeouts of less
149
+ # than 3.0 seconds can be considered inaccurate and unsafe.
150
+ #
151
+ # For running Unicorn behind nginx, it is recommended to set
152
+ # "fail_timeout=0" for in your nginx configuration like this
153
+ # to have nginx always retry backends that may have had workers
154
+ # SIGKILL-ed due to timeouts.
155
+ #
156
+ # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
157
+ # # on nginx upstream configuration:
158
+ # upstream unicorn_backend {
159
+ # # for UNIX domain socket setups:
160
+ # server unix:/path/to/unicorn.sock fail_timeout=0;
161
+ #
162
+ # # for TCP setups
163
+ # server 192.168.0.7:8080 fail_timeout=0;
164
+ # server 192.168.0.8:8080 fail_timeout=0;
165
+ # server 192.168.0.9:8080 fail_timeout=0;
166
+ # }
167
+ def timeout(seconds)
168
+ Numeric === seconds or raise ArgumentError,
169
+ "not numeric: timeout=#{seconds.inspect}"
170
+ seconds >= 3 or raise ArgumentError,
171
+ "too low: timeout=#{seconds.inspect}"
172
+ set[:timeout] = seconds
173
+ end
195
174
 
196
- # sets listeners to the given +addresses+, replacing or augmenting the
197
- # current set. This is for the global listener pool shared by all
198
- # worker processes. For per-worker listeners, see the after_fork example
199
- # This is for internal API use only, do not use it in your Unicorn
200
- # config file. Use listen instead.
201
- def listeners(addresses) # :nodoc:
202
- Array === addresses or addresses = Array(addresses)
203
- addresses.map! { |addr| expand_addr(addr) }
204
- set[:listeners] = addresses
205
- end
175
+ # sets the current number of worker_processes to +nr+. Each worker
176
+ # process will serve exactly one client at a time. You can
177
+ # increment or decrement this value at runtime by sending SIGTTIN
178
+ # or SIGTTOU respectively to the master process without reloading
179
+ # the rest of your Unicorn configuration. See the SIGNALS document
180
+ # for more information.
181
+ def worker_processes(nr)
182
+ Integer === nr or raise ArgumentError,
183
+ "not an integer: worker_processes=#{nr.inspect}"
184
+ nr >= 0 or raise ArgumentError,
185
+ "not non-negative: worker_processes=#{nr.inspect}"
186
+ set[:worker_processes] = nr
187
+ end
206
188
 
207
- # adds an +address+ to the existing listener set.
208
- #
209
- # The following options may be specified (but are generally not needed):
210
- #
211
- # +:backlog+: this is the backlog of the listen() syscall.
212
- #
213
- # Some operating systems allow negative values here to specify the
214
- # maximum allowable value. In most cases, this number is only a
215
- # recommendation and there are other OS-specific tunables and
216
- # variables that can affect this number. See the listen(2)
217
- # syscall documentation of your OS for the exact semantics of
218
- # this.
219
- #
220
- # If you are running unicorn on multiple machines, lowering this number
221
- # can help your load balancer detect when a machine is overloaded
222
- # and give requests to a different machine.
223
- #
224
- # Default: 1024
225
- #
226
- # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
227
- #
228
- # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
229
- # can be set via the setsockopt(2) syscall. Some kernels
230
- # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
231
- # there is no need (and it is sometimes detrimental) to specify them.
232
- #
233
- # See the socket API documentation of your operating system
234
- # to determine the exact semantics of these settings and
235
- # other operating system-specific knobs where they can be
236
- # specified.
237
- #
238
- # Defaults: operating system defaults
239
- #
240
- # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
241
- #
242
- # This has no effect on UNIX sockets.
243
- #
244
- # Default: operating system defaults (usually Nagle's algorithm enabled)
245
- #
246
- # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
247
- #
248
- # This will prevent partial TCP frames from being sent out.
249
- # Enabling +tcp_nopush+ is generally not needed or recommended as
250
- # controlling +tcp_nodelay+ already provides sufficient latency
251
- # reduction whereas Unicorn does not know when the best times are
252
- # for flushing corked sockets.
253
- #
254
- # This has no effect on UNIX sockets.
255
- #
256
- # +:tries+: times to retry binding a socket if it is already in use
257
- #
258
- # A negative number indicates we will retry indefinitely, this is
259
- # useful for migrations and upgrades when individual workers
260
- # are binding to different ports.
261
- #
262
- # Default: 5
263
- #
264
- # +:delay+: seconds to wait between successive +tries+
265
- #
266
- # Default: 0.5 seconds
267
- #
268
- # +:umask+: sets the file mode creation mask for UNIX sockets
269
- #
270
- # Typically UNIX domain sockets are created with more liberal
271
- # file permissions than the rest of the application. By default,
272
- # we create UNIX domain sockets to be readable and writable by
273
- # all local users to give them the same accessibility as
274
- # locally-bound TCP listeners.
275
- #
276
- # This has no effect on TCP listeners.
277
- #
278
- # Default: 0 (world read/writable)
279
- def listen(address, opt = {})
280
- address = expand_addr(address)
281
- if String === address
282
- [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
283
- value = opt[key] or next
284
- Integer === value or
285
- raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
286
- end
287
- [ :tcp_nodelay, :tcp_nopush ].each do |key|
288
- (value = opt[key]).nil? and next
289
- TrueClass === value || FalseClass === value or
290
- raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
291
- end
292
- unless (value = opt[:delay]).nil?
293
- Numeric === value or
294
- raise ArgumentError, "not numeric: delay=#{value.inspect}"
295
- end
296
- set[:listener_opts][address].merge!(opt)
189
+ # sets listeners to the given +addresses+, replacing or augmenting the
190
+ # current set. This is for the global listener pool shared by all
191
+ # worker processes. For per-worker listeners, see the after_fork example
192
+ # This is for internal API use only, do not use it in your Unicorn
193
+ # config file. Use listen instead.
194
+ def listeners(addresses) # :nodoc:
195
+ Array === addresses or addresses = Array(addresses)
196
+ addresses.map! { |addr| expand_addr(addr) }
197
+ set[:listeners] = addresses
198
+ end
199
+
200
+ # adds an +address+ to the existing listener set.
201
+ #
202
+ # The following options may be specified (but are generally not needed):
203
+ #
204
+ # +:backlog+: this is the backlog of the listen() syscall.
205
+ #
206
+ # Some operating systems allow negative values here to specify the
207
+ # maximum allowable value. In most cases, this number is only a
208
+ # recommendation and there are other OS-specific tunables and
209
+ # variables that can affect this number. See the listen(2)
210
+ # syscall documentation of your OS for the exact semantics of
211
+ # this.
212
+ #
213
+ # If you are running unicorn on multiple machines, lowering this number
214
+ # can help your load balancer detect when a machine is overloaded
215
+ # and give requests to a different machine.
216
+ #
217
+ # Default: 1024
218
+ #
219
+ # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
220
+ #
221
+ # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
222
+ # can be set via the setsockopt(2) syscall. Some kernels
223
+ # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
224
+ # there is no need (and it is sometimes detrimental) to specify them.
225
+ #
226
+ # See the socket API documentation of your operating system
227
+ # to determine the exact semantics of these settings and
228
+ # other operating system-specific knobs where they can be
229
+ # specified.
230
+ #
231
+ # Defaults: operating system defaults
232
+ #
233
+ # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
234
+ #
235
+ # This has no effect on UNIX sockets.
236
+ #
237
+ # Default: operating system defaults (usually Nagle's algorithm enabled)
238
+ #
239
+ # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
240
+ #
241
+ # This will prevent partial TCP frames from being sent out.
242
+ # Enabling +tcp_nopush+ is generally not needed or recommended as
243
+ # controlling +tcp_nodelay+ already provides sufficient latency
244
+ # reduction whereas Unicorn does not know when the best times are
245
+ # for flushing corked sockets.
246
+ #
247
+ # This has no effect on UNIX sockets.
248
+ #
249
+ # +:tries+: times to retry binding a socket if it is already in use
250
+ #
251
+ # A negative number indicates we will retry indefinitely, this is
252
+ # useful for migrations and upgrades when individual workers
253
+ # are binding to different ports.
254
+ #
255
+ # Default: 5
256
+ #
257
+ # +:delay+: seconds to wait between successive +tries+
258
+ #
259
+ # Default: 0.5 seconds
260
+ #
261
+ # +:umask+: sets the file mode creation mask for UNIX sockets
262
+ #
263
+ # Typically UNIX domain sockets are created with more liberal
264
+ # file permissions than the rest of the application. By default,
265
+ # we create UNIX domain sockets to be readable and writable by
266
+ # all local users to give them the same accessibility as
267
+ # locally-bound TCP listeners.
268
+ #
269
+ # This has no effect on TCP listeners.
270
+ #
271
+ # Default: 0 (world read/writable)
272
+ #
273
+ # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only)
274
+ #
275
+ # For Linux 2.6.32 and later, this is the number of retransmits to
276
+ # defer an accept() for if no data arrives, but the client will
277
+ # eventually be accepted after the specified number of retransmits
278
+ # regardless of whether data is ready.
279
+ #
280
+ # For Linux before 2.6.32, this is a boolean option, and
281
+ # accepts are _always_ deferred indefinitely if no data arrives.
282
+ # This is similar to <code>:accept_filter => "dataready"</code>
283
+ # under FreeBSD.
284
+ #
285
+ # Specifying +true+ is synonymous with the default value(s) below,
286
+ # and +false+ or +nil+ is synonymous with a value of zero.
287
+ #
288
+ # A value of +1+ is a good optimization for local networks
289
+ # and trusted clients. For Rainbows! and Zbatery users, a higher
290
+ # value (e.g. +60+) provides more protection against some
291
+ # denial-of-service attacks. There is no good reason to ever
292
+ # disable this with a +zero+ value when serving HTTP.
293
+ #
294
+ # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+
295
+ #
296
+ # +:accept_filter:+ defer accept() until data is ready (FreeBSD-only)
297
+ #
298
+ # This enables either the "dataready" or (default) "httpready"
299
+ # accept() filter under FreeBSD. This is intended as an
300
+ # optimization to reduce context switches with common GET/HEAD
301
+ # requests. For Rainbows! and Zbatery users, this provides
302
+ # some protection against certain denial-of-service attacks, too.
303
+ #
304
+ # There is no good reason to change from the default.
305
+ #
306
+ # Default: "httpready"
307
+ def listen(address, opt = {})
308
+ address = expand_addr(address)
309
+ if String === address
310
+ [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
311
+ value = opt[key] or next
312
+ Integer === value or
313
+ raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
314
+ end
315
+ [ :tcp_nodelay, :tcp_nopush ].each do |key|
316
+ (value = opt[key]).nil? and next
317
+ TrueClass === value || FalseClass === value or
318
+ raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
319
+ end
320
+ unless (value = opt[:delay]).nil?
321
+ Numeric === value or
322
+ raise ArgumentError, "not numeric: delay=#{value.inspect}"
297
323
  end
324
+ set[:listener_opts][address].merge!(opt)
325
+ end
326
+
327
+ set[:listeners] << address
328
+ end
298
329
 
299
- set[:listeners] << address
330
+ # sets the +path+ for the PID file of the unicorn master process
331
+ def pid(path); set_path(:pid, path); end
332
+
333
+ # Enabling this preloads an application before forking worker
334
+ # processes. This allows memory savings when using a
335
+ # copy-on-write-friendly GC but can cause bad things to happen when
336
+ # resources like sockets are opened at load time by the master
337
+ # process and shared by multiple children. People enabling this are
338
+ # highly encouraged to look at the before_fork/after_fork hooks to
339
+ # properly close/reopen sockets. Files opened for logging do not
340
+ # have to be reopened as (unbuffered-in-userspace) files opened with
341
+ # the File::APPEND flag are written to atomically on UNIX.
342
+ #
343
+ # In addition to reloading the unicorn-specific config settings,
344
+ # SIGHUP will reload application code in the working
345
+ # directory/symlink when workers are gracefully restarted when
346
+ # preload_app=false (the default). As reloading the application
347
+ # sometimes requires RubyGems updates, +Gem.refresh+ is always
348
+ # called before the application is loaded (for RubyGems users).
349
+ #
350
+ # During deployments, care should _always_ be taken to ensure your
351
+ # applications are properly deployed and running. Using
352
+ # preload_app=false (the default) means you _must_ check if
353
+ # your application is responding properly after a deployment.
354
+ # Improperly deployed applications can go into a spawn loop
355
+ # if the application fails to load. While your children are
356
+ # in a spawn loop, it is possible to fix an application
357
+ # by properly deploying all required code and dependencies.
358
+ # Using preload_app=true means any application load error will
359
+ # cause the master process to exit with an error.
360
+
361
+ def preload_app(bool)
362
+ case bool
363
+ when TrueClass, FalseClass
364
+ set[:preload_app] = bool
365
+ else
366
+ raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
300
367
  end
368
+ end
301
369
 
302
- # sets the +path+ for the PID file of the unicorn master process
303
- def pid(path); set_path(:pid, path); end
304
-
305
- # Enabling this preloads an application before forking worker
306
- # processes. This allows memory savings when using a
307
- # copy-on-write-friendly GC but can cause bad things to happen when
308
- # resources like sockets are opened at load time by the master
309
- # process and shared by multiple children. People enabling this are
310
- # highly encouraged to look at the before_fork/after_fork hooks to
311
- # properly close/reopen sockets. Files opened for logging do not
312
- # have to be reopened as (unbuffered-in-userspace) files opened with
313
- # the File::APPEND flag are written to atomically on UNIX.
314
- #
315
- # In addition to reloading the unicorn-specific config settings,
316
- # SIGHUP will reload application code in the working
317
- # directory/symlink when workers are gracefully restarted when
318
- # preload_app=false (the default). As reloading the application
319
- # sometimes requires RubyGems updates, +Gem.refresh+ is always
320
- # called before the application is loaded (for RubyGems users).
321
- #
322
- # During deployments, care should _always_ be taken to ensure your
323
- # applications are properly deployed and running. Using
324
- # preload_app=false (the default) means you _must_ check if
325
- # your application is responding properly after a deployment.
326
- # Improperly deployed applications can go into a spawn loop
327
- # if the application fails to load. While your children are
328
- # in a spawn loop, it is possible to fix an application
329
- # by properly deploying all required code and dependencies.
330
- # Using preload_app=true means any application load error will
331
- # cause the master process to exit with an error.
332
-
333
- def preload_app(bool)
334
- case bool
335
- when TrueClass, FalseClass
336
- set[:preload_app] = bool
337
- else
338
- raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
339
- end
370
+ # Allow redirecting $stderr to a given path. Unlike doing this from
371
+ # the shell, this allows the unicorn process to know the path it's
372
+ # writing to and rotate the file if it is used for logging. The
373
+ # file will be opened with the File::APPEND flag and writes
374
+ # synchronized to the kernel (but not necessarily to _disk_) so
375
+ # multiple processes can safely append to it.
376
+ #
377
+ # If you are daemonizing and using the default +logger+, it is important
378
+ # to specify this as errors will otherwise be lost to /dev/null.
379
+ # Some applications/libraries may also trigger warnings that go to
380
+ # stderr, and they will end up here.
381
+ def stderr_path(path)
382
+ set_path(:stderr_path, path)
383
+ end
384
+
385
+ # Same as stderr_path, except for $stdout. Not many Rack applications
386
+ # write to $stdout, but any that do will have their output written here.
387
+ # It is safe to point this to the same location as stderr_path.
388
+ # Like stderr_path, this defaults to /dev/null when daemonized.
389
+ def stdout_path(path)
390
+ set_path(:stdout_path, path)
391
+ end
392
+
393
+ # sets the working directory for Unicorn. This ensures SIGUSR2 will
394
+ # start a new instance of Unicorn in this directory. This may be
395
+ # a symlink, a common scenario for Capistrano users.
396
+ def working_directory(path)
397
+ # just let chdir raise errors
398
+ path = File.expand_path(path)
399
+ if config_file &&
400
+ config_file[0] != ?/ &&
401
+ ! File.readable?("#{path}/#{config_file}")
402
+ raise ArgumentError,
403
+ "config_file=#{config_file} would not be accessible in" \
404
+ " working_directory=#{path}"
340
405
  end
406
+ Dir.chdir(path)
407
+ Unicorn::HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
408
+ end
409
+
410
+ # Runs worker processes as the specified +user+ and +group+.
411
+ # The master process always stays running as the user who started it.
412
+ # This switch will occur after calling the after_fork hook, and only
413
+ # if the Worker#user method is not called in the after_fork hook
414
+ def user(user, group = nil)
415
+ # raises ArgumentError on invalid user/group
416
+ Etc.getpwnam(user)
417
+ Etc.getgrnam(group) if group
418
+ set[:user] = [ user, group ]
419
+ end
341
420
 
342
- # Allow redirecting $stderr to a given path. Unlike doing this from
343
- # the shell, this allows the unicorn process to know the path it's
344
- # writing to and rotate the file if it is used for logging. The
345
- # file will be opened with the File::APPEND flag and writes
346
- # synchronized to the kernel (but not necessarily to _disk_) so
347
- # multiple processes can safely append to it.
348
- #
349
- # If you are daemonizing and using the default +logger+, it is important
350
- # to specify this as errors will otherwise be lost to /dev/null.
351
- # Some applications/libraries may also trigger warnings that go to
352
- # stderr, and they will end up here.
353
- def stderr_path(path)
354
- set_path(:stderr_path, path)
421
+ # expands "unix:path/to/foo" to a socket relative to the current path
422
+ # expands pathnames of sockets if relative to "~" or "~username"
423
+ # expands "*:port and ":port" to "0.0.0.0:port"
424
+ def expand_addr(address) #:nodoc
425
+ return "0.0.0.0:#{address}" if Integer === address
426
+ return address unless String === address
427
+
428
+ case address
429
+ when %r{\Aunix:(.*)\z}
430
+ File.expand_path($1)
431
+ when %r{\A~}
432
+ File.expand_path(address)
433
+ when %r{\A(?:\*:)?(\d+)\z}
434
+ "0.0.0.0:#$1"
435
+ when %r{\A(.*):(\d+)\z}
436
+ # canonicalize the name
437
+ packed = Socket.pack_sockaddr_in($2.to_i, $1)
438
+ Socket.unpack_sockaddr_in(packed).reverse!.join(':')
439
+ else
440
+ address
355
441
  end
442
+ end
443
+
444
+ private
356
445
 
357
- # Same as stderr_path, except for $stdout. Not many Rack applications
358
- # write to $stdout, but any that do will have their output written here.
359
- # It is safe to point this to the same location as stderr_path.
360
- # Like stderr_path, this defaults to /dev/null when daemonized.
361
- def stdout_path(path)
362
- set_path(:stdout_path, path)
446
+ def set_path(var, path) #:nodoc:
447
+ case path
448
+ when NilClass, String
449
+ set[var] = path
450
+ else
451
+ raise ArgumentError
363
452
  end
453
+ end
364
454
 
365
- # sets the working directory for Unicorn. This ensures SIGUSR2 will
366
- # start a new instance of Unicorn in this directory. This may be
367
- # a symlink, a common scenario for Capistrano users. Unlike
368
- # all other Unicorn configuration directives, this binds immediately
369
- # for error checking and cannot be undone by unsetting it in the
370
- # configuration file and reloading.
371
- def working_directory(path)
372
- # just let chdir raise errors
373
- path = File.expand_path(path)
374
- if config_file &&
375
- config_file[0] != ?/ &&
376
- ! File.readable?("#{path}/#{config_file}")
455
+ def set_hook(var, my_proc, req_arity = 2) #:nodoc:
456
+ case my_proc
457
+ when Proc
458
+ arity = my_proc.arity
459
+ (arity == req_arity) or \
377
460
  raise ArgumentError,
378
- "config_file=#{config_file} would not be accessible in" \
379
- " working_directory=#{path}"
380
- end
381
- Dir.chdir(path)
382
- HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
461
+ "#{var}=#{my_proc.inspect} has invalid arity: " \
462
+ "#{arity} (need #{req_arity})"
463
+ when NilClass
464
+ my_proc = DEFAULTS[var]
465
+ else
466
+ raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
383
467
  end
468
+ set[var] = my_proc
469
+ end
384
470
 
385
- # Runs worker processes as the specified +user+ and +group+.
386
- # The master process always stays running as the user who started it.
387
- # This switch will occur after calling the after_fork hook, and only
388
- # if the Worker#user method is not called in the after_fork hook
389
- def user(user, group = nil)
390
- # raises ArgumentError on invalid user/group
391
- Etc.getpwnam(user)
392
- Etc.getgrnam(group) if group
393
- set[:user] = [ user, group ]
394
- end
471
+ # this is called _after_ working_directory is bound. This only
472
+ # parses the embedded switches in .ru files
473
+ # (for "rackup" compatibility)
474
+ def parse_rackup_file # :nodoc:
475
+ ru = RACKUP[:file] or return # we only return here in unit tests
395
476
 
396
- # expands "unix:path/to/foo" to a socket relative to the current path
397
- # expands pathnames of sockets if relative to "~" or "~username"
398
- # expands "*:port and ":port" to "0.0.0.0:port"
399
- def expand_addr(address) #:nodoc
400
- return "0.0.0.0:#{address}" if Integer === address
401
- return address unless String === address
402
-
403
- case address
404
- when %r{\Aunix:(.*)\z}
405
- File.expand_path($1)
406
- when %r{\A~}
407
- File.expand_path(address)
408
- when %r{\A(?:\*:)?(\d+)\z}
409
- "0.0.0.0:#$1"
410
- when %r{\A(.*):(\d+)\z}
411
- # canonicalize the name
412
- packed = Socket.pack_sockaddr_in($2.to_i, $1)
413
- Socket.unpack_sockaddr_in(packed).reverse!.join(':')
414
- else
415
- address
416
- end
477
+ # :rails means use (old) Rails autodetect
478
+ if ru == :rails
479
+ File.readable?('config.ru') or return
480
+ ru = 'config.ru'
417
481
  end
418
482
 
419
- private
483
+ File.readable?(ru) or
484
+ raise ArgumentError, "rackup file (#{ru}) not readable"
420
485
 
421
- def set_path(var, path) #:nodoc:
422
- case path
423
- when NilClass, String
424
- set[var] = path
425
- else
426
- raise ArgumentError
427
- end
428
- end
486
+ # it could be a .rb file, too, we don't parse those manually
487
+ ru =~ /\.ru\z/ or return
429
488
 
430
- def set_hook(var, my_proc, req_arity = 2) #:nodoc:
431
- case my_proc
432
- when Proc
433
- arity = my_proc.arity
434
- (arity == req_arity) or \
435
- raise ArgumentError,
436
- "#{var}=#{my_proc.inspect} has invalid arity: " \
437
- "#{arity} (need #{req_arity})"
438
- when NilClass
439
- my_proc = DEFAULTS[var]
440
- else
441
- raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
442
- end
443
- set[var] = my_proc
444
- end
489
+ /^#\\(.*)/ =~ File.read(ru) or return
490
+ RACKUP[:optparse].parse!($1.split(/\s+/))
445
491
 
446
- # this is called _after_ working_directory is bound. This only
447
- # parses the embedded switches in .ru files
448
- # (for "rackup" compatibility)
449
- def parse_rackup_file # :nodoc:
450
- ru = RACKUP[:file] or return # we only return here in unit tests
492
+ # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
493
+ host, port, set_listener, options, daemonize =
494
+ eval("[ host, port, set_listener, options, daemonize ]",
495
+ TOPLEVEL_BINDING)
451
496
 
452
- # :rails means use (old) Rails autodetect
453
- if ru == :rails
454
- File.readable?('config.ru') or return
455
- ru = 'config.ru'
456
- end
497
+ # XXX duplicate code from bin/unicorn{,_rails}
498
+ set[:listeners] << "#{host}:#{port}" if set_listener
457
499
 
458
- File.readable?(ru) or
459
- raise ArgumentError, "rackup file (#{ru}) not readable"
460
-
461
- # it could be a .rb file, too, we don't parse those manually
462
- ru =~ /\.ru\z/ or return
463
-
464
- /^#\\(.*)/ =~ File.read(ru) or return
465
- RACKUP[:optparse].parse!($1.split(/\s+/))
466
-
467
- # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
468
- host, port, set_listener, options, daemonize =
469
- eval("[ host, port, set_listener, options, daemonize ]",
470
- TOPLEVEL_BINDING)
471
-
472
- # XXX duplicate code from bin/unicorn{,_rails}
473
- set[:listeners] << "#{host}:#{port}" if set_listener
474
-
475
- if daemonize
476
- # unicorn_rails wants a default pid path, (not plain 'unicorn')
477
- if after_reload
478
- spid = set[:pid]
479
- pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
480
- end
481
- unless RACKUP[:daemonized]
482
- Unicorn::Launcher.daemonize!(options)
483
- RACKUP[:ready_pipe] = options.delete(:ready_pipe)
484
- end
500
+ if daemonize
501
+ # unicorn_rails wants a default pid path, (not plain 'unicorn')
502
+ if after_reload
503
+ spid = set[:pid]
504
+ pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
505
+ end
506
+ unless RACKUP[:daemonized]
507
+ Unicorn::Launcher.daemonize!(options)
508
+ RACKUP[:ready_pipe] = options.delete(:ready_pipe)
485
509
  end
486
510
  end
487
-
488
511
  end
489
512
  end
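
Taken together, the Configurator comments in this last hunk document the directives available from the config-file DSL. As a closing illustration, here is a hedged, minimal config exercising a few of them; every path, port and count below is a placeholder rather than a value shipped with the package:

    # config/unicorn.rb -- illustrative only; evaluated via unicorn -c, not run directly
    worker_processes 4
    working_directory "/srv/myapp/current"       # binds immediately; may be a symlink
    listen "/tmp/myapp.sock", :backlog => 64     # UNIX socket with a smaller backlog
    listen 8080, :tcp_nopush => false            # a bare port expands to 0.0.0.0:8080
    timeout 30                                   # workers stuck longer than 30s are SIGKILLed
    pid "/srv/myapp/shared/pids/unicorn.pid"
    stderr_path "/srv/myapp/shared/log/unicorn.stderr.log"
    stdout_path "/srv/myapp/shared/log/unicorn.stdout.log"
    preload_app true

    after_fork do |server, worker|
      # per-worker listener for debugging, as in the after_fork example above
      server.listen("127.0.0.1:#{9293 + worker.nr}", :tries => -1, :delay => 5)
    end
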