unicorn 0.5.4 → 0.6.0

This diff compares the contents of two publicly released package versions as they appear in a supported public registry, and is provided for informational purposes only.
@@ -2,12 +2,37 @@
  * Copyright (c) 2005 Zed A. Shaw
  * You can redistribute it and/or modify it under the same terms as Ruby.
  */
- #include "http11_parser.h"
- #include <stdio.h>
- #include <assert.h>
- #include <stdlib.h>
- #include <ctype.h>
- #include <string.h>
+ #ifndef http11_parser_h
+ #define http11_parser_h
+
+ #include <sys/types.h>
+
+ static void http_field(void *data, const char *field,
+ size_t flen, const char *value, size_t vlen);
+ static void request_method(void *data, const char *at, size_t length);
+ static void scheme(void *data, const char *at, size_t length);
+ static void host(void *data, const char *at, size_t length);
+ static void request_uri(void *data, const char *at, size_t length);
+ static void fragment(void *data, const char *at, size_t length);
+ static void request_path(void *data, const char *at, size_t length);
+ static void query_string(void *data, const char *at, size_t length);
+ static void http_version(void *data, const char *at, size_t length);
+ static void header_done(void *data, const char *at, size_t length);
+
+ typedef struct http_parser {
+ int cs;
+ size_t body_start;
+ size_t nread;
+ size_t mark;
+ size_t field_start;
+ size_t field_len;
+ size_t query_start;
+
+ void *data;
+ } http_parser;
+
+ static int http_parser_has_error(http_parser *parser);
+ static int http_parser_is_finished(http_parser *parser);

  /*
  * capitalizes all lower-case ASCII characters,
@@ -15,10 +40,16 @@
  */
  static void snake_upcase_char(char *c)
  {
- if (*c >= 'a' && *c <= 'z')
- *c &= ~0x20;
- else if (*c == '-')
- *c = '_';
+ if (*c >= 'a' && *c <= 'z')
+ *c &= ~0x20;
+ else if (*c == '-')
+ *c = '_';
+ }
+
+ static void downcase_char(char *c)
+ {
+ if (*c >= 'A' && *c <= 'Z')
+ *c |= 0x20;
  }

  #define LEN(AT, FPC) (FPC - buffer - parser->AT)
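
The new downcase_char mirrors snake_upcase_char: ASCII letters differ between cases only in the 0x20 bit, so OR-ing it in lowercases a byte and masking it out uppercases one. A quick Ruby illustration of the same bit trick (reference only, not part of the diff):

    ('A'.ord | 0x20).chr    # => "a"  -- what downcase_char does to a byte
    ('a'.ord & ~0x20).chr   # => "A"  -- what snake_upcase_char does to a byte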
@@ -28,84 +59,69 @@ static void snake_upcase_char(char *c)
  /** Machine **/

  %%{
-
  machine http_parser;

  action mark {MARK(mark, fpc); }

-
  action start_field { MARK(field_start, fpc); }
  action snake_upcase_field { snake_upcase_char((char *)fpc); }
- action write_field {
+ action downcase_char { downcase_char((char *)fpc); }
+ action write_field {
  parser->field_len = LEN(field_start, fpc);
  }

  action start_value { MARK(mark, fpc); }
- action write_value {
- if(parser->http_field != NULL) {
- parser->http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, fpc));
- }
+ action write_value {
+ http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, fpc));
  }
- action request_method {
- if(parser->request_method != NULL)
- parser->request_method(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ action request_method {
+ request_method(parser->data, PTR_TO(mark), LEN(mark, fpc));
  }
- action request_uri {
- if(parser->request_uri != NULL)
- parser->request_uri(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ action scheme { scheme(parser->data, PTR_TO(mark), LEN(mark, fpc)); }
+ action host { host(parser->data, PTR_TO(mark), LEN(mark, fpc)); }
+ action request_uri {
+ request_uri(parser->data, PTR_TO(mark), LEN(mark, fpc));
  }
- action fragment {
- if(parser->fragment != NULL)
- parser->fragment(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ action fragment {
+ fragment(parser->data, PTR_TO(mark), LEN(mark, fpc));
  }

  action start_query {MARK(query_start, fpc); }
- action query_string {
- if(parser->query_string != NULL)
- parser->query_string(parser->data, PTR_TO(query_start), LEN(query_start, fpc));
+ action query_string {
+ query_string(parser->data, PTR_TO(query_start), LEN(query_start, fpc));
  }

- action http_version {
- if(parser->http_version != NULL)
- parser->http_version(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ action http_version {
+ http_version(parser->data, PTR_TO(mark), LEN(mark, fpc));
  }

  action request_path {
- if(parser->request_path != NULL)
- parser->request_path(parser->data, PTR_TO(mark), LEN(mark,fpc));
+ request_path(parser->data, PTR_TO(mark), LEN(mark,fpc));
  }

- action done {
- parser->body_start = fpc - buffer + 1;
- if(parser->header_done != NULL)
- parser->header_done(parser->data, fpc + 1, pe - fpc - 1);
+ action done {
+ parser->body_start = fpc - buffer + 1;
+ header_done(parser->data, fpc + 1, pe - fpc - 1);
  fbreak;
  }

  include http_parser_common "http11_parser_common.rl";
-
  }%%

  /** Data **/
  %% write data;

- int http_parser_init(http_parser *parser) {
+ static void http_parser_init(http_parser *parser) {
  int cs = 0;
+ memset(parser, 0, sizeof(*parser));
  %% write init;
  parser->cs = cs;
- parser->body_start = 0;
- parser->content_len = 0;
- parser->mark = 0;
- parser->nread = 0;
- parser->field_len = 0;
- parser->field_start = 0;
-
- return(1);
  }

-
  /** exec **/
- size_t http_parser_execute(http_parser *parser, const char *buffer, size_t len) {
+ static void http_parser_execute(
+ http_parser *parser, const char *buffer, size_t len)
+ {
  const char *p, *pe;
  int cs = parser->cs;
  size_t off = parser->nread;
@@ -130,25 +146,13 @@ size_t http_parser_execute(http_parser *parser, const char *buffer, size_t len)
  assert(parser->mark < len && "mark is after buffer end");
  assert(parser->field_len <= len && "field has length longer than whole buffer");
  assert(parser->field_start < len && "field starts after buffer end");
-
- return(parser->nread);
- }
-
- int http_parser_finish(http_parser *parser)
- {
- if (http_parser_has_error(parser) ) {
- return -1;
- } else if (http_parser_is_finished(parser) ) {
- return 1;
- } else {
- return 0;
- }
  }

- int http_parser_has_error(http_parser *parser) {
+ static int http_parser_has_error(http_parser *parser) {
  return parser->cs == http_parser_error;
  }

- int http_parser_is_finished(http_parser *parser) {
+ static int http_parser_is_finished(http_parser *parser) {
  return parser->cs == http_parser_first_final;
  }
+ #endif /* http11_parser_h */
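
The grammar hunks that follow (http11_parser_common.rl) restrict absolute URIs to http/https with an explicit host and optional port, while plain absolute paths and the bare "*" form are still accepted. Illustrative request lines under the revised Request_URI rule (examples, not taken from the diff):

    GET /some/path?q=1 HTTP/1.1                      (absolute_path with a query string)
    GET http://example.com:8080/some/path HTTP/1.1   (Absolute_URI: scheme plus host_with_port)
    OPTIONS * HTTP/1.1                               (the "*" form)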
@@ -24,8 +24,9 @@
  token = (ascii -- (CTL | tspecials));

  # URI schemes and absolute paths
- scheme = ( alpha | digit | "+" | "-" | "." )* ;
- absolute_uri = (scheme ":" (uchar | reserved )*);
+ scheme = ( "http"i ("s"i)? ) $downcase_char >mark %scheme;
+ hostname = (alnum | "-" | "." | "_")+;
+ host_with_port = (hostname (":" digit*)?) >mark %host;

  path = ( pchar+ ( "/" pchar* )* ) ;
  query = ( uchar | reserved )* %query_string ;
@@ -33,8 +34,10 @@
  params = ( param ( ";" param )* ) ;
  rel_path = ( path? %request_path (";" params)? ) ("?" %start_query query)?;
  absolute_path = ( "/"+ rel_path );
+ path_uri = absolute_path > mark %request_uri;
+ Absolute_URI = (scheme "://" host_with_port path_uri);

- Request_URI = ( "*" | absolute_uri | absolute_path ) >mark %request_uri;
+ Request_URI = ((absolute_path | "*") >mark %request_uri) | Absolute_URI;
  Fragment = ( uchar | reserved )* >mark %fragment;
  Method = ( upper | digit | safe ){1,20} >mark %request_method;

@@ -1,7 +1,6 @@
- require 'logger'
  require 'fcntl'

- require 'unicorn/socket'
+ require 'unicorn/socket_helper'
  require 'unicorn/const'
  require 'unicorn/http_request'
  require 'unicorn/http_response'
@@ -26,14 +25,30 @@ module Unicorn
  attr_reader :logger
  include ::Unicorn::SocketHelper

+ # prevents IO objects in here from being GC-ed
+ IO_PURGATORY = []
+
+ # all bound listener sockets
+ LISTENERS = []
+
+ # This hash maps PIDs to Workers
+ WORKERS = {}
+
+ # See: http://cr.yp.to/docs/selfpipe.html
+ SELF_PIPE = []
+
+ # signal queue used for self-piping
  SIG_QUEUE = []
- DEFAULT_START_CTX = {
+
+ # We populate this at startup so we can figure out how to reexecute
+ # and upgrade the currently running instance of Unicorn
+ START_CTX = {
  :argv => ARGV.map { |arg| arg.dup },
  # don't rely on Dir.pwd here since it's not symlink-aware, and
  # symlink dirs are the default with Capistrano...
  :cwd => `/bin/sh -c pwd`.chomp("\n"),
  :zero => $0.dup,
- }.freeze
+ }

  Worker = Struct.new(:nr, :tempfile) unless defined?(Worker)
  class Worker
@@ -45,22 +60,16 @@ module Unicorn

  # Creates a working server on host:port (strange things happen if
  # port isn't a Number). Use HttpServer::run to start the server and
- # HttpServer.workers.join to join the thread that's processing
+ # HttpServer.run.join to join the thread that's processing
  # incoming requests on the socket.
  def initialize(app, options = {})
- start_ctx = options.delete(:start_ctx)
- @start_ctx = DEFAULT_START_CTX.dup
- @start_ctx.merge!(start_ctx) if start_ctx
  @app = app
- @workers = Hash.new
- @io_purgatory = [] # prevents IO objects in here from being GC-ed
- @request = @rd_sig = @wr_sig = nil
  @reexec_pid = 0
  @init_listeners = options[:listeners] ? options[:listeners].dup : []
  @config = Configurator.new(options.merge(:use_defaults => true))
  @listener_opts = {}
  @config.commit!(self, :skip => [:listeners, :pid])
- @listeners = []
+ @request = HttpRequest.new(@logger)
  end

  # Runs the thing. Returns self so you can run join on it
@@ -72,30 +81,27 @@
  inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
  io = Socket.for_fd(fd.to_i)
  set_server_sockopt(io, @listener_opts[sock_name(io)])
- @io_purgatory << io
+ IO_PURGATORY << io
  logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
  server_cast(io)
  end

  config_listeners = @config[:listeners].dup
- @listeners.replace(inherited)
+ LISTENERS.replace(inherited)

  # we start out with generic Socket objects that get cast to either
  # TCPServer or UNIXServer objects; but since the Socket objects
  # share the same OS-level file descriptor as the higher-level *Server
  # objects; we need to prevent Socket objects from being garbage-collected
  config_listeners -= listener_names
- if config_listeners.empty? && @listeners.empty?
+ if config_listeners.empty? && LISTENERS.empty?
  config_listeners << Unicorn::Const::DEFAULT_LISTEN
  end
  config_listeners.each { |addr| listen(addr) }
- raise ArgumentError, "no listeners" if @listeners.empty?
+ raise ArgumentError, "no listeners" if LISTENERS.empty?
  self.pid = @config[:pid]
  build_app! if @preload_app
- File.open(@stderr_path, "a") { |fp| $stderr.reopen(fp) } if @stderr_path
- File.open(@stdout_path, "a") { |fp| $stdout.reopen(fp) } if @stdout_path
- $stderr.sync = $stdout.sync = true
- spawn_missing_workers
+ maintain_worker_count
  self
  end

@@ -115,9 +121,9 @@ module Unicorn
  dead_names += cur_names - set_names
  dead_names.uniq!

- @listeners.delete_if do |io|
+ LISTENERS.delete_if do |io|
  if dead_names.include?(sock_name(io))
- @io_purgatory.delete_if do |pio|
+ IO_PURGATORY.delete_if do |pio|
  pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
  end
  (io.close rescue nil).nil? # true
@@ -130,6 +136,9 @@ module Unicorn
  (set_names - cur_names).each { |addr| listen(addr) }
  end

+ def stdout_path=(path); redirect_io($stdout, path); end
+ def stderr_path=(path); redirect_io($stderr, path); end
+
  # sets the path for the PID file of the master process
  def pid=(path)
  if path
@@ -152,11 +161,11 @@

  if io = bind_listen(address, opt)
  unless TCPServer === io || UNIXServer === io
- @io_purgatory << io
+ IO_PURGATORY << io
  io = server_cast(io)
  end
  logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
- @listeners << io
+ LISTENERS << io
  else
  logger.error "adding listener failed addr=#{address} (in use)"
  raise Errno::EADDRINUSE, address
@@ -170,8 +179,7 @@
  def join
  # this pipe is used to wake us up from select(2) in #join when signals
  # are trapped. See trap_deferred
- @rd_sig, @wr_sig = IO.pipe unless (@rd_sig && @wr_sig)
- mode = nil
+ init_self_pipe!
  respawn = true

  QUEUE_SIGS.each { |sig| trap_deferred(sig) }
@@ -181,10 +189,10 @@ module Unicorn
  begin
  loop do
  reap_all_workers
- case (mode = SIG_QUEUE.shift)
+ case SIG_QUEUE.shift
  when nil
  murder_lazy_workers
- spawn_missing_workers if respawn
+ maintain_worker_count if respawn
  master_sleep
  when :QUIT # graceful shutdown
  break
@@ -206,6 +214,10 @@
  else
  logger.info "SIGWINCH ignored because we're not daemonized"
  end
+ when :TTIN
+ @worker_processes += 1
+ when :TTOU
+ @worker_processes -= 1 if @worker_processes > 0
  when :HUP
  respawn = true
  if @config.config_file
@@ -216,8 +228,6 @@
  reexec
  break
  end
- else
- logger.error "master process in unknown mode: #{mode}"
  end
  end
  rescue Errno::EINTR
@@ -238,7 +248,7 @@
  timeleft = @timeout
  step = 0.2
  reap_all_workers
- until @workers.empty?
+ until WORKERS.empty?
  sleep(step)
  reap_all_workers
  (timeleft -= step) > 0 and next
@@ -251,7 +261,8 @@
  private

  # list of signals we care about and trap in master.
- QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP ].freeze
+ QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
+ :TTIN, :TTOU ].freeze

  # defer a signal for later processing in #join (master process)
  def trap_deferred(signal)
@@ -269,16 +280,16 @@
  # Wake up every second anyways to run murder_lazy_workers
  def master_sleep
  begin
- ready = IO.select([@rd_sig], nil, nil, 1)
- ready && ready[0] && ready[0][0] or return
- loop { @rd_sig.read_nonblock(Const::CHUNK_SIZE) }
+ ready = IO.select([SELF_PIPE.first], nil, nil, 1) or return
+ ready.first && ready.first.first or return
+ loop { SELF_PIPE.first.read_nonblock(Const::CHUNK_SIZE) }
  rescue Errno::EAGAIN, Errno::EINTR
  end
  end

  def awaken_master
  begin
- @wr_sig.write_nonblock('.') # wakeup master process from IO.select
+ SELF_PIPE.last.write_nonblock('.') # wakeup master process from select
  rescue Errno::EAGAIN, Errno::EINTR
  # pipe is full, master should wake up anyways
  retry
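
For background, SELF_PIPE together with master_sleep/awaken_master above implements the self-pipe trick referenced in the diff (cr.yp.to/docs/selfpipe.html): signal handlers only queue the signal and write one byte to a pipe, and the master's blocking IO.select wakes up to handle it outside the handler. A minimal standalone sketch of the pattern in Ruby (illustrative only, not Unicorn's code):

    rd, wr = IO.pipe
    queue = []

    trap(:USR1) do
      queue << :USR1
      wr.write_nonblock('.') rescue nil  # wake the select below; a full pipe is harmless
    end

    loop do
      IO.select([rd], nil, nil, 1) or next  # also wake once per second regardless
      rd.read_nonblock(512) rescue nil      # drain the wakeup bytes
      break if queue.shift == :USR1         # process the deferred signal safely here
    end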
@@ -297,7 +308,7 @@ module Unicorn
  self.pid = @pid.chomp('.oldbin') if @pid
  proc_name 'master'
  else
- worker = @workers.delete(pid)
+ worker = WORKERS.delete(pid)
  worker.tempfile.close rescue nil
  logger.info "reaped #{status.inspect} " \
  "worker=#{worker.nr rescue 'unknown'}"
@@ -307,7 +318,7 @@
  end
  end

- # reexecutes the @start_ctx with a new binary
+ # reexecutes the START_CTX with a new binary
  def reexec
  if @reexec_pid > 0
  begin
@@ -335,20 +346,19 @@
  end

  @reexec_pid = fork do
- listener_fds = @listeners.map { |sock| sock.fileno }
+ listener_fds = LISTENERS.map { |sock| sock.fileno }
  ENV['UNICORN_FD'] = listener_fds.join(',')
- Dir.chdir(@start_ctx[:cwd])
- cmd = [ @start_ctx[:zero] ] + @start_ctx[:argv]
+ Dir.chdir(START_CTX[:cwd])
+ cmd = [ START_CTX[:zero] ] + START_CTX[:argv]

  # avoid leaking FDs we don't know about, but let before_exec
  # unset FD_CLOEXEC, if anything else in the app eventually
  # relies on FD inheritence.
- purgatory = [] # prevent GC of IO objects
  (3..1024).each do |io|
  next if listener_fds.include?(io)
  io = IO.for_fd(io) rescue nil
  io or next
- purgatory << io
+ IO_PURGATORY << io
  io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
  end
  logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
@@ -366,7 +376,7 @@
  # worker.
  def murder_lazy_workers
  now = Time.now
- @workers.each_pair do |pid, worker|
+ WORKERS.each_pair do |pid, worker|
  (now - worker.tempfile.ctime) <= @timeout and next
  logger.error "worker=#{worker.nr} PID:#{pid} is too old, killing"
  kill_worker(:KILL, pid) # take no prisoners for @timeout violations
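
murder_lazy_workers can judge worker health without any pipe or socket because each worker keeps chmod-ing its unlinked tempfile (see the worker_loop changes below); every mode change bumps the file's ctime, which the master reads as a heartbeat timestamp, and Unicorn varies the mode value on each call so the change always registers. A rough standalone sketch of the idea (illustrative only):

    require 'tempfile'

    heartbeat = Tempfile.new('hb')
    heartbeat.unlink                      # keep the open file, drop its name
    heartbeat.chmod(1)                    # worker side: bump ctime after each request
    age = Time.now - heartbeat.ctime      # master side: seconds since the last heartbeat
    warn "worker looks hung" if age > 60  # this is where the master would send :KILL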
@@ -375,25 +385,32 @@ module Unicorn
  end

  def spawn_missing_workers
- return if @workers.size == @worker_processes
  (0...@worker_processes).each do |worker_nr|
- @workers.values.include?(worker_nr) and next
+ WORKERS.values.include?(worker_nr) and next
  begin
- Dir.chdir(@start_ctx[:cwd])
+ Dir.chdir(START_CTX[:cwd])
  rescue Errno::ENOENT => err
- logger.fatal "#{err.inspect} (#{@start_ctx[:cwd]})"
+ logger.fatal "#{err.inspect} (#{START_CTX[:cwd]})"
  SIG_QUEUE << :QUIT # forcibly emulate SIGQUIT
  return
  end
- tempfile = Tempfile.new('') # as short as possible to save dir space
+ tempfile = Tempfile.new(nil) # as short as possible to save dir space
  tempfile.unlink # don't allow other processes to find or see it
  worker = Worker.new(worker_nr, tempfile)
  @before_fork.call(self, worker)
  pid = fork { worker_loop(worker) }
- @workers[pid] = worker
+ WORKERS[pid] = worker
  end
  end

+ def maintain_worker_count
+ (off = WORKERS.size - @worker_processes) == 0 and return
+ off < 0 and return spawn_missing_workers
+ WORKERS.each_pair { |pid,w|
+ w.nr >= @worker_processes and kill_worker(:QUIT, pid) rescue nil
+ }
+ end
+
  # once a client is accepted, it is processed in its entirety here
  # in 3 easy steps: read request, call app, write app response
  def process_client(client)
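
maintain_worker_count ties the new :TTIN/:TTOU handling together: the signals only adjust @worker_processes, and the master's main loop then spawns or retires workers until the pool matches. Adjusting a running master from the outside might look like this (the pid file path is hypothetical):

    master_pid = File.read('/path/to/unicorn.pid').to_i  # wherever the `pid` option points
    Process.kill(:TTIN, master_pid)   # one more worker on the next loop iteration
    Process.kill(:TTOU, master_pid)   # one fewer (never dropping below zero)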
@@ -431,15 +448,22 @@ module Unicorn
  trap(:CHLD, 'DEFAULT')
  SIG_QUEUE.clear
  proc_name "worker[#{worker.nr}]"
- @rd_sig.close if @rd_sig
- @wr_sig.close if @wr_sig
- @workers.values.each { |other| other.tempfile.close rescue nil }
- @start_ctx = @workers = @rd_sig = @wr_sig = nil
- @listeners.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+ START_CTX.clear
+ init_self_pipe!
+ WORKERS.values.each { |other| other.tempfile.close! rescue nil }
+ WORKERS.clear
+ LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
  worker.tempfile.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
  @after_fork.call(self, worker) # can drop perms
- @request = HttpRequest.new(logger)
- build_app! unless @preload_app
+ @timeout /= 2.0 # halve it for select()
+ build_app! unless @config[:preload_app]
+ end
+
+ def reopen_worker_logs(worker_nr)
+ @logger.info "worker=#{worker_nr} reopening logs..."
+ Unicorn::Util.reopen_logs
+ @logger.info "worker=#{worker_nr} done reopening logs"
+ init_self_pipe!
  end

  # runs inside each forked worker, this sits around and waits
@@ -449,30 +473,19 @@
  master_pid = Process.ppid # slightly racy, but less memory usage
  init_worker_process(worker)
  nr = 0 # this becomes negative if we need to reopen logs
- tempfile = worker.tempfile
- ready = @listeners
+ alive = worker.tempfile # tempfile is our lifeline to the master process
+ ready = LISTENERS
  client = nil
- rd, wr = IO.pipe
- rd.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- wr.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)

  # closing anything we IO.select on will raise EBADF
  trap(:USR1) { nr = -65536; rd.close rescue nil }
- trap(:QUIT) { @listeners.each { |sock| sock.close rescue nil } }
+ trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
  [:TERM, :INT].each { |sig| trap(sig) { exit(0) } } # instant shutdown
  @logger.info "worker=#{worker.nr} ready"

- while master_pid == Process.ppid
- if nr < 0
- @logger.info "worker=#{worker.nr} reopening logs..."
- Unicorn::Util.reopen_logs
- @logger.info "worker=#{worker.nr} done reopening logs"
- wr.close rescue nil
- rd, wr = IO.pipe
- rd.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- wr.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
- end
- # we're a goner in @timeout seconds anyways if tempfile.chmod
+ while alive
+ reopen_worker_logs(worker.nr) if nr < 0
+ # we're a goner in @timeout seconds anyways if alive.chmod
  # breaks, so don't trap the exception. Using fchmod() since
  # futimes() is not available in base Ruby and I very strongly
  # prefer temporary files to be unlinked for security,
@@ -480,7 +493,7 @@ module Unicorn
  # changes with chmod doesn't update ctime on all filesystems; so
  # we change our counter each and every time (after process_client
  # and before IO.select).
- tempfile.chmod(nr = 0)
+ alive.chmod(nr = 0)

  begin
  ready.each do |sock|
@@ -495,7 +508,7 @@
  # client closed the socket even before accept
  client.close rescue nil
  ensure
- tempfile.chmod(nr += 1) if client
+ alive.chmod(nr += 1) if client
  break if nr < 0
  end
  end
@@ -506,17 +519,18 @@
  # and do a speculative accept_nonblock on every listener
  # before we sleep again in select().
  if nr != 0 # (nr < 0) => reopen logs
- ready = @listeners
+ ready = LISTENERS
  else
+ master_pid == Process.ppid or exit(0)
+ alive.chmod(nr += 1)
  begin
- tempfile.chmod(nr += 1)
  # timeout used so we can detect parent death:
- ret = IO.select(@listeners, nil, [rd], @timeout/2.0) or next
- ready = ret[0]
+ ret = IO.select(LISTENERS, nil, SELF_PIPE, @timeout) or next
+ ready = ret.first
  rescue Errno::EINTR
- ready = @listeners
+ ready = LISTENERS
  rescue Errno::EBADF => e
- nr < 0 or exit(@listeners[0].closed? ? 0 : 1)
+ nr < 0 or exit(alive ? 1 : 0)
  end
  end
  rescue SignalException, SystemExit => e
@@ -536,13 +550,13 @@
  begin
  Process.kill(signal, pid)
  rescue Errno::ESRCH
- worker = @workers.delete(pid) and worker.tempfile.close rescue nil
+ worker = WORKERS.delete(pid) and worker.tempfile.close rescue nil
  end
  end

  # delivers a signal to each worker
  def kill_each_worker(signal)
- @workers.keys.each { |pid| kill_worker(signal, pid) }
+ WORKERS.keys.each { |pid| kill_worker(signal, pid) }
  end

  # unlinks a PID file at given +path+ if it contains the current PID
@@ -579,7 +593,7 @@
  end

  # returns an array of string names for the given listener array
- def listener_names(listeners = @listeners)
+ def listener_names(listeners = LISTENERS)
  listeners.map { |io| sock_name(io) }
  end

@@ -588,8 +602,19 @@
  end

  def proc_name(tag)
- $0 = ([ File.basename(@start_ctx[:zero]), tag ] +
- @start_ctx[:argv]).join(' ')
+ $0 = ([ File.basename(START_CTX[:zero]), tag ] +
+ START_CTX[:argv]).join(' ')
+ end
+
+ def redirect_io(io, path)
+ File.open(path, 'a') { |fp| io.reopen(fp) } if path
+ io.sync = true
+ end
+
+ def init_self_pipe!
+ SELF_PIPE.each { |io| io.close rescue nil }
+ SELF_PIPE.replace(IO.pipe)
+ SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
  end

  end