unicorn 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,45 @@
+ /**
+ * Copyright (c) 2005 Zed A. Shaw
+ * You can redistribute it and/or modify it under the same terms as Ruby.
+ */
+
+ #ifndef http11_parser_h
+ #define http11_parser_h
+
+ #include <sys/types.h>
+
+ typedef void (*element_cb)(void *data, const char *at, size_t length);
+ typedef void (*field_cb)(void *data, const char *field, size_t flen, const char *value, size_t vlen);
+
+ typedef struct http_parser {
+ int cs;
+ size_t body_start;
+ int content_len;
+ size_t nread;
+ size_t mark;
+ size_t field_start;
+ size_t field_len;
+ size_t query_start;
+
+ void *data;
+
+ field_cb http_field;
+ element_cb request_method;
+ element_cb request_uri;
+ element_cb fragment;
+ element_cb request_path;
+ element_cb query_string;
+ element_cb http_version;
+ element_cb header_done;
+
+ } http_parser;
+
+ int http_parser_init(http_parser *parser);
+ int http_parser_finish(http_parser *parser);
+ size_t http_parser_execute(http_parser *parser, const char *data, size_t len, size_t off);
+ int http_parser_has_error(http_parser *parser);
+ int http_parser_is_finished(http_parser *parser);
+
+ #define http_parser_nread(parser) (parser)->nread
+
+ #endif
@@ -0,0 +1,153 @@
+ /**
+ * Copyright (c) 2005 Zed A. Shaw
+ * You can redistribute it and/or modify it under the same terms as Ruby.
+ */
+ #include "http11_parser.h"
+ #include <stdio.h>
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <ctype.h>
+ #include <string.h>
+
+ /*
+ * capitalizes all lower-case ASCII characters,
+ * converts dashes to underscores.
+ */
+ static void snake_upcase_char(char *c)
+ {
+ if (*c >= 'a' && *c <= 'z')
+ *c &= ~0x20;
+ else if (*c == '-')
+ *c = '_';
+ }
+
+ #define LEN(AT, FPC) (FPC - buffer - parser->AT)
+ #define MARK(M,FPC) (parser->M = (FPC) - buffer)
+ #define PTR_TO(F) (buffer + parser->F)
+
+ /** Machine **/
+
+ %%{
+
+ machine http_parser;
+
+ action mark {MARK(mark, fpc); }
+
+
+ action start_field { MARK(field_start, fpc); }
+ action snake_upcase_field { snake_upcase_char((char *)fpc); }
+ action write_field {
+ parser->field_len = LEN(field_start, fpc);
+ }
+
+ action start_value { MARK(mark, fpc); }
+ action write_value {
+ if(parser->http_field != NULL) {
+ parser->http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, fpc));
+ }
+ }
+ action request_method {
+ if(parser->request_method != NULL)
+ parser->request_method(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ }
+ action request_uri {
+ if(parser->request_uri != NULL)
+ parser->request_uri(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ }
+ action fragment {
+ if(parser->fragment != NULL)
+ parser->fragment(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ }
+
+ action start_query {MARK(query_start, fpc); }
+ action query_string {
+ if(parser->query_string != NULL)
+ parser->query_string(parser->data, PTR_TO(query_start), LEN(query_start, fpc));
+ }
+
+ action http_version {
+ if(parser->http_version != NULL)
+ parser->http_version(parser->data, PTR_TO(mark), LEN(mark, fpc));
+ }
+
+ action request_path {
+ if(parser->request_path != NULL)
+ parser->request_path(parser->data, PTR_TO(mark), LEN(mark,fpc));
+ }
+
+ action done {
+ parser->body_start = fpc - buffer + 1;
+ if(parser->header_done != NULL)
+ parser->header_done(parser->data, fpc + 1, pe - fpc - 1);
+ fbreak;
+ }
+
+ include http_parser_common "http11_parser_common.rl";
+
+ }%%
+
+ /** Data **/
+ %% write data;
+
+ int http_parser_init(http_parser *parser) {
+ int cs = 0;
+ %% write init;
+ parser->cs = cs;
+ parser->body_start = 0;
+ parser->content_len = 0;
+ parser->mark = 0;
+ parser->nread = 0;
+ parser->field_len = 0;
+ parser->field_start = 0;
+
+ return(1);
+ }
+
+
+ /** exec **/
+ size_t http_parser_execute(http_parser *parser, const char *buffer, size_t len, size_t off) {
+ const char *p, *pe;
+ int cs = parser->cs;
+
+ assert(off <= len && "offset past end of buffer");
+
+ p = buffer+off;
+ pe = buffer+len;
+
+ assert(*pe == '\0' && "pointer does not end on NUL");
+ assert(pe - p == len - off && "pointers aren't same distance");
+
+ %% write exec;
+
+ if (!http_parser_has_error(parser))
+ parser->cs = cs;
+ parser->nread += p - (buffer + off);
+
+ assert(p <= pe && "buffer overflow after parsing execute");
+ assert(parser->nread <= len && "nread longer than length");
+ assert(parser->body_start <= len && "body starts after buffer end");
+ assert(parser->mark < len && "mark is after buffer end");
+ assert(parser->field_len <= len && "field has length longer than whole buffer");
+ assert(parser->field_start < len && "field starts after buffer end");
+
+ return(parser->nread);
+ }
+
+ int http_parser_finish(http_parser *parser)
+ {
+ if (http_parser_has_error(parser) ) {
+ return -1;
+ } else if (http_parser_is_finished(parser) ) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ int http_parser_has_error(http_parser *parser) {
+ return parser->cs == http_parser_error;
+ }
+
+ int http_parser_is_finished(http_parser *parser) {
+ return parser->cs == http_parser_first_final;
+ }
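
Note on the field handling above: the snake_upcase_field action runs snake_upcase_char on every header-field byte in place, upcasing lower-case ASCII and turning dashes into underscores, so a field such as Content-Type reaches the http_field callback as CONTENT_TYPE. The following is an illustration only (not part of the package) of the same transformation expressed in plain Ruby; the parser itself does this in C, one byte at a time:

    # Illustration of the transform performed by snake_upcase_char:
    # 'a'..'z' are upcased and '-' becomes '_'.
    def snake_upcase(field)
      field.tr('a-z-', 'A-Z_')
    end

    snake_upcase('Content-Type')    # => "CONTENT_TYPE"
    snake_upcase('X-Forwarded-For') # => "X_FORWARDED_FOR"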
@@ -0,0 +1,55 @@
+ %%{
+
+ machine http_parser_common;
+
+ #### HTTP PROTOCOL GRAMMAR
+ # line endings
+ CRLF = "\r\n";
+
+ # character types
+ CTL = (cntrl | 127);
+ safe = ("$" | "-" | "_" | ".");
+ extra = ("!" | "*" | "'" | "(" | ")" | ",");
+ reserved = (";" | "/" | "?" | ":" | "@" | "&" | "=" | "+");
+ sorta_safe = ("\"" | "<" | ">");
+ unsafe = (CTL | " " | "#" | "%" | sorta_safe);
+ national = any -- (alpha | digit | reserved | extra | safe | unsafe);
+ unreserved = (alpha | digit | safe | extra | national);
+ escape = ("%" xdigit xdigit);
+ uchar = (unreserved | escape | sorta_safe);
+ pchar = (uchar | ":" | "@" | "&" | "=" | "+");
+ tspecials = ("(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\\" | "\"" | "/" | "[" | "]" | "?" | "=" | "{" | "}" | " " | "\t");
+
+ # elements
+ token = (ascii -- (CTL | tspecials));
+
+ # URI schemes and absolute paths
+ scheme = ( alpha | digit | "+" | "-" | "." )* ;
+ absolute_uri = (scheme ":" (uchar | reserved )*);
+
+ path = ( pchar+ ( "/" pchar* )* ) ;
+ query = ( uchar | reserved )* %query_string ;
+ param = ( pchar | "/" )* ;
+ params = ( param ( ";" param )* ) ;
+ rel_path = ( path? %request_path (";" params)? ) ("?" %start_query query)?;
+ absolute_path = ( "/"+ rel_path );
+
+ Request_URI = ( "*" | absolute_uri | absolute_path ) >mark %request_uri;
+ Fragment = ( uchar | reserved )* >mark %fragment;
+ Method = ( upper | digit | safe ){1,20} >mark %request_method;
+
+ http_number = ( digit+ "." digit+ ) ;
+ HTTP_Version = ( "HTTP/" http_number ) >mark %http_version ;
+ Request_Line = ( Method " " Request_URI ("#" Fragment){0,1} " " HTTP_Version CRLF ) ;
+
+ field_name = ( token -- ":" )+ >start_field $snake_upcase_field %write_field;
+
+ field_value = any* >start_value %write_value;
+
+ message_header = field_name ":" " "* field_value :> CRLF;
+
+ Request = Request_Line ( message_header )* ( CRLF @done );
+
+ main := Request;
+
+ }%%
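
For reference, the Request production above matches a request line, zero or more message headers, and a terminating CRLF, at which point the done action records body_start and hands whatever follows to header_done. An example of a raw request this grammar accepts, written as a Ruby string purely for illustration (not part of the package):

    # Method SP Request-URI ["#" Fragment] SP HTTP-Version CRLF, then headers,
    # then a blank line; anything after the blank line is treated as the body.
    raw = "GET /index.html?q=ruby HTTP/1.1\r\n" \
          "Host: example.com\r\n" \
          "Content-Length: 0\r\n" \
          "\r\n"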
data/lib/unicorn.rb ADDED
@@ -0,0 +1,548 @@
+ require 'logger'
+
+ require 'unicorn/socket'
+ require 'unicorn/const'
+ require 'unicorn/http_request'
+ require 'unicorn/http_response'
+ require 'unicorn/configurator'
+ require 'unicorn/util'
+
+ # Unicorn module containing all of the classes (including C extensions) for running
+ # a Unicorn web server. It contains a minimalist HTTP server with just enough
+ # functionality to service web application requests as fast as possible.
+ module Unicorn
+ class << self
+ def run(app, options = {})
+ HttpServer.new(app, options).start.join
+ end
+ end
+
+ # This is the process manager of Unicorn. It manages worker
+ # processes, which in turn handle the I/O and run the application.
+ # Listener sockets are started in the master process and shared with
+ # forked worker children.
+ class HttpServer
+ attr_reader :logger
+ include Process
+ include ::Unicorn::SocketHelper
+
+ DEFAULT_START_CTX = {
+ :argv => ARGV.map { |arg| arg.dup },
+ # don't rely on Dir.pwd here since it's not symlink-aware, and
+ # symlink dirs are the default with Capistrano...
+ :cwd => `/bin/sh -c pwd`.chomp("\n"),
+ :zero => $0.dup,
+ :environ => {}.merge!(ENV),
+ :umask => File.umask,
+ }.freeze
+
+ Worker = Struct.new(:nr, :tempfile) unless defined?(Worker)
+ class Worker
+ # worker objects may be compared to just plain numbers
+ def ==(other_nr)
+ self.nr == other_nr
+ end
+ end
+
+ # Creates the server object; listeners are not bound and workers are
+ # not forked until #start is called. Use HttpServer#start to start
+ # the server and HttpServer#join to run the master loop that manages
+ # the workers processing incoming requests on the sockets.
+ def initialize(app, options = {})
+ start_ctx = options.delete(:start_ctx)
+ @start_ctx = DEFAULT_START_CTX.dup
+ @start_ctx.merge!(start_ctx) if start_ctx
+ @app = app
+ @mode = :idle
+ @master_pid = $$
+ @workers = Hash.new
+ @io_purgatory = [] # prevents IO objects in here from being GC-ed
+ @request = @rd_sig = @wr_sig = nil
+ @reexec_pid = 0
+ @config = Configurator.new(options.merge(:use_defaults => true))
+ @config.commit!(self, :skip => [:listeners, :pid])
+ @listeners = []
+ end
+
+ # Runs the thing. Returns self so you can run join on it.
+ def start
+ BasicSocket.do_not_reverse_lookup = true
+
+ # inherit sockets from parents; they need to be plain Socket objects
+ # before they become UNIXServer or TCPServer
+ inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
+ io = Socket.for_fd(fd.to_i)
+ set_server_sockopt(io)
+ @io_purgatory << io
+ logger.info "inherited: #{io} fd=#{fd} addr=#{sock_name(io)}"
+ server_cast(io)
+ end
+
+ config_listeners = @config[:listeners].dup
+ @listeners.replace(inherited)
+
+ # we start out with generic Socket objects that get cast to either
+ # TCPServer or UNIXServer objects, but since the Socket objects
+ # share the same OS-level file descriptor as the higher-level *Server
+ # objects, we need to prevent the Socket objects from being garbage-collected
+ config_listeners -= listener_names
+ config_listeners.each { |addr| listen(addr) }
+ raise ArgumentError, "no listeners" if @listeners.empty?
+ self.pid = @config[:pid]
+ build_app! if @preload_app
+ $stderr.reopen(File.open(@stderr_path, "a")) if @stderr_path
+ $stdout.reopen(File.open(@stdout_path, "a")) if @stdout_path
+ $stderr.sync = $stdout.sync = true
+ spawn_missing_workers
+ self
+ end
+
+ # replaces the current listener set with +listeners+. This will
+ # close any sockets that do not exist in the new listener set
+ def listeners=(listeners)
+ cur_names = listener_names
+ set_names = listener_names(listeners)
+ dead_names = cur_names - set_names
+
+ @listeners.delete_if do |io|
+ if dead_names.include?(sock_name(io))
+ @io_purgatory.delete_if { |pio| pio.fileno == io.fileno }
+ destroy_safely(io)
+ true
+ else
+ false
+ end
+ end
+
+ (set_names - cur_names).each { |addr| listen(addr) }
+ end
+
+ # sets the path for the PID file of the master process
+ def pid=(path)
+ if path
+ if x = valid_pid?(path)
+ return path if @pid && path == @pid && x == $$
+ raise ArgumentError, "Already running on PID:#{x} " \
+ "(or pid=#{path} is stale)"
+ end
+ File.open(path, 'wb') { |fp| fp.syswrite("#{$$}\n") }
+ end
+ unlink_pid_safe(@pid) if @pid && @pid != path
+ @pid = path
+ end
+
+ # adds a given address to the +listeners+ set, idempotently.
+ # Allows workers to add a private, per-process listener via the
+ # @after_fork hook. Very useful for debugging and testing.
+ def listen(address)
+ return if String === address && listener_names.include?(address)
+
+ if io = bind_listen(address, @backlog)
+ if Socket == io.class
+ @io_purgatory << io
+ io = server_cast(io)
+ end
+ logger.info "#{io} listening on PID:#{$$} " \
+ "fd=#{io.fileno} addr=#{sock_name(io)}"
+ @listeners << io
+ else
+ logger.error "adding listener failed addr=#{address} (in use)"
+ raise Errno::EADDRINUSE, address
+ end
+ end
+
+ # monitors children and receives signals forever
+ # (or until a termination signal is sent). This handles signals
+ # one at a time and we'll happily drop signals in case somebody
+ # is signalling us too often.
+ def join
+ # this pipe is used to wake us up from select(2) in #join when signals
+ # are trapped. See trap_deferred
+ @rd_sig, @wr_sig = IO.pipe unless (@rd_sig && @wr_sig)
+ @rd_sig.nonblock = @wr_sig.nonblock = true
+
+ reset_master
+ $0 = "unicorn master"
+ logger.info "master process ready" # test relies on this message
+ begin
+ loop do
+ reap_all_workers
+ case @mode
+ when :idle
+ murder_lazy_workers
+ spawn_missing_workers
+ when 'QUIT' # graceful shutdown
+ break
+ when 'TERM', 'INT' # immediate shutdown
+ stop(false)
+ break
+ when 'USR1' # user-defined (probably something like log reopening)
+ kill_each_worker('USR1')
+ Unicorn::Util.reopen_logs
+ reset_master
+ when 'USR2' # exec binary, stay alive in case something went wrong
+ reexec
+ reset_master
+ when 'HUP'
+ if @config.config_file
+ load_config!
+ reset_master
+ redo # immediate reaping since we may have QUIT workers
+ else # exec binary and exit if there's no config file
+ logger.info "config_file not present, reexecuting binary"
+ reexec
+ break
+ end
+ else
+ logger.error "master process in unknown mode: #{@mode}, resetting"
+ reset_master
+ end
+ reap_all_workers
+
+ ready = begin
+ IO.select([@rd_sig], nil, nil, 1) or next
+ rescue Errno::EINTR # next
+ end
+ ready[0] && ready[0][0] or next
+ begin # just consume the pipe when we're awakened, @mode is set
+ loop { @rd_sig.sysread(Const::CHUNK_SIZE) }
+ rescue Errno::EAGAIN, Errno::EINTR # next
+ end
+ end
+ rescue Errno::EINTR
+ retry
+ rescue Object => e
+ logger.error "Unhandled master loop exception #{e.inspect}."
+ logger.error e.backtrace.join("\n")
+ reset_master
+ retry
+ end
+ stop # gracefully shutdown all workers on our way out
+ logger.info "master PID:#{$$} join complete"
+ unlink_pid_safe(@pid) if @pid
+ end
+
+ # Terminates all workers, but does not exit master process
+ def stop(graceful = true)
+ kill_each_worker(graceful ? 'QUIT' : 'TERM')
+ timeleft = @timeout
+ step = 0.2
+ reap_all_workers
+ until @workers.empty?
+ sleep(step)
+ reap_all_workers
+ (timeleft -= step) > 0 and next
+ kill_each_worker('KILL')
+ end
+ ensure
+ self.listeners = []
+ end
+
+ private
+
+ # list of signals we care about and trap in master.
+ TRAP_SIGS = %w(QUIT INT TERM USR1 USR2 HUP).map { |x| x.freeze }.freeze
+
+ # defer a signal for later processing in #join (master process)
+ def trap_deferred(signal)
+ trap(signal) do |sig_nr|
+ # we only handle/defer one signal at a time and ignore all others
+ # until we're ready again. Queueing signals can lead to more bugs,
+ # and simplicity is the most important thing
+ TRAP_SIGS.each { |sig| trap(sig, 'IGNORE') }
+ if Symbol === @mode
+ @mode = signal
+ begin
+ @wr_sig.syswrite('.') # wakeup master process from IO.select
+ rescue Errno::EAGAIN
+ rescue Errno::EINTR
+ retry
+ end
+ end
+ end
+ end
+
+
+ def reset_master
+ @mode = :idle
+ TRAP_SIGS.each { |sig| trap_deferred(sig) }
+ end
+
+ # reaps all unreaped workers
+ def reap_all_workers
+ begin
+ loop do
+ pid = waitpid(-1, WNOHANG) or break
+ if @reexec_pid == pid
+ logger.error "reaped exec()-ed PID:#{pid} status=#{$?.exitstatus}"
+ @reexec_pid = 0
+ self.pid = @pid.chomp('.oldbin') if @pid
+ else
+ worker = @workers.delete(pid)
+ worker.tempfile.close rescue nil
+ logger.info "reaped PID:#{pid} " \
+ "worker=#{worker.nr rescue 'unknown'} " \
+ "status=#{$?.exitstatus}"
+ end
+ end
+ rescue Errno::ECHILD
+ end
+ end
+
+ # reexecutes the @start_ctx with a new binary
+ def reexec
+ if @reexec_pid > 0
+ begin
+ Process.kill(0, @reexec_pid)
+ logger.error "reexec-ed child already running PID:#{@reexec_pid}"
+ return
+ rescue Errno::ESRCH
+ @reexec_pid = 0
+ end
+ end
+
+ if @pid
+ old_pid = "#{@pid}.oldbin"
+ prev_pid = @pid.dup
+ begin
+ self.pid = old_pid # clear the path for a new pid file
+ rescue ArgumentError
+ logger.error "old PID:#{valid_pid?(old_pid)} running with " \
+ "existing pid=#{old_pid}, refusing reexec"
+ return
+ rescue Object => e
+ logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
+ return
+ end
+ end
+
+ @reexec_pid = fork do
+ @rd_sig.close if @rd_sig
+ @wr_sig.close if @wr_sig
+ @workers.values.each { |other| other.tempfile.close rescue nil }
+
+ ENV.replace(@start_ctx[:environ])
+ ENV['UNICORN_FD'] = @listeners.map { |sock| sock.fileno }.join(',')
+ File.umask(@start_ctx[:umask])
+ Dir.chdir(@start_ctx[:cwd])
+ cmd = [ @start_ctx[:zero] ] + @start_ctx[:argv]
+ logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
+ @before_exec.call(self) if @before_exec
+ exec(*cmd)
+ end
+ end
+
+ # forcibly terminate all workers that haven't checked in in @timeout
+ # seconds. The timeout is implemented using an unlinked tempfile
+ # shared between the parent process and each worker. The worker
+ # runs File#chmod to modify the ctime of the tempfile. If the ctime
+ # is stale for >@timeout seconds, then we'll kill the corresponding
+ # worker.
+ def murder_lazy_workers
+ now = Time.now
+ @workers.each_pair do |pid, worker|
+ (now - worker.tempfile.ctime) <= @timeout and next
+ logger.error "worker=#{worker.nr} PID:#{pid} is too old, killing"
+ kill_worker('KILL', pid) # take no prisoners for @timeout violations
+ worker.tempfile.close rescue nil
+ end
+ end
+
+ def spawn_missing_workers
+ return if @workers.size == @worker_processes
+ (0...@worker_processes).each do |worker_nr|
+ @workers.values.include?(worker_nr) and next
+ tempfile = Tempfile.new('') # as short as possible to save dir space
+ tempfile.unlink # don't allow other processes to find or see it
+ tempfile.sync = true
+ worker = Worker.new(worker_nr, tempfile)
+ @before_fork.call(self, worker.nr)
+ pid = fork { worker_loop(worker) }
+ @workers[pid] = worker
+ end
+ end
+
+ # once a client is accepted, it is processed in its entirety here
+ # in 3 easy steps: read request, call app, write app response
+ def process_client(client)
+ env = @request.read(client) or return
+ app_response = @app.call(env)
+ HttpResponse.write(client, app_response)
+ rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+ client.closed? or client.close rescue nil
+ rescue Object => e
+ logger.error "Read error: #{e.inspect}"
+ logger.error e.backtrace.join("\n")
+ ensure
+ begin
+ client.closed? or client.close
+ rescue Object => e
+ logger.error "Client error: #{e.inspect}"
+ logger.error e.backtrace.join("\n")
+ end
+ @request.reset
+ end
+
+ # gets rid of stuff the worker has no business keeping track of
+ # to free some resources and drops all sig handlers.
+ # traps for USR1, USR2, and HUP may be set in the @after_fork Proc
+ # by the user.
+ def init_worker_process(worker)
+ build_app! unless @preload_app
+ TRAP_SIGS.each { |sig| trap(sig, 'IGNORE') }
+ trap('CHLD', 'DEFAULT')
+ trap('USR1') do
+ @logger.info "worker=#{worker.nr} rotating logs..."
+ Unicorn::Util.reopen_logs
+ @logger.info "worker=#{worker.nr} done rotating logs"
+ end
+
+ $0 = "unicorn worker[#{worker.nr}]"
+ @rd_sig.close if @rd_sig
+ @wr_sig.close if @wr_sig
+ @workers.values.each { |other| other.tempfile.close rescue nil }
+ @workers.clear
+ @start_ctx.clear
+ @mode = @start_ctx = @workers = @rd_sig = @wr_sig = nil
+ @listeners.each { |sock| set_cloexec(sock) }
+ ENV.delete('UNICORN_FD')
+ @after_fork.call(self, worker.nr) if @after_fork
+ @request = HttpRequest.new(logger)
+ end
+
+ # runs inside each forked worker; this sits around and waits
+ # for connections and doesn't die until the parent dies (or is
+ # given an INT, QUIT, or TERM signal)
+ def worker_loop(worker)
+ init_worker_process(worker)
+ nr = 0
+ tempfile = worker.tempfile
+ alive = true
+ ready = @listeners
+ client = nil
+ %w(TERM INT).each { |sig| trap(sig) { exit(0) } } # instant shutdown
+ trap('QUIT') do
+ alive = false
+ @listeners.each { |sock| sock.close rescue nil } # break IO.select
+ end
+
+ while alive && @master_pid == ppid
+ # we're a goner in @timeout seconds anyway if tempfile.chmod
+ # breaks, so don't trap the exception. Using fchmod() since
+ # futimes() is not available in base Ruby and I very strongly
+ # prefer temporary files to be unlinked for security,
+ # performance and reliability reasons, so utime is out. No-op
+ # changes with chmod don't update ctime on all filesystems, so
+ # we increment our counter each and every time.
+ tempfile.chmod(nr += 1)
+
+ begin
+ accepted = false
+ ready.each do |sock|
+ begin
+ client = begin
+ sock.accept_nonblock
+ rescue Errno::EAGAIN
+ next
+ end
+ accepted = client.sync = true
+ client.nonblock = false
+ set_client_sockopt(client) if TCPSocket === client
+ process_client(client)
+ rescue Errno::ECONNABORTED
+ # client closed the socket even before accept
+ if client && !client.closed?
+ client.close rescue nil
+ end
+ end
+ tempfile.chmod(nr += 1)
+ end
+ client = nil
+
+ # make the following bet: if we accepted clients this round,
+ # we're probably reasonably busy, so avoid calling select(2)
+ # and try to do a blind non-blocking accept(2) on everything
+ # before we sleep again in select
+ if accepted
+ ready = @listeners
+ else
+ begin
+ tempfile.chmod(nr += 1)
+ # timeout used so we can detect parent death:
+ ret = IO.select(@listeners, nil, nil, @timeout/2.0) or next
+ ready = ret[0]
+ rescue Errno::EINTR
+ ready = @listeners
+ rescue Errno::EBADF => e
+ exit(alive ? 1 : 0)
+ end
+ end
+ rescue SystemExit => e
+ exit(e.status)
+ rescue Object => e
+ if alive
+ logger.error "Unhandled listen loop exception #{e.inspect}."
+ logger.error e.backtrace.join("\n")
+ end
+ end
+ end
+ end
+
+ # delivers a signal to a worker and fails gracefully if the worker
+ # is no longer running.
+ def kill_worker(signal, pid)
+ begin
+ kill(signal, pid)
+ rescue Errno::ESRCH
+ worker = @workers.delete(pid) and worker.tempfile.close rescue nil
+ end
+ end
+
+ # delivers a signal to each worker
+ def kill_each_worker(signal)
+ @workers.keys.each { |pid| kill_worker(signal, pid) }
+ end
+
+ # unlinks a PID file at the given +path+ if it contains the current PID;
+ # useful as an at_exit handler.
+ def unlink_pid_safe(path)
+ (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
+ end
+
+ # returns a PID if a given path contains a non-stale PID file,
+ # nil otherwise.
+ def valid_pid?(path)
+ if File.exist?(path) && (pid = File.read(path).to_i) > 1
+ begin
+ kill(0, pid)
+ return pid
+ rescue Errno::ESRCH
+ end
+ end
+ nil
+ end
+
+ def load_config!
+ begin
+ logger.info "reloading config_file=#{@config.config_file}"
+ @config.reload
+ @config.commit!(self)
+ kill_each_worker('QUIT')
+ logger.info "done reloading config_file=#{@config.config_file}"
+ rescue Object => e
+ logger.error "error reloading config_file=#{@config.config_file}: " \
+ "#{e.class} #{e.message}"
+ end
+ end
+
+ # returns an array of string names for the given listener array
+ def listener_names(listeners = @listeners)
+ listeners.map { |io| sock_name(io) }
+ end
+
+ def build_app!
+ @app = @app.call if @app.respond_to?(:arity) && @app.arity == 0
+ end
+
+ end
+ end
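
Taken together, Unicorn.run is the public entry point of this release: it builds an HttpServer, calls start (which binds or inherits the listener sockets and forks the workers), and joins the master signal loop. A minimal usage sketch follows, for illustration only; it assumes the Configurator accepts :listeners and :worker_processes option keys (the code above only shows :listeners, :pid, and :start_ctx being consumed directly), so treat the exact option names as assumptions rather than documented API:

    # Hedged sketch, not from the package: serve a bare Rack app with Unicorn.run.
    # :listeners and :worker_processes are assumed Configurator option names.
    require 'unicorn'

    app = lambda do |env|
      [200, { 'Content-Type' => 'text/plain' }, ["hello from unicorn\n"]]
    end

    Unicorn.run(app, :listeners => ['0.0.0.0:8080'], :worker_processes => 2)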