defmacro-unicorn 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/unicorn.rb ADDED
@@ -0,0 +1,741 @@
1
+ # -*- encoding: binary -*-
2
+
3
+ require 'fcntl'
4
+ require 'unicorn/socket_helper'
5
+ require 'etc'
6
+
7
+ # Unicorn module containing all of the classes (include C extensions) for running
8
+ # a Unicorn web server. It contains a minimalist server with just enough
9
+ # functionality to service application requests fast as possible.
10
# Unicorn module containing all of the classes (include C extensions) for running
# a Unicorn web server. It contains a minimalist server with just enough
# functionality to service application requests fast as possible.
module Unicorn

  # lazily-loaded components; the constant is resolved (and the file
  # required) only on first reference
  autoload :Const, 'unicorn/const'
  autoload :Configurator, 'unicorn/configurator'
  autoload :Util, 'unicorn/util'

  class << self
    # Convenience entry point: builds a Server from +options+, starts
    # the master process, and blocks in Server#join until shutdown.
    def run(options = {})
      Server.new(options).start.join
    end
  end
21
+
22
  # This is the process manager of Unicorn. This manages worker
  # processes which in turn handle the I/O and application process.
  # Listener sockets are started in the master process and shared with
  # forked worker children.

  # Struct members double as the configuration surface committed by
  # Configurator#commit! (app, timeout, worker_processes, hooks, etc).
  class Server < Struct.new(:app, :timeout, :worker_processes,
                            :before_fork, :after_fork, :before_exec,
                            :logger, :pid, :listener_opts, :preload_app,
                            :reexec_pid, :orig_app, :init_listeners,
                            :master_pid, :config, :ready_pipe, :user)
    include ::Unicorn::SocketHelper

    # prevents IO objects in here from being GC-ed
    IO_PURGATORY = []

    # all bound listener sockets
    LISTENERS = []

    # This hash maps PIDs to Workers
    WORKERS = {}

    # We use SELF_PIPE differently in the master and worker processes:
    #
    # * The master process never closes or reinitializes this once
    # initialized. Signal handlers in the master process will write to
    # it to wake up the master from IO.select in exactly the same manner
    # djb describes in http://cr.yp.to/docs/selfpipe.html
    #
    # * The workers immediately close the pipe they inherit from the
    # master and replace it with a new pipe after forking. This new
    # pipe is also used to wakeup from IO.select from inside (worker)
    # signal handlers. However, workers *close* the pipe descriptors in
    # the signal handlers to raise EBADF in IO.select instead of writing
    # like we do in the master. We cannot easily use the reader set for
    # IO.select because LISTENERS is already that set, and it's extra
    # work (and cycles) to distinguish the pipe FD from the reader set
    # once IO.select returns. So we're lazy and just close the pipe when
    # a (rare) signal arrives in the worker and reinitialize the pipe later.
    SELF_PIPE = []

    # signal queue used for self-piping
    SIG_QUEUE = []

    # We populate this at startup so we can figure out how to reexecute
    # and upgrade the currently running instance of Unicorn
    # This Hash is considered a stable interface and changing its contents
    # will allow you to switch between different installations of Unicorn
    # or even different installations of the same applications without
    # downtime. Keys of this constant Hash are described as follows:
    #
    # * 0 - the path to the unicorn/unicorn_rails executable
    # * :argv - a deep copy of the ARGV array the executable originally saw
    # * :cwd - the working directory of the application, this is where
    # you originally started Unicorn.
    #
    # The following example may be used in your Unicorn config file to
    # change your working directory during a config reload (HUP) without
    # upgrading or restarting:
    #
    #   Dir.chdir(Unicorn::Server::START_CTX[:cwd] = path)
    #
    # To change your unicorn executable to a different path without downtime,
    # you can set the following in your Unicorn config file, HUP and then
    # continue with the traditional USR2 + QUIT upgrade steps:
    #
    #   Unicorn::Server::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
    START_CTX = {
      :argv => ARGV.map { |arg| arg.dup },
      :cwd => lambda {
        # favor ENV['PWD'] since it is (usually) symlink aware for
        # Capistrano and like systems
        begin
          # same inode+device means PWD and Dir.pwd are the same dir,
          # so the (possibly symlinked) PWD spelling is safe to keep
          a = File.stat(pwd = ENV['PWD'])
          b = File.stat(Dir.pwd)
          a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
        rescue
          Dir.pwd
        end
      }.call,
      0 => $0.dup,
    }
103
+
104
    # This class and its members can be considered a stable interface
    # and will not change in a backwards-incompatible fashion between
    # releases of Unicorn. You may need to access it in the
    # before_fork/after_fork hooks. See the Unicorn::Configurator RDoc
    # for examples.
    #
    # +nr+ is the worker number, +tmp+ the unlinked heartbeat tempfile,
    # +switched+ records whether uid/gid privileges were dropped.
    class Worker < Struct.new(:nr, :tmp, :switched)

      # worker objects may be compared to just plain numbers
      def ==(other_nr)
        self.nr == other_nr
      end

      # Changes the worker process to the specified +user+ and +group+
      # This is only intended to be called from within the worker
      # process from the +after_fork+ hook. This should be called in
      # the +after_fork+ hook after any privileged functions need to be
      # run (e.g. to set per-worker CPU affinity, niceness, etc)
      #
      # Any and all errors raised within this method will be propagated
      # directly back to the caller (usually the +after_fork+ hook).
      # These errors commonly include ArgumentError for specifying an
      # invalid user/group and Errno::EPERM for insufficient privileges
      def user(user, group = nil)
        # we do not protect the caller, checking Process.euid == 0 is
        # insufficient because modern systems have fine-grained
        # capabilities. Let the caller handle any and all errors.
        uid = Etc.getpwnam(user).uid
        gid = Etc.getgrnam(group).gid if group
        # chown log files and the heartbeat file before dropping
        # privileges, while we may still be allowed to do so
        Unicorn::Util.chown_logs(uid, gid)
        tmp.chown(uid, gid)
        if gid && Process.egid != gid
          Process.initgroups(user, gid)
          Process::GID.change_privilege(gid)
        end
        Process.euid != uid and Process::UID.change_privilege(uid)
        # mark switched so init_worker_process will not re-run this
        self.switched = true
      end

    end
143
+
144
    # Creates a working server on host:port (strange things happen if
    # port isn't a Number). Use Server::run to start the server and
    # Server.run.join to join the thread that's processing
    # incoming requests on the socket.
    def initialize(options = {})
      self.app = options.delete(:app)
      self.reexec_pid = 0
      self.ready_pipe = options.delete(:ready_pipe)
      # dup so later mutation of the caller's array cannot leak in here
      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
      self.config = Configurator.new(options.merge(:use_defaults => true))
      self.listener_opts = {}

      # we try inheriting listeners first, so we bind them later.
      # we don't write the pid file until we've bound listeners in case
      # unicorn was started twice by mistake. Even though our #pid= method
      # checks for stale/existing pid files, race conditions are still
      # possible (and difficult/non-portable to avoid) and can be likely
      # to clobber the pid if the second start was in quick succession
      # after the first, so we rely on the listener binding to fail in
      # that case. Some tests (in and outside of this source tree) and
      # monitoring tools may also rely on pid files existing before we
      # attempt to connect to the listener(s)
      config.commit!(self, :skip => [:listeners, :pid])
      self.orig_app = app
    end
169
+
170
    # Runs the thing. Returns self so you can run join on it
    def start
      BasicSocket.do_not_reverse_lookup = true

      # inherit sockets from parents, they need to be plain Socket objects
      # before they become UNIXServer or TCPServer
      # (UNICORN_FD is a comma-separated FD list set by a reexec-ing master)
      inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
        io = Socket.for_fd(fd.to_i)
        set_server_sockopt(io, listener_opts[sock_name(io)])
        IO_PURGATORY << io
        logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
        server_cast(io)
      end

      config_listeners = config[:listeners].dup
      LISTENERS.replace(inherited)

      # we start out with generic Socket objects that get cast to either
      # TCPServer or UNIXServer objects; but since the Socket objects
      # share the same OS-level file descriptor as the higher-level *Server
      # objects; we need to prevent Socket objects from being garbage-collected
      config_listeners -= listener_names
      if config_listeners.empty? && LISTENERS.empty?
        # nothing inherited and nothing configured: fall back to default
        config_listeners << Unicorn::Const::DEFAULT_LISTEN
        init_listeners << Unicorn::Const::DEFAULT_LISTEN
        START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
      end
      config_listeners.each { |addr| listen(addr) }
      raise ArgumentError, "no listeners" if LISTENERS.empty?

      # this pipe is used to wake us up from select(2) in #join when signals
      # are trapped. See trap_deferred.
      init_self_pipe!

      # setup signal handlers before writing pid file in case people get
      # trigger happy and send signals as soon as the pid file exists.
      # Note that signals don't actually get handled until the #join method
      QUEUE_SIGS.each { |sig| trap_deferred(sig) }
      trap(:CHLD) { |_| awaken_master }
      self.pid = config[:pid]

      self.master_pid = $$
      build_app! if preload_app
      maintain_worker_count
      self
    end
216
+
217
    # replaces current listener set with +listeners+. This will
    # close the socket if it will not exist in the new listener set
    def listeners=(listeners)
      cur_names, dead_names = [], []
      listener_names.each do |name|
        # names beginning with '/' are Unix domain socket paths
        if ?/ == name[0]
          # mark unlinked sockets as dead so we can rebind them
          (File.socket?(name) ? cur_names : dead_names) << name
        else
          cur_names << name
        end
      end
      set_names = listener_names(listeners)
      dead_names.concat(cur_names - set_names).uniq!

      LISTENERS.delete_if do |io|
        if dead_names.include?(sock_name(io))
          # also close and drop the plain Socket twin kept alive in
          # IO_PURGATORY (they share the same OS-level descriptor)
          IO_PURGATORY.delete_if do |pio|
            pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
          end
          (io.close rescue nil).nil? # true
        else
          # surviving listener: reapply (possibly changed) socket options
          set_server_sockopt(io, listener_opts[sock_name(io)])
          false
        end
      end

      (set_names - cur_names).each { |addr| listen(addr) }
    end
246
+
247
+ def stdout_path=(path); redirect_io($stdout, path); end
248
+ def stderr_path=(path); redirect_io($stderr, path); end
249
+
250
    # sets the path for the PID file of the master process
    # Writes $$ atomically (tempfile + rename) and unlinks any previous
    # pid file we owned. Raises ArgumentError if another live process
    # already owns the target pid file.
    def pid=(path)
      if path
        if x = valid_pid?(path)
          # no-op when re-assigning our own, unchanged pid file
          return path if pid && path == pid && x == $$
          raise ArgumentError, "Already running on PID:#{x} " \
                               "(or pid=#{path} is stale)"
        end
      end
      unlink_pid_safe(pid) if pid

      if path
        fp = begin
          # O_EXCL + random name, retried on collision, then rename:
          # guarantees readers never observe a partially-written file
          tmp = "#{File.dirname(path)}/#{rand}.#$$"
          File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
        rescue Errno::EEXIST
          retry
        end
        fp.syswrite("#$$\n")
        File.rename(fp.path, path)
        fp.close
      end
      super(path)
    end
274
+
275
    # add a given address to the +listeners+ set, idempotently
    # Allows workers to add a private, per-process listener via the
    # after_fork hook. Very useful for debugging and testing.
    # +:tries+ may be specified as an option for the number of times
    # to retry, and +:delay+ may be specified as the time in seconds
    # to delay between retries.
    # A negative value for +:tries+ indicates the listen will be
    # retried indefinitely, this is useful when workers belonging to
    # different masters are spawned during a transparent upgrade.
    def listen(address, opt = {}.merge(listener_opts[address] || {}))
      address = config.expand_addr(address)
      # idempotence: skip if we are already listening on this address
      return if String === address && listener_names.include?(address)

      delay = opt[:delay] || 0.5
      tries = opt[:tries] || 5
      begin
        io = bind_listen(address, opt)
        # cast plain Socket to TCPServer/UNIXServer, keeping the
        # original object alive so its shared FD is not GC-closed
        unless TCPServer === io || UNIXServer === io
          IO_PURGATORY << io
          io = server_cast(io)
        end
        logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
        LISTENERS << io
        io
      rescue Errno::EADDRINUSE => err
        logger.error "adding listener failed addr=#{address} (in use)"
        raise err if tries == 0
        tries -= 1
        logger.error "retrying in #{delay} seconds " \
                     "(#{tries < 0 ? 'infinite' : tries} tries left)"
        sleep(delay)
        retry
      rescue => err
        logger.fatal "error adding listener addr=#{address}"
        raise err
      end
    end
312
+
313
    # monitors children and receives signals forever
    # (or until a termination signal is sent). This handles signals
    # one-at-a-time time and we'll happily drop signals in case somebody
    # is signalling us too often.
    def join
      respawn = true
      last_check = Time.now

      proc_name 'master'
      logger.info "master process ready" # test_exec.rb relies on this message
      if ready_pipe
        # notify the process that daemonized us of our pid, then detach
        ready_pipe.syswrite($$.to_s)
        ready_pipe.close rescue nil
        self.ready_pipe = nil
      end
      begin
        loop do
          reap_all_workers
          # SIG_QUEUE is filled by trap_deferred; nil means no pending signal
          case SIG_QUEUE.shift
          when nil
            # avoid murdering workers after our master process (or the
            # machine) comes out of suspend/hibernation
            if (last_check + timeout) >= (last_check = Time.now)
              murder_lazy_workers
            end
            maintain_worker_count if respawn
            master_sleep
          when :QUIT # graceful shutdown
            break
          when :TERM, :INT # immediate shutdown
            stop(false)
            break
          when :USR1 # rotate logs
            logger.info "master reopening logs..."
            Unicorn::Util.reopen_logs
            logger.info "master done reopening logs"
            kill_each_worker(:USR1)
          when :USR2 # exec binary, stay alive in case something went wrong
            reexec
          when :WINCH
            if Process.ppid == 1 || Process.getpgrp != $$
              respawn = false
              logger.info "gracefully stopping all workers"
              kill_each_worker(:QUIT)
            else
              logger.info "SIGWINCH ignored because we're not daemonized"
            end
          when :TTIN
            self.worker_processes += 1
          when :TTOU
            self.worker_processes -= 1 if self.worker_processes > 0
          when :HUP
            respawn = true
            if config.config_file
              load_config!
              redo # immediate reaping since we may have QUIT workers
            else # exec binary and exit if there's no config file
              logger.info "config_file not present, reexecuting binary"
              reexec
              break
            end
          end
        end
      rescue Errno::EINTR
        retry
      rescue => e
        # never let the master die from an unexpected exception; log and go on
        logger.error "Unhandled master loop exception #{e.inspect}."
        logger.error e.backtrace.join("\n")
        retry
      end
      stop # gracefully shutdown all workers on our way out
      logger.info "master complete"
      unlink_pid_safe(pid) if pid
    end
387
+
388
    # Terminates all workers, but does not exit master process
    # +graceful+ selects QUIT (finish current request) vs TERM; any
    # workers still alive after +timeout+ seconds get SIGKILL.
    def stop(graceful = true)
      # closing listeners first so no new connections are accepted
      self.listeners = []
      limit = Time.now + timeout
      until WORKERS.empty? || Time.now > limit
        kill_each_worker(graceful ? :QUIT : :TERM)
        sleep(0.1)
        reap_all_workers
      end
      kill_each_worker(:KILL)
    end
399
+
400
    private

    # list of signals we care about and trap in master.
    QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
                   :TTIN, :TTOU ]
405
+
406
    # defer a signal for later processing in #join (master process)
    # The handler only pushes onto SIG_QUEUE and pokes the self-pipe;
    # real handling happens in the #join loop. The queue is capped at
    # 5 entries so a signal flood cannot grow it without bound.
    def trap_deferred(signal)
      trap(signal) do |sig_nr|
        if SIG_QUEUE.size < 5
          SIG_QUEUE << signal
          awaken_master
        else
          logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
        end
      end
    end
417
+
418
    # wait for a signal handler to wake us up and then consume the pipe
    # Wake up every second anyways to run murder_lazy_workers
    def master_sleep
      begin
        ready = IO.select([SELF_PIPE.first], nil, nil, 1) or return
        ready.first && ready.first.first or return
        # drain every wakeup byte so the next select blocks again
        loop { SELF_PIPE.first.read_nonblock(Const::CHUNK_SIZE) }
      rescue Errno::EAGAIN, Errno::EINTR
      end
    end
428
+
429
+ def awaken_master
430
+ begin
431
+ SELF_PIPE.last.write_nonblock('.') # wakeup master process from select
432
+ rescue Errno::EAGAIN, Errno::EINTR
433
+ # pipe is full, master should wake up anyways
434
+ retry
435
+ end
436
+ end
437
+
438
    # reaps all unreaped workers
    # Non-blocking (WNOHANG); also detects exit of a reexec-ed master
    # child and restores our pid file name in that case.
    def reap_all_workers
      begin
        loop do
          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
          wpid or break
          if reexec_pid == wpid
            # the USR2-spawned master died; reclaim the ".oldbin"-free name
            logger.error "reaped #{status.inspect} exec()-ed"
            self.reexec_pid = 0
            self.pid = pid.chomp('.oldbin') if pid
            proc_name 'master'
          else
            worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
            logger.info "reaped #{status.inspect} " \
                        "worker=#{worker.nr rescue 'unknown'}"
          end
        end
      rescue Errno::ECHILD
      end
    end
458
+
459
    # reexecutes the START_CTX with a new binary
    # Implements the USR2 upgrade: renames our pid file to *.oldbin,
    # forks, and in the child exec()s the original command line with
    # listener FDs passed via the UNICORN_FD environment variable.
    def reexec
      if reexec_pid > 0
        begin
          # signal 0 only checks existence of the previous upgrade child
          Process.kill(0, reexec_pid)
          logger.error "reexec-ed child already running PID:#{reexec_pid}"
          return
        rescue Errno::ESRCH
          self.reexec_pid = 0
        end
      end

      if pid
        old_pid = "#{pid}.oldbin"
        prev_pid = pid.dup
        begin
          self.pid = old_pid # clear the path for a new pid file
        rescue ArgumentError
          logger.error "old PID:#{valid_pid?(old_pid)} running with " \
                       "existing pid=#{old_pid}, refusing rexec"
          return
        rescue => e
          logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
          return
        end
      end

      self.reexec_pid = fork do
        listener_fds = LISTENERS.map { |sock| sock.fileno }
        ENV['UNICORN_FD'] = listener_fds.join(',')
        Dir.chdir(START_CTX[:cwd])
        cmd = [ START_CTX[0] ].concat(START_CTX[:argv])

        # avoid leaking FDs we don't know about, but let before_exec
        # unset FD_CLOEXEC, if anything else in the app eventually
        # relies on FD inheritence.
        (3..1024).each do |io|
          next if listener_fds.include?(io)
          io = IO.for_fd(io) rescue nil
          io or next
          IO_PURGATORY << io
          io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
        end
        logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
        before_exec.call(self)
        exec(*cmd)
      end
      proc_name 'master (old)'
    end
508
+
509
    # forcibly terminate all workers that haven't checked in in timeout
    # seconds. The timeout is implemented using an unlinked File
    # shared between the parent process and each worker. The worker
    # runs File#chmod to modify the ctime of the File. If the ctime
    # is stale for >timeout seconds, then we'll kill the corresponding
    # worker.
    def murder_lazy_workers
      WORKERS.dup.each_pair do |wpid, worker|
        stat = worker.tmp.stat
        # skip workers that disable fchmod or have never fchmod-ed
        # (0100600 is the mode the tempfile starts with; the heartbeat
        # toggles it between 0 and 1)
        stat.mode == 0100600 and next
        (diff = (Time.now - stat.ctime)) <= timeout and next
        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
                     "(#{diff}s > #{timeout}s), killing"
        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
      end
    end
526
+
527
    # forks one worker per missing worker number up to worker_processes.
    # Relies on Worker#== comparing against plain numbers to detect
    # which numbers are already occupied.
    def spawn_missing_workers
      (0...worker_processes).each do |worker_nr|
        WORKERS.values.include?(worker_nr) and next
        worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
        before_fork.call(self, worker)
        WORKERS[fork {
          # the child never needs the daemonization ready_pipe
          ready_pipe.close if ready_pipe
          self.ready_pipe = nil
          worker_loop(worker)
        }] = worker
      end
    end
539
+
540
    # reconciles the running worker set with the configured
    # worker_processes count: spawns missing workers or gracefully
    # QUITs workers whose number is now out of range (TTOU shrink).
    def maintain_worker_count
      (off = WORKERS.size - worker_processes) == 0 and return
      off < 0 and return spawn_missing_workers
      WORKERS.dup.each_pair { |wpid,w|
        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
      }
    end
547
+
548
+ # once a client is accepted, it is processed in its entirety here
549
+ # in 3 easy steps: read request, call app, write app response
550
+ def process_client(client)
551
+ client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
552
+ app.call(client)
553
+ end
554
+
555
    # gets rid of stuff the worker has no business keeping track of
    # to free some resources and drops all sig handlers.
    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
    # by the user.
    def init_worker_process(worker)
      # undo the master's deferred traps; workers install their own below
      QUEUE_SIGS.each { |sig| trap(sig, nil) }
      trap(:CHLD, 'DEFAULT')
      SIG_QUEUE.clear
      proc_name "work[#{worker.nr}]"
      START_CTX.clear
      init_self_pipe!
      # close every sibling's heartbeat file; only ours matters now
      WORKERS.values.each { |other| other.tmp.close rescue nil }
      WORKERS.clear
      LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
      worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
      after_fork.call(self, worker) # can drop perms
      # drop privileges here only if after_fork did not already do it
      worker.user(*user) if user.kind_of?(Array) && ! worker.switched
      self.timeout /= 2.0 # halve it for select()
      build_app! unless preload_app
    end
575
+
576
    # reopens log files in a worker after SIGUSR1 (log rotation);
    # also rebuilds the self-pipe the USR1 trap closed to break
    # out of IO.select (see worker_loop)
    def reopen_worker_logs(worker_nr)
      logger.info "worker=#{worker_nr} reopening logs..."
      Unicorn::Util.reopen_logs
      logger.info "worker=#{worker_nr} done reopening logs"
      init_self_pipe!
    end
582
+
583
    # runs inside each forked worker, this sits around and waits
    # for connections and doesn't die until the parent dies (or is
    # given a INT, QUIT, or TERM signal)
    #
    # Heartbeat: the worker toggles the mode bit of its unlinked tmp
    # file around blocking points so the master's murder_lazy_workers
    # can see the ctime advance.
    def worker_loop(worker)
      ppid = master_pid
      init_worker_process(worker)
      nr = 0 # this becomes negative if we need to reopen logs
      alive = worker.tmp # tmp is our lifeline to the master process
      ready = LISTENERS

      # closing anything we IO.select on will raise EBADF
      trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
      trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
      [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
      logger.info "worker=#{worker.nr} ready"
      m = 0

      begin
        nr < 0 and reopen_worker_logs(worker.nr)
        nr = 0

        # we're a goner in timeout seconds anyways if alive.chmod
        # breaks, so don't trap the exception. Using fchmod() since
        # futimes() is not available in base Ruby and I very strongly
        # prefer temporary files to be unlinked for security,
        # performance and reliability reasons, so utime is out. No-op
        # changes with chmod doesn't update ctime on all filesystems; so
        # we change our counter each and every time (after process_client
        # and before IO.select).
        alive.chmod(m = 0 == m ? 1 : 0)

        ready.each do |sock|
          begin
            # non-blocking accept: EAGAIN just means another worker won
            process_client(sock.accept_nonblock)
            nr += 1
            alive.chmod(m = 0 == m ? 1 : 0)
          rescue Errno::EAGAIN, Errno::ECONNABORTED
          end
          break if nr < 0
        end

        # make the following bet: if we accepted clients this round,
        # we're probably reasonably busy, so avoid calling select()
        # and do a speculative accept_nonblock on ready listeners
        # before we sleep again in select().
        redo unless nr == 0 # (nr < 0) => reopen logs

        # orphan check: if the master died, our ppid changed; exit
        ppid == Process.ppid or return
        alive.chmod(m = 0 == m ? 1 : 0)
        begin
          # timeout used so we can detect parent death:
          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
          ready = ret.first
        rescue Errno::EINTR
          ready = LISTENERS
        rescue Errno::EBADF
          # EBADF means a trap closed a socket/pipe: reopen logs if USR1,
          # otherwise (QUIT closed the listeners) fall out and exit
          nr < 0 or return
        end
      rescue => e
        if alive
          logger.error "Unhandled listen loop exception #{e.inspect}."
          logger.error e.backtrace.join("\n")
        end
      end while alive
    end
648
+
649
+ # delivers a signal to a worker and fails gracefully if the worker
650
+ # is no longer running.
651
+ def kill_worker(signal, wpid)
652
+ begin
653
+ Process.kill(signal, wpid)
654
+ rescue Errno::ESRCH
655
+ worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
656
+ end
657
+ end
658
+
659
+ # delivers a signal to each worker
660
+ def kill_each_worker(signal)
661
+ WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
662
+ end
663
+
664
+ # unlinks a PID file at given +path+ if it contains the current PID
665
+ # still potentially racy without locking the directory (which is
666
+ # non-portable and may interact badly with other programs), but the
667
+ # window for hitting the race condition is small
668
+ def unlink_pid_safe(path)
669
+ (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
670
+ end
671
+
672
+ # returns a PID if a given path contains a non-stale PID file,
673
+ # nil otherwise.
674
+ def valid_pid?(path)
675
+ wpid = File.read(path).to_i
676
+ wpid <= 0 and return nil
677
+ begin
678
+ Process.kill(0, wpid)
679
+ wpid
680
+ rescue Errno::ESRCH
681
+ # don't unlink stale pid files, racy without non-portable locking...
682
+ end
683
+ rescue Errno::ENOENT
684
+ end
685
+
686
    # reloads the configuration file on SIGHUP, re-commits it to self,
    # gracefully QUITs existing workers (they respawn with the new
    # settings) and rebuilds the app if preloading.  On any error the
    # previous app object is restored and the old config stays in effect.
    def load_config!
      loaded_app = app
      begin
        logger.info "reloading config_file=#{config.config_file}"
        # restore the original listener list so the config file's
        # listen directives are evaluated against a known baseline
        config[:listeners].replace(init_listeners)
        config.reload
        config.commit!(self)
        kill_each_worker(:QUIT)
        Unicorn::Util.reopen_logs
        self.app = orig_app
        build_app! if preload_app
        logger.info "done reloading config_file=#{config.config_file}"
      rescue StandardError, LoadError, SyntaxError => e
        logger.error "error reloading config_file=#{config.config_file}: " \
                     "#{e.class} #{e.message}"
        self.app = loaded_app
      end
    end
704
+
705
    # returns an array of string names for the given listener array
    # (defaults to the global LISTENERS set); names come from
    # SocketHelper#sock_name
    def listener_names(listeners = LISTENERS)
      listeners.map { |io| sock_name(io) }
    end
709
+
710
    # materializes the app if it was given as a zero-arity builder proc
    # (the deferred form used with preload_app); otherwise a no-op
    def build_app!
      if app.respond_to?(:arity) && app.arity == 0
        # exploit COW in case of preload_app. Also avoids race
        # conditions in Rainbows! since load/require are not thread-safe
        Unicorn.constants.each { |x| Unicorn.const_get(x) }

        if defined?(Gem) && Gem.respond_to?(:refresh)
          logger.info "Refreshing Gem list"
          Gem.refresh
        end
        self.app = app.call
      end
    end
723
+
724
+ def proc_name(tag)
725
+ $0 = ([ File.basename(START_CTX[0]), tag
726
+ ]).concat(START_CTX[:argv]).join(' ')
727
+ end
728
+
729
+ def redirect_io(io, path)
730
+ File.open(path, 'ab') { |fp| io.reopen(fp) } if path
731
+ io.sync = true
732
+ end
733
+
734
    # (re)builds the self-pipe used for signal wakeups: closes any old
    # descriptors, creates a fresh pipe, and marks both ends FD_CLOEXEC
    # so they never leak into exec()-ed children
    def init_self_pipe!
      SELF_PIPE.each { |io| io.close rescue nil }
      SELF_PIPE.replace(IO.pipe)
      SELF_PIPE.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
    end
739
+
740
+ end
741
+ end