resque-master 0.0.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/HISTORY.md +488 -0
- data/LICENSE +20 -0
- data/README.markdown +920 -0
- data/Rakefile +57 -0
- data/bin/resque +81 -0
- data/bin/resque-web +31 -0
- data/lib/resque.rb +578 -0
- data/lib/resque/data_store.rb +326 -0
- data/lib/resque/errors.rb +21 -0
- data/lib/resque/failure.rb +119 -0
- data/lib/resque/failure/airbrake.rb +33 -0
- data/lib/resque/failure/base.rb +73 -0
- data/lib/resque/failure/multiple.rb +68 -0
- data/lib/resque/failure/redis.rb +128 -0
- data/lib/resque/failure/redis_multi_queue.rb +104 -0
- data/lib/resque/helpers.rb +48 -0
- data/lib/resque/job.rb +296 -0
- data/lib/resque/log_formatters/quiet_formatter.rb +7 -0
- data/lib/resque/log_formatters/verbose_formatter.rb +7 -0
- data/lib/resque/log_formatters/very_verbose_formatter.rb +8 -0
- data/lib/resque/logging.rb +18 -0
- data/lib/resque/plugin.rb +78 -0
- data/lib/resque/server.rb +299 -0
- data/lib/resque/server/helpers.rb +64 -0
- data/lib/resque/server/public/favicon.ico +0 -0
- data/lib/resque/server/public/idle.png +0 -0
- data/lib/resque/server/public/jquery-1.12.4.min.js +5 -0
- data/lib/resque/server/public/jquery.relatize_date.js +95 -0
- data/lib/resque/server/public/poll.png +0 -0
- data/lib/resque/server/public/ranger.js +78 -0
- data/lib/resque/server/public/reset.css +44 -0
- data/lib/resque/server/public/style.css +91 -0
- data/lib/resque/server/public/working.png +0 -0
- data/lib/resque/server/test_helper.rb +19 -0
- data/lib/resque/server/views/error.erb +1 -0
- data/lib/resque/server/views/failed.erb +29 -0
- data/lib/resque/server/views/failed_job.erb +50 -0
- data/lib/resque/server/views/failed_queues_overview.erb +24 -0
- data/lib/resque/server/views/key_sets.erb +17 -0
- data/lib/resque/server/views/key_string.erb +11 -0
- data/lib/resque/server/views/layout.erb +44 -0
- data/lib/resque/server/views/next_more.erb +22 -0
- data/lib/resque/server/views/overview.erb +4 -0
- data/lib/resque/server/views/queues.erb +58 -0
- data/lib/resque/server/views/stats.erb +62 -0
- data/lib/resque/server/views/workers.erb +111 -0
- data/lib/resque/server/views/working.erb +72 -0
- data/lib/resque/stat.rb +58 -0
- data/lib/resque/tasks.rb +72 -0
- data/lib/resque/thread_signal.rb +45 -0
- data/lib/resque/vendor/utf8_util.rb +26 -0
- data/lib/resque/vendor/utf8_util/utf8_util_18.rb +91 -0
- data/lib/resque/vendor/utf8_util/utf8_util_19.rb +6 -0
- data/lib/resque/version.rb +5 -0
- data/lib/resque/worker.rb +891 -0
- data/lib/tasks/redis.rake +161 -0
- data/lib/tasks/resque.rake +2 -0
- metadata +177 -0
data/lib/resque/stat.rb
ADDED
@@ -0,0 +1,58 @@
|
|
1
|
+
module Resque
  # The stat subsystem. Used to keep track of integer counts.
  #
  #   Get a stat:  Stat[name]
  #   Incr a stat: Stat.incr(name)
  #   Decr a stat: Stat.decr(name)
  #   Kill a stat: Stat.clear(name)
  module Stat
    extend self

    # Direct access to the Redis instance.
    def redis
      Resque.redis
    end
    alias_method :data_store, :redis

    # Returns the int value of a stat, given a string stat name.
    def get(stat)
      data_store.stat(stat)
    end

    # Alias of `get`
    def [](stat)
      get(stat)
    end

    # For a string stat name, increments the stat by one.
    #
    # Can optionally accept a second int parameter. The stat is then
    # incremented by that amount.
    def incr(stat, by = 1)
      data_store.increment_stat(stat, by)
    end

    # Increments a stat by one.
    def <<(stat)
      incr(stat)
    end

    # For a string stat name, decrements the stat by one.
    #
    # Can optionally accept a second int parameter. The stat is then
    # decremented by that amount.
    #
    # NOTE(review): `decremet_stat` (sic) is spelled this way on purpose --
    # it matches the method name declared on the data store. Confirm both
    # sides before renaming.
    def decr(stat, by = 1)
      data_store.decremet_stat(stat, by)
    end

    # Decrements a stat by one.
    def >>(stat)
      decr(stat)
    end

    # Removes a stat from Redis, effectively setting it to 0.
    def clear(stat)
      data_store.clear_stat(stat)
    end
  end
end
|
data/lib/resque/tasks.rb
ADDED
@@ -0,0 +1,72 @@
|
|
1
|
+
# require 'resque/tasks'
# will give you the resque tasks


namespace :resque do
  # No-op hook. Applications enhance/override this task to load their
  # environment before a worker starts (e.g. `task "resque:setup" => :environment`).
  task :setup

  desc "Start a Resque worker"
  task :work => [ :preload, :setup ] do
    require 'resque'

    begin
      # Worker.new reads QUEUE/QUEUES from ENV when given no arguments.
      worker = Resque::Worker.new
    rescue Resque::NoQueueError
      abort "set QUEUE env var, e.g. $ QUEUE=critical,high rake resque:work"
    end

    worker.prepare
    worker.log "Starting worker #{self}"
    worker.work(ENV['INTERVAL'] || 5) # interval, will block
  end

  desc "Start multiple Resque workers. Should only be used in dev mode."
  task :workers do
    threads = []

    if ENV['COUNT'].to_i < 1
      abort "set COUNT env var, e.g. $ COUNT=2 rake resque:workers"
    end

    # Each worker runs as a separate `rake resque:work` subprocess; the
    # threads only exist to spawn and wait on those processes.
    ENV['COUNT'].to_i.times do
      threads << Thread.new do
        system "rake resque:work"
      end
    end

    threads.each { |thread| thread.join }
  end

  # Preload app files if this is Rails
  task :preload => :setup do
    if defined?(Rails)
      if Rails::VERSION::MAJOR > 3
        ActiveSupport.run_load_hooks(:before_eager_load, Rails.application)
        Rails.application.config.eager_load_namespaces.each(&:eager_load!)

      elsif Rails::VERSION::MAJOR == 3
        ActiveSupport.run_load_hooks(:before_eager_load, Rails.application)
        Rails.application.eager_load!

      elsif defined?(Rails::Initializer)
        # Rails 2.x path.
        $rails_rake_task = false
        Rails::Initializer.run :load_application_classes
      end
    end
  end

  namespace :failures do
    desc "Sort the 'failed' queue for the redis_multi_queue failure backend"
    task :sort do
      require 'resque'
      require 'resque/failure/redis'

      warn "Sorting #{Resque::Failure.count} failures..."
      # Copy every failure into its per-queue failure list.
      Resque::Failure.each(0, Resque::Failure.count) do |_, failure|
        data = Resque.encode(failure)
        Resque.redis.rpush(Resque::Failure.failure_queue_name(failure['queue']), data)
      end
      warn "done!"
    end
  end
end
|
@@ -0,0 +1,45 @@
|
|
1
|
+
# A one-shot, thread-safe signal flag: one thread calls #signal, another
# blocks in #wait_for_signal with a timeout.
class Resque::ThreadSignal
  if RUBY_VERSION <= "1.9"
    # Ruby 1.8 lacks ConditionVariable#wait with a timeout, so this branch
    # polls the flag instead.
    def initialize
      @signaled = false
    end

    def signal
      @signaled = true
    end

    # Poll in 100ms steps for up to +timeout+ seconds; returns whether the
    # signal arrived.
    def wait_for_signal(timeout)
      ticks = 10 * timeout
      ticks.times do
        sleep(0.1)
        return true if @signaled
      end

      @signaled
    end

  else
    def initialize
      @mutex = Mutex.new
      @signaled = false
      @received = ConditionVariable.new
    end

    # Set the flag and wake any waiter.
    def signal
      @mutex.synchronize do
        @signaled = true
        @received.signal
      end
    end

    # Block for up to +timeout+ seconds; returns whether the signal arrived.
    # Re-checks the flag under the mutex so a signal sent before the wait
    # began is not lost.
    def wait_for_signal(timeout)
      @mutex.synchronize do
        @received.wait(@mutex, timeout) unless @signaled
        @signaled
      end
    end

  end
end
|
@@ -0,0 +1,26 @@
|
|
1
|
+
module UTF8Util
  # Use '?' rather than the Unicode replacement character: the latter is
  # three bytes in UTF-8 and can grow the string if substituted repeatedly.
  REPLACEMENT_CHAR = "?"

  # Replace invalid UTF-8 character sequences with a replacement character,
  # mutating +str+ in place.
  #
  # This is a stub: a version-specific implementation elsewhere in the gem
  # redefines it. Calling the stub always raises.
  #
  # Returns self as valid UTF-8.
  def self.clean!(str)
    raise NotImplementedError
  end

  # Non-destructive counterpart of clean!: operates on a duplicate.
  #
  # Returns a copy of this String as valid UTF-8.
  def self.clean(str)
    clean!(str.dup)
  end

end
|
21
|
+
|
22
|
+
# Load the interpreter-appropriate implementation of UTF8Util.clean!:
# the _18 variant scans bytes by hand, the _19 variant can rely on
# String encoding support.
if RUBY_VERSION <= '1.9'
  require 'resque/vendor/utf8_util/utf8_util_18'
else
  require 'resque/vendor/utf8_util/utf8_util_19'
end
|
@@ -0,0 +1,91 @@
|
|
1
|
+
require 'strscan'

# Byte-level UTF-8 validation and cleaning for Ruby 1.8, where strings have
# no encoding and indexing (`str[i]`, `get_byte[0]`) yields Integer byte
# values.
#
# NOTE(review): only required when RUBY_VERSION <= '1.9'. The raw-byte
# regexp literal and the Integer `get_byte[0]` idiom below are not valid on
# modern rubies -- do not load this file elsewhere.
module UTF8Util
  # Matches any byte with the high bit set, i.e. the lead byte of a
  # potentially multi-byte (non-ASCII) sequence.
  HIGH_BIT_RANGE = /[\x80-\xff]/

  # Check if this String is valid UTF-8
  #
  # Returns true or false.
  def self.valid?(str)
    sc = StringScanner.new(str)

    while sc.skip_until(HIGH_BIT_RANGE)
      # skip_until leaves the cursor just past the matched byte; back up so
      # sequence_length sees the lead byte.
      sc.pos -= 1

      if !sequence_length(sc)
        return false
      end
    end

    true
  end

  # Replace invalid UTF-8 character sequences with a replacement character
  #
  # Returns self as valid UTF-8.
  def self.clean!(str)
    sc = StringScanner.new(str)
    while sc.skip_until(HIGH_BIT_RANGE)
      pos = sc.pos = sc.pos-1

      if !sequence_length(sc)
        # Overwrite only the lead byte; subsequent bad bytes are handled on
        # later loop iterations.
        str[pos] = REPLACEMENT_CHAR
      end
    end

    str
  end

  # Validate the UTF-8 sequence at the current scanner position.
  #
  # scanner - StringScanner instance so we can advance the pointer as we verify.
  #
  # Returns The length in bytes of this UTF-8 sequence, false if invalid.
  #
  # The lead byte's high bits select the expected length (110xxxxx => 2,
  # 1110xxxx => 3, 11110xxx => 4); each continuation byte must match
  # 10xxxxxx. On failure the scanner is rewound past only the bytes that
  # were consumed speculatively.
  def self.sequence_length(scanner)
    leader = scanner.get_byte[0]

    if (leader >> 5) == 0x6
      if check_next_sequence(scanner)
        return 2
      else
        scanner.pos -= 1
      end
    elsif (leader >> 4) == 0x0e
      if check_next_sequence(scanner)
        if check_next_sequence(scanner)
          return 3
        else
          scanner.pos -= 2
        end
      else
        scanner.pos -= 1
      end
    elsif (leader >> 3) == 0x1e
      if check_next_sequence(scanner)
        if check_next_sequence(scanner)
          if check_next_sequence(scanner)
            return 4
          else
            scanner.pos -= 3
          end
        else
          scanner.pos -= 2
        end
      else
        scanner.pos -= 1
      end
    end

    false
  end

  private

  # Read another byte off the scanner, moving the scan position forward one
  # place, and report whether it is a valid continuation byte (10xxxxxx).
  #
  # NOTE(review): `private` has no effect on methods defined with
  # `def self.`; check_next_sequence remains publicly callable.
  def self.check_next_sequence(scanner)
    byte = scanner.get_byte[0]
    (byte >> 6) == 0x2
  end
end
|
@@ -0,0 +1,891 @@
|
|
1
|
+
require 'time'
|
2
|
+
require 'set'
|
3
|
+
|
4
|
+
module Resque
|
5
|
+
# A Resque Worker processes jobs. On platforms that support fork(2),
|
6
|
+
# the worker will fork off a child to process each job. This ensures
|
7
|
+
# a clean slate when beginning the next job and cuts down on gradual
|
8
|
+
# memory growth as well as low level failures.
|
9
|
+
#
|
10
|
+
# It also ensures workers are always listening to signals from you,
|
11
|
+
# their master, and can react accordingly.
|
12
|
+
class Worker
|
13
|
+
include Resque::Helpers
|
14
|
+
extend Resque::Helpers
|
15
|
+
include Resque::Logging
|
16
|
+
|
17
|
+
    # Heartbeat threads spawned by every Worker in this process, so they can
    # all be stopped at once (used on shutdown / in tests).
    @@all_heartbeat_threads = []
    def self.kill_all_heartbeat_threads
      @@all_heartbeat_threads.each(&:kill).each(&:join)
      @@all_heartbeat_threads = []
    end

    # Instance-level access to the shared Redis connection.
    def redis
      Resque.redis
    end
    alias :data_store :redis

    # Class-level access to the shared Redis connection.
    def self.redis
      Resque.redis
    end

    def self.data_store
      self.redis
    end

    # Given a Ruby object, returns a string suitable for storage in a
    # queue.
    def encode(object)
      Resque.encode(object)
    end

    # Given a string, returns a Ruby object.
    def decode(object)
      Resque.decode(object)
    end
|
46
|
+
|
47
|
+
    # Seconds to wait for a forked child to exit after TERM before sending
    # KILL (see #new_kill_child).
    attr_accessor :term_timeout

    # decide whether to use new_kill_child logic
    attr_accessor :term_child

    # should term kill workers gracefully (vs. immediately)
    # Makes SIGTERM work like SIGQUIT
    attr_accessor :graceful_term

    # When set to true, forked workers will exit with `exit`, calling any `at_exit` code handlers that have been
    # registered in the application. Otherwise, forked workers exit with `exit!`
    attr_accessor :run_at_exit_hooks

    # Writers used by Worker.find to rehydrate a worker object from its
    # "host:pid:queues" string id.
    attr_writer :fork_per_job
    attr_writer :hostname
    attr_writer :to_s
    attr_writer :pid
|
64
|
+
|
65
|
+
    # Returns an array of all worker objects.
    def self.all
      data_store.worker_ids.map { |id| find(id, :skip_exists => true) }.compact
    end

    # Returns an array of all worker objects currently processing
    # jobs.
    def self.working
      names = all
      return [] unless names.any?

      reportedly_working = {}

      begin
        reportedly_working = data_store.workers_map(names).reject do |key, value|
          value.nil? || value.empty?
        end
      rescue Redis::Distributed::CannotDistribute
        # Redis::Distributed cannot fetch all keys in one call; fall back to
        # one lookup per worker.
        names.each do |name|
          value = data_store.get_worker_payload(name)
          reportedly_working[name] = value unless value.nil? || value.empty?
        end
      end

      reportedly_working.keys.map do |key|
        worker = find(key.sub("worker:", ''), :skip_exists => true)
        worker.job = worker.decode(reportedly_working[key])
        worker
      end.compact
    end

    # Returns a single worker object. Accepts a string id
    # ("host:pid:queue1,queue2"). With :skip_exists the Redis existence
    # check is bypassed.
    def self.find(worker_id, options = {})
      skip_exists = options[:skip_exists]

      if skip_exists || exists?(worker_id)
        host, pid, queues_raw = worker_id.split(':')
        queues = queues_raw.split(',')
        worker = new(*queues)
        worker.hostname = host
        worker.to_s = worker_id
        worker.pid = pid.to_i
        worker
      else
        nil
      end
    end

    # Alias of `find`
    def self.attach(worker_id)
      find(worker_id)
    end

    # Given a string worker id, return a boolean indicating whether the
    # worker exists
    def self.exists?(worker_id)
      data_store.worker_exists?(worker_id)
    end
|
123
|
+
|
124
|
+
    # Workers should be initialized with an array of string queue
    # names. The order is important: a Worker will check the first
    # queue given for a job. If none is found, it will check the
    # second queue name given. If a job is found, it will be
    # processed. Upon completion, the Worker will again check the
    # first queue given, and so forth. In this way the queue list
    # passed to a Worker on startup defines the priorities of queues.
    #
    # If passed a single "*", this Worker will operate on all queues
    # in alphabetical order. Queues can be dynamically added or
    # removed without needing to restart workers using this method.
    #
    # Workers should have `#prepare` called after they are initialized
    # if you are running work on the worker.
    def initialize(*queues)
      @shutdown = nil
      @paused = nil
      @before_first_fork_hook_ran = false

      # Most knobs come from the environment so `rake resque:work` can be
      # configured without code changes.
      verbose_value = ENV['LOGGING'] || ENV['VERBOSE']
      self.verbose = verbose_value if verbose_value
      self.very_verbose = ENV['VVERBOSE'] if ENV['VVERBOSE']
      self.term_timeout = ENV['RESQUE_TERM_TIMEOUT'] || 4.0
      self.term_child = ENV['TERM_CHILD']
      self.graceful_term = ENV['GRACEFUL_TERM']
      self.run_at_exit_hooks = ENV['RUN_AT_EXIT_HOOKS']

      self.queues = queues
    end

    # Daemonizes the worker if ENV['BACKGROUND'] is set and writes
    # the process id to ENV['PIDFILE'] if set. Should only be called
    # once per worker.
    def prepare
      if ENV['BACKGROUND']
        unless Process.respond_to?('daemon')
          abort "env var BACKGROUND is set, which requires ruby >= 1.9"
        end
        Process.daemon(true)
        # Daemonizing forks: drop the inherited Redis socket and reconnect.
        self.reconnect
      end

      if ENV['PIDFILE']
        File.open(ENV['PIDFILE'], 'w') { |f| f << pid }
      end
    end
|
170
|
+
|
171
|
+
    # Assigns the queue list, falling back to the QUEUES/QUEUE env vars
    # when none are passed. Raises NoQueueError (via validate_queues) if the
    # result is empty.
    def queues=(queues)
      queues = queues.empty? ? (ENV["QUEUES"] || ENV['QUEUE']).to_s.split(',') : queues
      @queues = queues.map { |queue| queue.to_s.strip }
      # When no glob metacharacters are present the list is fixed; cache it
      # so #queues need not expand globs on every call.
      unless ['*', '?', '{', '}', '[', ']'].any? {|char| @queues.join.include?(char) }
        @static_queues = @queues.flatten.uniq
      end
      validate_queues
    end

    # A worker must be given a queue, otherwise it won't know what to
    # do with itself.
    #
    # You probably never need to call this.
    def validate_queues
      if @queues.nil? || @queues.empty?
        raise NoQueueError.new("Please give each worker at least one queue.")
      end
    end

    # Returns a list of queues to use when searching for a job.
    # A splat ("*") means you want every queue (in alpha order) - this
    # can be useful for dynamically adding new queues.
    def queues
      return @static_queues if @static_queues
      @queues.map { |queue| glob_match(queue) }.flatten.uniq
    end

    # Expands one queue-name glob pattern against the queues currently
    # known to Resque; results are sorted alphabetically.
    def glob_match(pattern)
      Resque.queues.select do |queue|
        File.fnmatch?(pattern, queue)
      end.sort
    end
|
203
|
+
|
204
|
+
    # This is the main workhorse method. Called on a Worker instance,
    # it begins the worker life cycle.
    #
    # The following events occur during a worker's life cycle:
    #
    # 1. Startup:   Signals are registered, dead workers are pruned,
    #               and this worker is registered.
    # 2. Work loop: Jobs are pulled from a queue and processed.
    # 3. Teardown:  This worker is unregistered.
    #
    # Can be passed a float representing the polling frequency.
    # The default is 5 seconds, but for a semi-active site you may
    # want to use a smaller value.
    #
    # Also accepts a block which will be passed the job as soon as it
    # has completed processing. Useful for testing.
    def work(interval = 5.0, &block)
      interval = Float(interval)
      startup

      loop do
        break if shutdown?

        unless work_one_job(&block)
          # Nothing to do: with interval 0 exit (one-shot mode), otherwise
          # sleep before polling again.
          break if interval.zero?
          log_with_severity :debug, "Sleeping for #{interval} seconds"
          procline paused? ? "Paused" : "Waiting for #{queues.join(',')}"
          sleep interval
        end
      end

      unregister_worker
    rescue Exception => exception
      # NOTE(review): rescuing Exception is deliberate here -- the worker
      # must deregister itself no matter what brought it down.
      return if exception.class == SystemExit && !@child && run_at_exit_hooks
      log_with_severity :error, "Failed to start worker : #{exception.inspect}"
      unregister_worker(exception)
    end

    # Reserves and processes a single job (forking first if configured).
    # Returns true if a job was processed, false when paused or idle.
    def work_one_job(job = nil, &block)
      return false if paused?
      return false unless job ||= reserve

      working_on job
      procline "Processing #{job.queue} since #{Time.now.to_i} [#{job.payload_class_name}]"

      log_with_severity :info, "got: #{job.inspect}"
      job.worker = self

      if fork_per_job?
        perform_with_fork(job, &block)
      else
        perform(job, &block)
      end

      done_working
      true
    end

    # DEPRECATED. Processes a single job. If none is given, it will
    # try to produce one. Usually run in the child.
    def process(job = nil, &block)
      return unless job ||= reserve

      job.worker = self
      working_on job
      perform(job, &block)
    ensure
      done_working
    end
|
273
|
+
|
274
|
+
    # Reports the exception and marks the job as failed
    def report_failed_job(job,exception)
      log_with_severity :error, "#{job.inspect} failed: #{exception.inspect}"
      begin
        job.fail(exception)
      rescue Object => exception
        log_with_severity :error, "Received exception when reporting failure: #{exception.inspect}"
      end
      begin
        failed!
      rescue Object => exception
        log_with_severity :error, "Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}"
      end
    end


    # Processes a given job in the child.
    def perform(job)
      begin
        if fork_per_job?
          # In a freshly forked child: get our own Redis connection and let
          # the app re-initialize (database handles etc.).
          reconnect
          run_hook :after_fork, job
        end
        job.perform
      rescue Object => e
        report_failed_job(job,e)
      else
        log_with_severity :info, "done: #{job.inspect}"
      ensure
        yield job if block_given?
      end
    end

    # Attempts to grab a job off one of the provided queues. Returns
    # nil if no job can be found.
    def reserve
      queues.each do |queue|
        log_with_severity :debug, "Checking #{queue}"
        if job = Resque.reserve(queue)
          log_with_severity :debug, "Found job on #{queue}"
          return job
        end
      end

      nil
    rescue Exception => e
      # Logged and re-raised so the outer work loop can deregister cleanly.
      log_with_severity :error, "Error reserving job: #{e.inspect}"
      log_with_severity :error, e.backtrace.join("\n")
      raise e
    end

    # Reconnect to Redis to avoid sharing a connection with the parent,
    # retry up to 3 times with increasing delay before giving up.
    def reconnect
      tries = 0
      begin
        data_store.reconnect
      rescue Redis::BaseConnectionError
        if (tries += 1) <= 3
          log_with_severity :error, "Error reconnecting to Redis; retrying"
          # Back off 1s, 2s, 3s between attempts.
          sleep(tries)
          retry
        else
          log_with_severity :error, "Error reconnecting to Redis; quitting"
          raise
        end
      end
    end
|
342
|
+
|
343
|
+
    # Runs all the methods needed when a worker begins its lifecycle.
    def startup
      $0 = "resque: Starting"

      enable_gc_optimizations
      register_signal_handlers
      start_heartbeat
      prune_dead_workers
      run_hook :before_first_fork
      register_worker

      # Fix buffering so we can `rake resque:work > resque.log` and
      # get output from the child in there.
      $stdout.sync = true
    end

    # Enables GC Optimizations if you're running REE.
    # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
    def enable_gc_optimizations
      if GC.respond_to?(:copy_on_write_friendly=)
        GC.copy_on_write_friendly = true
      end
    end
|
366
|
+
|
367
|
+
    # Registers the various signal handlers a worker responds to.
    #
    # TERM: Shutdown immediately, stop processing jobs.
    #  INT: Shutdown immediately, stop processing jobs.
    # QUIT: Shutdown after the current job has finished processing.
    # USR1: Kill the forked child immediately, continue processing jobs.
    # USR2: Don't process any new jobs
    # CONT: Start processing jobs again after a USR2
    def register_signal_handlers
      trap('TERM') { graceful_term ? shutdown : shutdown! }
      trap('INT') { shutdown! }

      begin
        trap('QUIT') { shutdown }
        if term_child
          trap('USR1') { new_kill_child }
        else
          trap('USR1') { kill_child }
        end
        trap('USR2') { pause_processing }
        trap('CONT') { unpause_processing }
      rescue ArgumentError
        # Platforms without these signals (e.g. Windows).
        log_with_severity :warn, "Signals QUIT, USR1, USR2, and/or CONT not supported."
      end

      log_with_severity :debug, "Registered signals"
    end

    # Restores default signal behavior in a forked child, except that the
    # first TERM raises TermException (subsequent TERMs are ignored so the
    # exception can unwind undisturbed).
    def unregister_signal_handlers
      trap('TERM') do
        trap('TERM') do
          # ignore subsequent terms
        end
        raise TermException.new("SIGTERM")
      end
      trap('INT', 'DEFAULT')

      begin
        trap('QUIT', 'DEFAULT')
        trap('USR1', 'DEFAULT')
        trap('USR2', 'DEFAULT')
      rescue ArgumentError
      end
    end
|
411
|
+
|
412
|
+
    # Schedule this worker for shutdown. Will finish processing the
    # current job.
    def shutdown
      log_with_severity :info, 'Exiting...'
      @shutdown = true
    end

    # Kill the child and shutdown immediately.
    # If not forking, abort this process.
    def shutdown!
      shutdown
      if term_child
        if fork_per_job?
          new_kill_child
        else
          # Raise TermException in the same process
          trap('TERM') do
            # ignore subsequent terms
          end
          raise TermException.new("SIGTERM")
        end
      else
        kill_child
      end
    end

    # Should this worker shutdown as soon as current job is finished?
    def shutdown?
      @shutdown
    end
|
442
|
+
|
443
|
+
# Kills the forked child immediately, without remorse. The job it
|
444
|
+
# is processing will not be completed.
|
445
|
+
def kill_child
|
446
|
+
if @child
|
447
|
+
log_with_severity :debug, "Killing child at #{@child}"
|
448
|
+
if `ps -o pid,state -p #{@child}`
|
449
|
+
Process.kill("KILL", @child) rescue nil
|
450
|
+
else
|
451
|
+
log_with_severity :debug, "Child #{@child} not found, restarting."
|
452
|
+
shutdown
|
453
|
+
end
|
454
|
+
end
|
455
|
+
end
|
456
|
+
|
457
|
+
    # Heartbeat record for this worker, delegated to the data store.
    def heartbeat
      data_store.heartbeat(self)
    end

    # Deletes this worker's heartbeat record.
    def remove_heartbeat
      data_store.remove_heartbeat(self)
    end

    # Records a heartbeat at +time+ (Redis server time by default).
    def heartbeat!(time = data_store.server_time)
      data_store.heartbeat!(self, time)
    end

    # Heartbeat records for all workers, keyed by worker id string.
    def self.all_heartbeats
      data_store.all_heartbeats
    end

    # Returns a list of workers that have sent a heartbeat in the past, but which
    # already expired (does NOT include workers that have never sent a heartbeat at all).
    def self.all_workers_with_expired_heartbeats
      workers = Worker.all
      heartbeats = Worker.all_heartbeats
      now = data_store.server_time

      workers.select do |worker|
        id = worker.to_s
        heartbeat = heartbeats[id]

        if heartbeat
          seconds_since_heartbeat = (now - Time.parse(heartbeat)).to_i
          seconds_since_heartbeat > Resque.prune_interval
        else
          # No heartbeat at all: likely another client library or an older
          # Resque; never treated as expired.
          false
        end
      end
    end
|
492
|
+
|
493
|
+
    # Starts the background thread that refreshes this worker's heartbeat
    # every Resque.heartbeat_interval seconds, until signalled to stop
    # (see #kill_background_threads).
    def start_heartbeat
      remove_heartbeat

      @heartbeat_thread_signal = Resque::ThreadSignal.new

      @heartbeat_thread = Thread.new do
        loop do
          heartbeat!
          # wait_for_signal doubles as the sleep between beats; it returns
          # true as soon as shutdown is requested.
          signaled = @heartbeat_thread_signal.wait_for_signal(Resque.heartbeat_interval)
          break if signaled
        end
      end

      @@all_heartbeat_threads << @heartbeat_thread
    end
|
508
|
+
|
509
|
+
    # Kills the forked child immediately with minimal remorse. The job it
    # is processing will not be completed. Send the child a TERM signal,
    # wait up to `term_timeout` seconds, and then a KILL signal if it has
    # not quit.
    def new_kill_child
      if @child
        unless Process.waitpid(@child, Process::WNOHANG)
          log_with_severity :debug, "Sending TERM signal to child #{@child}"
          Process.kill("TERM", @child)
          # Poll for a voluntary exit in 100ms steps before escalating.
          (term_timeout.to_f * 10).round.times do |i|
            sleep(0.1)
            return if Process.waitpid(@child, Process::WNOHANG)
          end
          log_with_severity :debug, "Sending KILL signal to child #{@child}"
          Process.kill("KILL", @child)
        else
          log_with_severity :debug, "Child #{@child} already quit."
        end
      end
    rescue SystemCallError
      # waitpid/kill raise ESRCH/ECHILD when the child is already gone.
      log_with_severity :error, "Child #{@child} already quit and reaped."
    end
|
530
|
+
|
531
|
+
    # are we paused?
    def paused?
      @paused
    end

    # Stop processing jobs after the current one has completed (if we're
    # currently running one).
    def pause_processing
      log_with_severity :info, "USR2 received; pausing job processing"
      run_hook :before_pause, self
      @paused = true
    end

    # Start processing jobs again after a pause
    def unpause_processing
      log_with_severity :info, "CONT received; resuming job processing"
      @paused = false
      run_hook :after_pause, self
    end
|
550
|
+
|
551
|
+
    # Looks for any workers which should be running on this server
    # and, if they're not, removes them from Redis.
    #
    # This is a form of garbage collection. If a server is killed by a
    # hard shutdown, power failure, or something else beyond our
    # control, the Resque workers will not die gracefully and therefore
    # will leave stale state information in Redis.
    #
    # By checking the current Redis state against the actual
    # environment, we can determine if Redis is old and clean it up a bit.
    def prune_dead_workers
      all_workers = Worker.all

      # NOTE: when all_workers is empty these locals stay unassigned, which
      # is safe only because the loop below then has no iterations.
      unless all_workers.empty?
        known_workers = worker_pids
        all_workers_with_expired_heartbeats = Worker.all_workers_with_expired_heartbeats
      end

      all_workers.each do |worker|
        # If the worker hasn't sent a heartbeat, remove it from the registry.
        #
        # If the worker hasn't ever sent a heartbeat, we won't remove it since
        # the first heartbeat is sent before the worker is registred it means
        # that this is a worker that doesn't support heartbeats, e.g., another
        # client library or an older version of Resque. We won't touch these.
        if all_workers_with_expired_heartbeats.include?(worker)
          log_with_severity :info, "Pruning dead worker: #{worker}"
          worker.unregister_worker(PruneDeadWorkerDirtyExit.new(worker.to_s))
          next
        end

        host, pid, worker_queues_raw = worker.id.split(':')
        worker_queues = worker_queues_raw.split(",")
        unless @queues.include?("*") || (worker_queues.to_set == @queues.to_set)
          # If the worker we are trying to prune does not belong to the queues
          # we are listening to, we should not touch it.
          # Attempt to prune a worker from different queues may easily result in
          # an unknown class exception, since that worker could easily be even
          # written in different language.
          next
        end

        next unless host == hostname
        next if known_workers.include?(pid)

        log_with_severity :debug, "Pruning dead worker: #{worker}"
        worker.unregister_worker
      end
    end
|
600
|
+
|
601
|
+
# Registers ourself as a worker. Useful when entering the worker
# lifecycle on startup.
#
# Delegates the actual Redis write to the data store.
def register_worker
  data_store.register_worker(self)
end
|
606
|
+
|
607
|
+
# Runs a named hook, passing along any arguments.
#
# The :before_first_fork hooks run at most once per worker lifetime;
# once they have run, later invocations are skipped.
def run_hook(name, *args)
  hooks = Resque.send(name)
  return unless hooks
  return if name == :before_first_fork && @before_first_fork_hook_ran

  message = "Running #{name} hooks"
  message << " with #{args.inspect}" if args.any?
  log_with_severity :info, message

  hooks.each do |hook|
    if args.any?
      hook.call(*args)
    else
      hook.call
    end
    @before_first_fork_hook_ran = true if name == :before_first_fork
  end
end
|
620
|
+
|
621
|
+
# Stops the background heartbeat thread, if one was started, by
# signalling it and then waiting for it to finish. No-op otherwise.
def kill_background_threads
  return unless @heartbeat_thread
  @heartbeat_thread_signal.signal
  @heartbeat_thread.join
end
|
627
|
+
|
628
|
+
# Unregisters ourself as a worker. Useful when shutting down.
#
# exception - optional Exception recorded as the failure cause for a
#             job that was still being processed when we went away.
def unregister_worker(exception = nil)
  # If we're still processing a job, make sure it gets logged as a
  # failure.
  if (hash = processing) && !hash.empty?
    job = Job.new(hash['queue'], hash['payload'])
    # Ensure the proper worker is attached to this job, even if
    # it's not the precise instance that died.
    job.worker = self
    begin
      job.fail(exception || DirtyExit.new("Job still being processed"))
    rescue RuntimeError => e
      # Best-effort: failing the job must not prevent unregistration.
      log_with_severity :error, e.message
    end
  end

  kill_background_threads

  data_store.unregister_worker(self) do
    Stat.clear("processed:#{self}")
    Stat.clear("failed:#{self}")
  end
rescue Exception => exception_while_unregistering
  # NOTE(review): rescuing Exception (not StandardError) appears
  # deliberate here — any failure while unregistering is re-raised,
  # chained with the original exception's message and backtrace, so
  # neither error is lost during shutdown/pruning.
  message = exception_while_unregistering.message
  if exception
    message = message + "\nOriginal Exception (#{exception.class}): #{exception.message}\n" +
      " #{exception.backtrace.join(" \n")}"
  end
  fail(exception_while_unregistering.class,
       message,
       exception_while_unregistering.backtrace)
end
|
660
|
+
|
661
|
+
# Given a job, tells Redis we're working on it. Useful for seeing
# what workers are doing and when.
def working_on(job)
  payload = {
    :queue => job.queue,
    :run_at => Time.now.utc.iso8601,
    :payload => job.payload
  }
  data_store.set_worker_payload(self, encode(payload))
end
|
670
|
+
|
671
|
+
# Called when we are done working - clears our `working_on` state
# and tells Redis we processed a job.
def done_working
  data_store.worker_done_working(self) { processed! }
end
|
678
|
+
|
679
|
+
# How many jobs has this worker processed? Returns an int.
# Reads this worker's own "processed:<id>" stat.
def processed
  Stat["processed:#{self}"]
end
|
683
|
+
|
684
|
+
# Tell Redis we've processed a job: increments both the global
# "processed" counter and this worker's own counter.
def processed!
  ["processed", "processed:#{self}"].each do |stat|
    Stat << stat
  end
end
|
689
|
+
|
690
|
+
# How many failed jobs has this worker seen? Returns an int.
# Reads this worker's own "failed:<id>" stat.
def failed
  Stat["failed:#{self}"]
end
|
694
|
+
|
695
|
+
# Tells Redis we've failed a job: increments both the global "failed"
# counter and this worker's own counter.
def failed!
  ["failed", "failed:#{self}"].each do |stat|
    Stat << stat
  end
end
|
700
|
+
|
701
|
+
# What time did this worker start? Returns an instance of `Time`
# (per the original comment — the value actually comes from the data
# store; TODO confirm against DataStore#worker_start_time).
def started
  data_store.worker_start_time(self)
end
|
705
|
+
|
706
|
+
# Tell Redis we've started, recording our start time via the data
# store.
def started!
  data_store.worker_started(self)
end
|
710
|
+
|
711
|
+
# Returns a hash explaining the Job we're currently processing, if any.
#
# reload - when true (the default), drops the memoized value and
#          re-decodes the payload stored in Redis.
#
# Returns {} when no payload is stored for this worker.
def job(reload = true)
  @job = nil if reload
  @job ||= decode(data_store.get_worker_payload(self)) || {}
end
attr_writer :job
alias_method :processing, :job
|
718
|
+
|
719
|
+
# Boolean - true if working, false if not.
# Derived from #state, i.e. whether a payload is stored for us in Redis.
def working?
  state == :working
end
|
723
|
+
|
724
|
+
# Boolean - true if idle, false if not.
# Derived from #state, i.e. whether a payload is stored for us in Redis.
def idle?
  state == :idle
end
|
728
|
+
|
729
|
+
# Boolean - whether this worker forks a child per job.
#
# Forking is disabled when the FORK_PER_JOB env var is the string
# 'false' or when the platform does not support Kernel#fork. The
# answer is memoized after the first call.
def fork_per_job?
  return @fork_per_job if defined?(@fork_per_job)
  forking_allowed = ENV["FORK_PER_JOB"] != 'false'
  @fork_per_job = forking_allowed && Kernel.respond_to?(:fork)
end
|
733
|
+
|
734
|
+
# Returns a symbol representing the current worker state,
# which can be either :working or :idle.
# A worker counts as :working whenever any payload is stored for it
# in the data store.
def state
  data_store.get_worker_payload(self) ? :working : :idle
end
|
739
|
+
|
740
|
+
# Is this worker the same as another worker?
# Two workers are equal when their string ids ("host:pid:queues")
# match, regardless of the other object's class.
def ==(other)
  to_s == other.to_s
end
|
744
|
+
|
745
|
+
# Human-readable representation, e.g. "#<Worker host:pid:queues>".
def inspect
  "#<Worker #{to_s}>"
end
|
748
|
+
|
749
|
+
# The string representation is the same as the id for this worker
# instance. Can be used with `Worker.find`.
# Format: "<hostname>:<pid>:<queue1,queue2,...>" (memoized).
def to_s
  @to_s ||= "#{hostname}:#{pid}:#{@queues.join(',')}"
end
alias_method :id, :to_s
|
755
|
+
|
756
|
+
# chomp'd hostname of this worker's machine (memoized).
# NOTE(review): Socket.gethostname returns the name without a trailing
# newline; the "chomp'd" wording is historical.
def hostname
  @hostname ||= Socket.gethostname
end
|
760
|
+
|
761
|
+
# Returns Integer PID of running worker (memoized).
def pid
  @pid ||= Process.pid
end
|
765
|
+
|
766
|
+
# Returns an Array of string pids of all the other workers on this
# machine. Useful when pruning dead workers on startup.
#
# Dispatches to a platform-specific implementation based on
# RUBY_PLATFORM.
def worker_pids
  case RUBY_PLATFORM
  when /solaris/
    solaris_worker_pids
  when /mingw32/
    windows_worker_pids
  else
    linux_worker_pids
  end
end
|
777
|
+
|
778
|
+
# Returns an Array of string pids of all the other workers on this
# machine, parsed from `tasklist` output. Useful when pruning dead
# workers on startup.
def windows_worker_pids
  listing = `tasklist /FI "IMAGENAME eq ruby.exe" /FO list`.encode("UTF-8", Encoding.locale_charmap)
  pid_lines = listing.split($/).grep(/^PID:/)
  pid_lines.map { |line| line.gsub(/PID:\s+/, '') }
end
|
784
|
+
|
785
|
+
# Find Resque worker pids on Linux and OS X.
#
# Greps `ps` output for resque worker proclines (see #procline),
# excluding resque-web, and returns the first (pid) column as strings.
#
# NOTE(review): inside this double-quoted backtick string Ruby turns
# "\s" into a literal space before the shell sees it, so the middle
# alternative greps for "resque: Starting" — confirm that is intended.
def linux_worker_pids
  `ps -A -o pid,command | grep -E "[r]esque:work|[r]esque:\sStarting|[r]esque-[0-9]" | grep -v "resque-web"`.split("\n").map do |line|
    line.split(' ')[0]
  end
end
|
792
|
+
|
793
|
+
# Find Resque worker pids on Solaris.
#
# Returns an Array of string pids of all the other workers on this
# machine. Useful when pruning dead workers on startup.
#
# `ps -o comm` only shows the bare command name on Solaris, so each
# "ruby" candidate is confirmed via `pargs`, keeping only processes
# whose argument list mentions this exact resque version.
def solaris_worker_pids
  `ps -A -o pid,comm | grep "[r]uby" | grep -v "resque-web"`.split("\n").map do |line|
    real_pid = line.split(' ')[0]
    pargs_command = `pargs -a #{real_pid} 2>/dev/null | grep [r]esque | grep -v "resque-web"`
    if pargs_command.split(':')[1] == " resque-#{Resque::Version}"
      real_pid
    end
  end.compact
end
|
806
|
+
|
807
|
+
# Given a string, sets the procline ($0) and logs.
# Procline is always in the format of:
#   RESQUE_PROCLINE_PREFIXresque-VERSION: STRING
def procline(string)
  $0 = "#{ENV['RESQUE_PROCLINE_PREFIX']}resque-#{Resque::Version}: #{string}"
  log_with_severity :debug, $0
end
|
814
|
+
|
815
|
+
# Legacy logging helper: forwards to info-level logging.
def log(message)
  info(message)
end
|
818
|
+
|
819
|
+
# Legacy verbose logging helper: forwards to debug-level logging.
def log!(message)
  debug(message)
end
|
822
|
+
|
823
|
+
|
824
|
+
# Whether verbose logging is enabled for this worker (see #verbose=).
def verbose
  @verbose
end
|
827
|
+
|
828
|
+
# Whether very-verbose (debug) logging is enabled (see #very_verbose=).
def very_verbose
  @very_verbose
end
|
831
|
+
|
832
|
+
# Enables or disables verbose logging on this worker.
#
# Disabling switches the global Resque logger to the quiet formatter.
# Enabling switches it to the verbose formatter at INFO level, unless
# very_verbose is already on (in which case the logger is untouched).
def verbose=(value)
  if !value
    Resque.logger.formatter = QuietFormatter.new
  elsif !very_verbose
    Resque.logger.formatter = VerboseFormatter.new
    Resque.logger.level = Logger::INFO
  end

  @verbose = value
end
|
842
|
+
|
843
|
+
# Enables or disables very-verbose (debug) logging on this worker.
#
# Enabling switches the global Resque logger to the very-verbose
# formatter at DEBUG level. Disabling falls back to the verbose
# formatter at INFO level when `verbose` is still set, and to the
# quiet formatter otherwise.
def very_verbose=(value)
  if value
    Resque.logger.formatter = VeryVerboseFormatter.new
    Resque.logger.level = Logger::DEBUG
  elsif verbose
    Resque.logger.formatter = VerboseFormatter.new
    Resque.logger.level = Logger::INFO
  else
    Resque.logger.formatter = QuietFormatter.new
  end

  @very_verbose = value
end

private
|
858
|
+
|
859
|
+
# Forks a child process to run +job+ and blocks until it exits.
#
# The :before_fork hook runs in the parent before forking. On
# platforms that cannot fork (NotImplementedError), forking is
# disabled for the rest of this worker's life and the job is
# performed in-process instead.
#
# If the child is killed by an unhandled signal, the job is marked
# as failed with a DirtyExit carrying the child's Process::Status.
def perform_with_fork(job, &block)
  run_hook :before_fork, job

  begin
    @child = fork do
      unregister_signal_handlers if term_child
      perform(job, &block)
      exit! unless run_at_exit_hooks
    end
  rescue NotImplementedError
    @fork_per_job = false
    perform(job, &block)
    return
  end

  srand # Reseed: the child inherits the parent's RNG state.
  procline "Forked #{@child} at #{Time.now.to_i}"

  begin
    Process.waitpid(@child)
  rescue SystemCallError
    # Child already reaped (e.g. by a signal handler) — nothing to wait on.
    nil
  end

  # BUGFIX: interpolate $?.termsig instead of $?.stopsig. For a child
  # terminated by a signal, Process::Status#termsig holds the signal
  # number, while #stopsig is nil (it only applies to stopped
  # processes), so the old message always showed an empty signal.
  job.fail(DirtyExit.new("Child process received unhandled signal #{$?.termsig}", $?)) if $?.signaled?
  @child = nil
end
|
886
|
+
|
887
|
+
# Logs a message at the given severity via Resque's Logging module.
def log_with_severity(severity, message)
  Logging.log(severity, message)
end
|
890
|
+
end
|
891
|
+
end
|