resque_manager 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/MIT-LICENSE +21 -0
- data/README.markdown +417 -0
- data/Rakefile +41 -0
- data/app/assets/images/resque_manager/idle.png +0 -0
- data/app/assets/images/resque_manager/poll.png +0 -0
- data/app/assets/images/resque_manager/working.png +0 -0
- data/app/assets/javascripts/resque_manager/application.js +15 -0
- data/app/assets/javascripts/resque_manager/jquery-1.3.2.min.js +19 -0
- data/app/assets/javascripts/resque_manager/jquery.relatize_date.js +95 -0
- data/app/assets/javascripts/resque_manager/ranger.js +24 -0
- data/app/assets/stylesheets/resque_manager/application.css +14 -0
- data/app/assets/stylesheets/resque_manager/resque/resque.css +93 -0
- data/app/assets/stylesheets/resque_manager/resque/resque_reset.css +48 -0
- data/app/assets/stylesheets/resque_manager/resque_cleaner/cleaner.css +62 -0
- data/app/controllers/resque_manager/resque_controller.rb +313 -0
- data/app/helpers/resque_manager/application_helper.rb +4 -0
- data/app/helpers/resque_manager/resque_helper.rb +142 -0
- data/app/models/resque_manager/paginate.rb +54 -0
- data/app/views/layouts/resque_manager/application.html.erb +37 -0
- data/app/views/resque_manager/resque/_key.html.erb +17 -0
- data/app/views/resque_manager/resque/_limiter.html.erb +12 -0
- data/app/views/resque_manager/resque/_next_more.html.erb +10 -0
- data/app/views/resque_manager/resque/_paginate.html.erb +53 -0
- data/app/views/resque_manager/resque/_queues.html.erb +59 -0
- data/app/views/resque_manager/resque/_status_styles.erb +98 -0
- data/app/views/resque_manager/resque/_workers.html.erb +138 -0
- data/app/views/resque_manager/resque/_working.html.erb +69 -0
- data/app/views/resque_manager/resque/cleaner.html.erb +41 -0
- data/app/views/resque_manager/resque/cleaner_exec.html.erb +6 -0
- data/app/views/resque_manager/resque/cleaner_list.html.erb +172 -0
- data/app/views/resque_manager/resque/delayed.html.erb +35 -0
- data/app/views/resque_manager/resque/delayed_timestamp.html.erb +26 -0
- data/app/views/resque_manager/resque/error.erb +1 -0
- data/app/views/resque_manager/resque/overview.html.erb +4 -0
- data/app/views/resque_manager/resque/schedule.html.erb +96 -0
- data/app/views/resque_manager/resque/stats.html.erb +62 -0
- data/app/views/resque_manager/resque/status.html.erb +57 -0
- data/app/views/resque_manager/resque/statuses.html.erb +72 -0
- data/app/views/resque_manager/resque/workers.html.erb +1 -0
- data/config/routes.rb +38 -0
- data/config/sample_redis.yml +43 -0
- data/config/sample_resque_manager.yml +23 -0
- data/lib/resque_manager/engine.rb +9 -0
- data/lib/resque_manager/overrides/resque/failure/redis.rb +11 -0
- data/lib/resque_manager/overrides/resque/job.rb +69 -0
- data/lib/resque_manager/overrides/resque/resque.rb +8 -0
- data/lib/resque_manager/overrides/resque/worker.rb +291 -0
- data/lib/resque_manager/overrides/resque_scheduler/resque_scheduler.rb +58 -0
- data/lib/resque_manager/overrides/resque_status/chained_status.rb +46 -0
- data/lib/resque_manager/overrides/resque_status/hash.rb +12 -0
- data/lib/resque_manager/overrides/resque_status/status.rb +161 -0
- data/lib/resque_manager/recipes.rb +185 -0
- data/lib/resque_manager/version.rb +3 -0
- data/lib/resque_manager.rb +47 -0
- data/lib/tasks/failure.rake +8 -0
- data/lib/tasks/scheduler.rake +11 -0
- data/lib/tasks/worker.rake +129 -0
- data/test/dummy/README.rdoc +261 -0
- data/test/dummy/Rakefile +7 -0
- data/test/dummy/app/assets/javascripts/application.js +15 -0
- data/test/dummy/app/assets/stylesheets/application.css +13 -0
- data/test/dummy/app/controllers/application_controller.rb +3 -0
- data/test/dummy/app/helpers/application_helper.rb +2 -0
- data/test/dummy/app/views/layouts/application.html.erb +14 -0
- data/test/dummy/config/application.rb +65 -0
- data/test/dummy/config/boot.rb +10 -0
- data/test/dummy/config/environment.rb +5 -0
- data/test/dummy/config/environments/development.rb +37 -0
- data/test/dummy/config/environments/production.rb +67 -0
- data/test/dummy/config/environments/test.rb +37 -0
- data/test/dummy/config/initializers/backtrace_silencers.rb +7 -0
- data/test/dummy/config/initializers/inflections.rb +15 -0
- data/test/dummy/config/initializers/mime_types.rb +5 -0
- data/test/dummy/config/initializers/secret_token.rb +7 -0
- data/test/dummy/config/initializers/session_store.rb +8 -0
- data/test/dummy/config/initializers/wrap_parameters.rb +14 -0
- data/test/dummy/config/locales/en.yml +5 -0
- data/test/dummy/config/routes.rb +4 -0
- data/test/dummy/config.ru +4 -0
- data/test/dummy/log/development.log +5045 -0
- data/test/dummy/public/404.html +26 -0
- data/test/dummy/public/422.html +26 -0
- data/test/dummy/public/500.html +25 -0
- data/test/dummy/public/favicon.ico +0 -0
- data/test/dummy/script/rails +6 -0
- data/test/dummy/tmp/cache/assets/C2A/A10/sprockets%2Fb2e622954654f415590723e9b882063e +0 -0
- data/test/dummy/tmp/cache/assets/C60/1D0/sprockets%2F8ed12e4193473760f95b973567a8c206 +0 -0
- data/test/dummy/tmp/cache/assets/CA1/970/sprockets%2Fc387148880e015d1eab0dc838b326022 +0 -0
- data/test/dummy/tmp/cache/assets/CAE/930/sprockets%2Fe227278d3c65d8aa1159da720263f771 +0 -0
- data/test/dummy/tmp/cache/assets/CD8/370/sprockets%2F357970feca3ac29060c1e3861e2c0953 +0 -0
- data/test/dummy/tmp/cache/assets/CDC/E30/sprockets%2Fe1207380d69eeee3284e02636c26f24a +0 -0
- data/test/dummy/tmp/cache/assets/CF1/720/sprockets%2Fd91a5918f5aa43a43c8135a67c78e989 +0 -0
- data/test/dummy/tmp/cache/assets/D0E/820/sprockets%2F00c6cc9dc46bf64347b3775d7d15541b +0 -0
- data/test/dummy/tmp/cache/assets/D16/180/sprockets%2F73d6fa09352cb76ac81e1683e832b93f +0 -0
- data/test/dummy/tmp/cache/assets/D27/170/sprockets%2Fec164819553e2e5b28f1efc9bd970978 +0 -0
- data/test/dummy/tmp/cache/assets/D2B/DA0/sprockets%2F989465d3ea8575dd0b54981a9e8add38 +0 -0
- data/test/dummy/tmp/cache/assets/D32/A10/sprockets%2F13fe41fee1fe35b49d145bcc06610705 +0 -0
- data/test/dummy/tmp/cache/assets/D37/1F0/sprockets%2F97119b908ebed2633edfd00ac90d9011 +0 -0
- data/test/dummy/tmp/cache/assets/D38/FB0/sprockets%2F74e5ba1cca7a1470d53c54fb60368b78 +0 -0
- data/test/dummy/tmp/cache/assets/D42/4E0/sprockets%2F0fa6e3c14356aa527d68a8d56fa37f28 +0 -0
- data/test/dummy/tmp/cache/assets/D43/C20/sprockets%2F1efd074fd1074b3dc88145b480ff961f +0 -0
- data/test/dummy/tmp/cache/assets/D46/CD0/sprockets%2F67f1ef70e7ede542318b8d55e25b16c3 +0 -0
- data/test/dummy/tmp/cache/assets/D4E/1B0/sprockets%2Ff7cbd26ba1d28d48de824f0e94586655 +0 -0
- data/test/dummy/tmp/cache/assets/D5A/EA0/sprockets%2Fd771ace226fc8215a3572e0aa35bb0d6 +0 -0
- data/test/dummy/tmp/cache/assets/D68/080/sprockets%2Fa26f2ae225aa4b87c462d540c7cf43f9 +0 -0
- data/test/dummy/tmp/cache/assets/D9A/B20/sprockets%2F0eddc19d46318e2e286cc171ae4cc73e +0 -0
- data/test/dummy/tmp/cache/assets/DA4/900/sprockets%2F515bf984438c6ec4b8a515fcc13baf8e +0 -0
- data/test/dummy/tmp/cache/assets/DBD/070/sprockets%2F60ffef45ddefd5c7746d17977fff0717 +0 -0
- data/test/dummy/tmp/cache/assets/DD7/AC0/sprockets%2Fc7c983c5c607dbfdb726eecc36146ca9 +0 -0
- data/test/dummy/tmp/cache/assets/DDC/400/sprockets%2Fcffd775d018f68ce5dba1ee0d951a994 +0 -0
- data/test/dummy/tmp/cache/assets/DF5/480/sprockets%2Fea4f3c726fc1046cad1ad243faf84e7d +0 -0
- data/test/dummy/tmp/cache/assets/E04/890/sprockets%2F2f5173deea6c795b8fdde723bb4b63af +0 -0
- data/test/dummy/tmp/cache/assets/E2B/7A0/sprockets%2Fd44ef07be0aa6d5b5dea4d37d7f72b4f +0 -0
- data/test/functional/resque_manager/resque_controller_test.rb +9 -0
- data/test/integration/navigation_test.rb +10 -0
- data/test/resque_manager_test.rb +7 -0
- data/test/test_helper.rb +15 -0
- data/test/unit/helpers/resque_manager/resque_helper_test.rb +6 -0
- metadata +307 -0
data/lib/resque_manager/overrides/resque/worker.rb

@@ -0,0 +1,291 @@
+require 'socket'
+
+module Resque
+  class Worker
+    @@local_ip = nil
+
+    def local_ip
+      @@local_ip ||= begin
+        UDPSocket.open do |s|
+          s.connect 'google.com', 1
+          s.addr.last
+        end
+      end
+    end
+
+    # The string representation is the same as the id for this worker
+    # instance. Can be used with `Worker.find`.
+    def to_s
+      @to_s || "#{hostname}(#{local_ip}):#{Process.pid}:#{Thread.current.object_id}:#{Thread.current[:path]}:#{Thread.current[:queues]}"
+    end
+
+    alias_method :id, :to_s
+
+    # When the worker gets the -USR2 signal, to_s may give a different value for the thread and queue portion.
+    def pause_key
+      key = to_s.split(':')
+      "worker:#{key.first}:#{key.second}:all_workers:paused"
+    end
+
+    def pid
+      to_s.split(':').second
+    end
+
+    def thread
+      to_s.split(':').third
+    end
+
+    def path
+      to_s.split(':').fourth
+    end
+
+    def queue
+      to_s.split(':').last
+    end
+
+    def workers_in_pid
+      Array(Resque.redis.smembers(:workers)).select { |id| id =~ /\(#{ip}\):#{pid}/ }.map { |id| Resque::Worker.find(id) }.compact
+    end
+
+    def ip
+      to_s.split(':').first[/\b(?:\d{1,3}\.){3}\d{1,3}\b/]
+    end
+
+    def queues_in_pid
+      workers_in_pid.collect { |w| w.queue }
+    end
+
+    # OVERRIDE for multithreaded workers
+    def queues
+      Thread.current[:queues] == "*" ? Resque.queues.sort : Thread.current[:queues].split(',')
+    end
+
+    # Runs all the methods needed when a worker begins its lifecycle.
+    # OVERRIDE for multithreaded workers
+    def startup
+      enable_gc_optimizations
+      if Thread.current == Thread.main
+        register_signal_handlers
+        prune_dead_workers
+      end
+      run_hook :before_first_fork
+      register_worker
+
+      # Fix buffering so we can `rake resque:work > resque.log` and
+      # get output from the child in there.
+      $stdout.sync = true
+    end
+
+    # Schedule this worker for shutdown. Will finish processing the
+    # current job.
+    # OVERRIDE for multithreaded workers
+    def shutdown
+      log 'Exiting...'
+      Thread.list.each { |t| t[:shutdown] = true }
+      @shutdown = true
+    end
+
+    def paused
+      Resque.redis.get pause_key
+    end
+
+    # Are we paused?
+    # OVERRIDE so the UI can tell if we're paused.
+    def paused?
+      @paused || paused.present?
+    end
+
+    # Stop processing jobs after the current one has completed (if we're
+    # currently running one).
+    # OVERRIDE to set a redis key so the UI knows it's paused too.
+    # Would prefer to call super, but that raises a "no superclass method" error.
+    def pause_processing
+      log "USR2 received; pausing job processing"
+      @paused = true
+      Resque.redis.set(pause_key, Time.now.to_s)
+    end
+
+    # Start processing jobs again after a pause.
+    # OVERRIDE to remove the redis key so the UI knows it's unpaused too.
+    # Would prefer to call super, but that raises a "no superclass method" error.
+    def unpause_processing
+      log "CONT received; resuming job processing"
+      @paused = false
+      Resque.redis.del(pause_key)
+    end
+
+    # Looks for any workers which should be running on this server
+    # and, if they're not, removes them from Redis.
+    #
+    # This is a form of garbage collection. If a server is killed by a
+    # hard shutdown, power failure, or something else beyond our
+    # control, the Resque workers will not die gracefully and therefore
+    # will leave stale state information in Redis.
+    #
+    # By checking the current Redis state against the actual
+    # environment, we can determine if Redis is old and clean it up a bit.
+    def prune_dead_workers
+      Worker.all.each do |worker|
+        host, pid, thread, path, queues = worker.id.split(':')
+        next unless host.include?(hostname)
+        next if worker_pids.include?(pid)
+        log! "Pruning dead worker: #{worker}"
+        worker.unregister_worker
+      end
+    end
+
+    # Unregisters ourself as a worker. Useful when shutting down.
+    # OVERRIDE to also remove the pause key.
+    # Would prefer to call super, but that raises a "no superclass method" error.
+    def unregister_worker_with_pause(exception = nil)
+      unregister_worker_without_pause(exception)
+
+      Resque.redis.del(pause_key)
+    end
+    alias_method_chain :unregister_worker, :pause
+
+    def all_workers_in_pid_working
+      workers_in_pid.select { |w| (hash = w.processing) && !hash.empty? }
+    end
+
+    # This is the main workhorse method. Called on a Worker instance,
+    # it begins the worker life cycle.
+    #
+    # The following events occur during a worker's life cycle:
+    #
+    # 1. Startup:   Signals are registered, dead workers are pruned,
+    #               and this worker is registered.
+    # 2. Work loop: Jobs are pulled from a queue and processed.
+    # 3. Teardown:  This worker is unregistered.
+    #
+    # Can be passed an integer representing the polling frequency.
+    # The default is 5 seconds, but for a semi-active site you may
+    # want to use a smaller value.
+    #
+    # Also accepts a block which will be passed the job as soon as it
+    # has completed processing. Useful for testing.
+    # OVERRIDE for multithreaded workers
+    def work(interval = 5.0, &block)
+      interval = Float(interval)
+      $0 = "resque: Starting"
+      startup
+
+      loop do
+        break if shutdown? || Thread.current[:shutdown]
+
+        if not paused? and job = reserve
+          log "got: #{job.inspect}"
+          job.worker = self
+          working_on job
+
+          procline "Processing #{job.queue} since #{Time.now.to_i} [#{job.payload_class}]"
+          if @child = fork(job) do
+            unregister_signal_handlers if term_child
+            reconnect
+            perform(job, &block)
+            exit! unless run_at_exit_hooks
+          end
+
+            srand # Reseeding
+            procline "Forked #{@child} at #{Time.now.to_i}"
+            begin
+              Process.waitpid(@child)
+            rescue SystemCallError
+              nil
+            end
+            job.fail(DirtyExit.new($?.to_s)) if $?.signaled?
+          else
+            reconnect
+            perform(job, &block)
+          end
+          done_working
+          @child = nil
+        else
+          break if interval.zero?
+          log! "Sleeping for #{interval} seconds"
+          procline paused? ? "Paused" : "Waiting for #{@queues.join(',')}"
+          sleep interval
+        end
+      end
+
+      unregister_worker
+      loop do
+        # hang onto the process until all threads are done
+        break if all_workers_in_pid_working.blank?
+        sleep interval.to_i
+      end
+    rescue Exception => exception
+      log "Failed to start worker : #{exception.inspect}"
+
+      unregister_worker(exception)
+    end
+
+    # The logic for mapped_mget changed in the latest redis gem: it now returns keys with nil values.
+    def self.working
+      names = all
+      return [] unless names.any?
+      names.map! { |name| "worker:#{name}" }
+      Resque.redis.mapped_mget(*names).map do |key, value|
+        find key.sub("worker:", '') unless value.nil?
+      end.compact
+    end
+
+    def overview_message=(message)
+      data = encode(job.merge('overview_message' => message))
+      Resque.redis.set("worker:#{self}", data)
+    end
+
+    def overview_message
+      job['overview_message']
+    end
+
+    def self.start(options)
+      ips = options[:hosts]
+      application_path = options[:application_path]
+      queues = options[:queues]
+      if Rails.env =~ /development|test/
+        Thread.new(application_path, queues) { |application_path, queue| system("cd #{application_path || '.'}; bundle exec #{ResqueManager.resque_worker_rake || 'rake'} RAILS_ENV=#{Rails.env} QUEUE=#{queue} resque:work") }
+      else
+        Thread.new(ips, application_path, queues) { |ip_list, application_path, queue| system("cd #{Rails.root}; bundle exec cap #{Rails.env} resque:work host=#{ip_list} application_path=#{application_path} queue=#{queue}") }
+      end
+    end
+
+    def quit
+      if Rails.env =~ /development|test/
+        if RUBY_PLATFORM =~ /java/
+          # jruby doesn't trap the -QUIT signal.
+          # -TERM gracefully kills the main pid and does a -9 on the child if there is one.
+          # Since jruby doesn't fork a child, the main worker is gracefully killed.
+          system("kill -TERM #{self.pid}")
+        else
+          system("kill -QUIT #{self.pid}")
+        end
+      else
+        system("cd #{Rails.root}; bundle exec cap #{Rails.env} resque:quit_worker pid=#{self.pid} host=#{self.ip} application_path=#{self.path}")
+      end
+    end
+
+    def pause
+      if Rails.env =~ /development|test/
+        system("kill -USR2 #{self.pid}")
+      else
+        system("cd #{Rails.root}; bundle exec cap #{Rails.env} resque:pause_worker pid=#{self.pid} host=#{self.ip}")
+      end
+    end
+
+    def continue
+      if Rails.env =~ /development|test/
+        system("kill -CONT #{self.pid}")
+      else
+        system("cd #{Rails.root}; bundle exec cap #{Rails.env} resque:continue_worker pid=#{self.pid} host=#{self.ip}")
+      end
+    end
+
+    def restart
+      queues = self.queues_in_pid.join('#')
+      quit
+      self.class.start(hosts: self.ip, queues: queues, application_path: self.path)
+    end
+
+  end
+end
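For orientation: the worker id built by to_s above packs hostname, local IP, process id, thread object id, application path and queue list into one colon-delimited string, and the accessors (pid, thread, path, queue, ip) simply split it back apart. A minimal, self-contained sketch of that parsing in plain Ruby; the sample id is invented, and plain array indices stand in for the ActiveSupport second/third/fourth helpers the gem uses:

# Hypothetical worker id in the format produced by Worker#to_s above.
id = "app01(10.0.0.5):4242:70211531964160:/var/www/app:mailer,reports"

parts  = id.split(':')
pid    = parts[1]                                    # "4242"             (Worker#pid)
thread = parts[2]                                    # "70211531964160"   (Worker#thread)
path   = parts[3]                                    # "/var/www/app"     (Worker#path)
queue  = parts.last                                  # "mailer,reports"   (Worker#queue)
ip     = parts.first[/\b(?:\d{1,3}\.){3}\d{1,3}\b/]  # "10.0.0.5"         (Worker#ip)

puts [pid, thread, path, queue, ip].inspect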
data/lib/resque_manager/overrides/resque_scheduler/resque_scheduler.rb

@@ -0,0 +1,58 @@
+module ResqueScheduler
+  def schedule=(schedule_hash)
+    raise 'not implemented'
+  end
+
+  # Returns the schedule hash
+  def schedule
+    # the scheduler gem expects a hash, but it's now stored in
+    # redis as an array.
+    hash = {}
+    Resque.list_range(:scheduled, 0, -0).each do |job|
+      hash.merge! job
+    end
+    hash
+  end
+
+  def self.start(ips)
+    if Rails.env =~ /development|test/
+      Thread.new { system("rake resque:scheduler") }
+    else
+      Thread.new(ips) { |ip_list| system("cd #{Rails.root}; #{ResqueManager::Cap.path} #{Rails.env} resque:scheduler host=#{ip_list}") }
+    end
+  end
+
+  def self.quit(ips)
+    if Rails.env =~ /development|test/
+      system("rake resque:quit_scheduler")
+    else
+      system("cd #{Rails.root}; #{ResqueManager::Cap.path} #{Rails.env} resque:quit_scheduler host=#{ips}")
+    end
+  end
+
+  def self.restart(ips)
+    quit(ips)
+    start(ips)
+  end
+
+  def self.farm_status
+    status = {}
+    if Rails.env =~ /development|test/
+      status['localhost'] = pids.present? ? 'Running' : 'Stopped'
+    else
+      Resque.schedule.values.collect { |job| job['ip'] }.each do |ip|
+        cap = `cd #{Rails.root}; #{ResqueManager::Cap.path} #{Rails.env} resque:scheduler_status hosts=#{ip}`
+        status[ip] = cap =~ /resque:scheduler is up/ ? 'Running' : 'Stopped'
+      end
+    end
+    status
+  end
+
+  # Returns an array of string pids of all the other workers on this
+  # machine. Useful when pruning dead workers on startup.
+  def self.pids
+    `ps -A -o pid,command | grep [r]esque:scheduler`.split("\n").map do |line|
+      line.split(' ')[0]
+    end
+  end
+end
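The self.pids method above shells out to ps and greps for the scheduler process; the Ruby side of that is just "keep the scheduler lines, take the first column". A self-contained sketch of that extraction, with invented ps output and the filtering done in Ruby here instead of the shell grep the gem uses:

# Sample `ps -A -o pid,command` style output (invented for illustration).
ps_output = " 1234 resque:scheduler\n 5678 bash\n 9012 resque:work QUEUE=*\n"

scheduler_pids = ps_output.split("\n")
                          .select { |line| line.include?('resque:scheduler') }
                          .map { |line| line.split(' ')[0] }

puts scheduler_pids.inspect   # => ["1234"]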
data/lib/resque_manager/overrides/resque_status/chained_status.rb

@@ -0,0 +1,46 @@
+module Resque
+  module Plugins
+    module ChainedStatus
+
+      def self.included(base)
+        base.class_eval do
+          include Resque::Plugins::Status
+          extend ClassOverrides
+          include InstanceOverrides
+        end
+      end
+
+      module InstanceOverrides
+        # OVERRIDE to just use the name of its parent job.
+        def name
+          status.name rescue nil
+        end
+
+        def completed(*messages)
+          super(*messages)
+          # "You must override this method to provide your own logic of when to actually call complete."
+          # if counter(:processed) >= options['total']
+          #   super
+          # end
+        end
+      end
+
+      module ClassOverrides
+        # OVERRIDE to grab the uuid out of options so it can be chained to the calling worker
+        # instead of creating a new uuid.
+        def enqueue_to(queue, klass, options = {})
+          # tie this job to the status of the calling job
+          opts = HashWithIndifferentAccess.new(options)
+          raise ArgumentError, "You must supply a :uuid attribute in your call to create." unless opts['uuid']
+          uuid = opts['uuid']
+          if Resque.enqueue_to(queue, klass, uuid, options)
+            uuid
+          else
+            Resque::Plugins::Status::Hash.remove(uuid)
+            nil
+          end
+        end
+      end
+    end
+  end
+end
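As a usage sketch (not taken from the gem's docs): a child job mixes in Resque::Plugins::ChainedStatus and is enqueued with the uuid of the job that spawned it, so both report into the same status record. ChildJob and the surrounding values are hypothetical, and this assumes a Rails app with resque, resque-status and resque_manager loaded and Redis reachable:

# Hypothetical child job; requires resque, resque-status and this gem to be loaded.
class ChildJob
  include Resque::Plugins::ChainedStatus

  @queue = :default

  def perform
    # ... do a slice of the parent job's work, reporting into its status ...
    tick("child working")
  end
end

# Enqueue via the overridden enqueue_to shown above; the uuid option ties the
# child to the parent job's status instead of generating a new one.
parent_uuid = "abc123"  # normally the uuid of the enqueuing job
ChildJob.enqueue_to(:default, ChildJob, 'uuid' => parent_uuid)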
data/lib/resque_manager/overrides/resque_status/status.rb

@@ -0,0 +1,161 @@
+module Resque
+  module Plugins
+    module Status
+
+
+      # OVERRIDE so we can add OverridesAndExtensionsClassMethods
+      def self.included(base)
+        attr_reader :worker
+
+        # can't call super, so add the ClassMethods that resque-status was adding
+        base.extend(ClassMethods) # add the methods from the resque-status gem
+        base.extend(ClassOverridesAndExtensions)
+      end
+
+      module ClassOverridesAndExtensions
+
+        # OVERRIDE to set the name that will be displayed on the status page for this job when it is first queued.
+        # The name is replaced when set_status is called (which happens on #tick) with the value from your name method,
+        # but the UI name field would be blank when the job is first queued, so set something here.
+        def enqueue_to(queue, klass, options = {})
+          uuid = Resque::Plugins::Status::Hash.generate_uuid
+          Resque::Plugins::Status::Hash.create uuid, {name: "#{self.name}: #{options.inspect}"}.merge(options)
+
+          if Resque.enqueue_to(queue, klass, uuid, options)
+            uuid
+          else
+            Resque::Plugins::Status::Hash.remove(uuid)
+            nil
+          end
+        end
+
+        # This is the method called by Resque::Worker when processing jobs. It
+        # creates a new instance of the job class and populates it with the uuid and
+        # options.
+        #
+        # You should not override this method, rather the <tt>perform</tt> instance method.
+        # OVERRIDE to get the worker and set it when initializing the class
+        def perform(uuid=nil, options = {})
+          uuid ||= Resque::Plugins::Status::Hash.generate_uuid
+          worker = yield if block_given?
+          instance = new(uuid, worker, options)
+          instance.safe_perform!
+          instance
+        end
+
+        # OVERRIDE to clear all the keys that have the UUID: status, counters, etc.
+        def remove(uuid)
+          Resque.redis.zrem(set_key, uuid)
+          Resque.redis.keys("*#{uuid}").each do |key|
+            Resque.redis.del(key)
+          end
+        end
+
+        # If multiple workers are running at once and you need an incrementer, you can't use the status' num attribute because of race conditions.
+        # You can use a counter and call incr on it instead.
+        def counter_key(counter, uuid)
+          "#{counter}:#{uuid}"
+        end
+
+        def counter(counter, uuid)
+          Resque.redis[counter_key(counter, uuid)].to_i
+        end
+
+        def incr_counter(counter, uuid)
+          key = counter_key(counter, uuid)
+          n = Resque.redis.incr(key)
+          if Resque::Plugins::Status::Hash.expire_in
+            Resque.redis.expire(key, Resque::Plugins::Status::Hash.expire_in)
+          end
+          n
+        end
+      end
+
+      # Sets the status of the job for the current iteration. You should use
+      # the <tt>at</tt> method if you have actual numbers to track the iteration count.
+      # This will kill the job if it has been added to the kill list with
+      # <tt>Resque::Status.kill()</tt>
+      def tick(*messages)
+        kill! if should_kill? || status.killed?
+        set_status({'status' => 'working'}, *messages)
+        # check to see if the worker doing the job has been paused; pause the job if so
+        if self.worker && self.worker.paused?
+          loop do
+            # Set the status to paused.
+            # May need to do this repeatedly because there could be workers in a chained job still doing work.
+            pause! unless status.paused?
+            break unless self.worker.paused?
+            sleep 60
+          end
+          set_status({'status' => 'working'}, *messages) unless status && (status.completed? || status.paused? || status.killed?)
+        end
+      end
+
+      # Pause the current job, setting the status to 'paused'
+      def pause!
+        set_status({
+                       'status' => 'paused',
+                       'message' => "#{worker} paused at #{Time.now}"
+                   })
+      end
+
+      # Create a new instance with <tt>uuid</tt> and <tt>options</tt>
+      # OVERRIDE to add the worker attr
+      def initialize(uuid, worker = nil, options = {})
+        @uuid = uuid
+        @options = options
+        @worker = worker
+      end
+
+      # Run by the Resque::Worker when processing this job. It wraps the <tt>perform</tt>
+      # method ensuring that the final status of the job is set regardless of error.
+      # If an error occurs within the job's work, it will set the status as failed and
+      # re-raise the error.
+      # OVERRIDE to kill it. The parent job may have been killed, so all child jobs should die as well.
+      def safe_perform!
+        k = should_kill?
+        kill! if k
+        unless k || (status && status.killed?)
+          set_status({'status' => 'working'})
+          perform
+          if status && status.failed?
+            on_failure(status.message) if respond_to?(:on_failure)
+            return
+          elsif status && !status.completed?
+            completed
+          end
+          on_success if respond_to?(:on_success)
+        end
+      rescue Killed
+        Rails.logger.info "Job #{self} Killed at #{Time.now}"
+        Resque::Plugins::Status::Hash.killed(uuid)
+        on_killed if respond_to?(:on_killed)
+      rescue => e
+        Rails.logger.error e
+        failed("The task failed because of an error: #{e}")
+        if respond_to?(:on_failure)
+          on_failure(e)
+        else
+          raise e
+        end
+      end
+
+      # Sets a message for the job on the overview page.
+      # It can be set repeatedly during the job's processing to
+      # indicate the status of the job.
+      def overview_message=(message)
+        # there is no worker when run inline
+        self.worker.overview_message = message if self.worker
+      end
+
+      def incr_counter(counter)
+        self.class.incr_counter(counter, uuid)
+      end
+
+      def counter(counter)
+        self.class.counter(counter, uuid)
+      end
+
+    end
+  end
+end
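To illustrate the counter helpers above (which exist because, as the comment notes, the status hash's own num attribute is not safe when several workers update it at once): a job can call incr_counter, which maps to a Redis INCR, and read the value back with counter. The job class below is a hypothetical sketch, assuming a Rails app with resque_manager loaded and Redis reachable:

# Hypothetical job using the counter and overview_message helpers defined above.
class BatchImportJob
  include Resque::Plugins::Status

  def perform
    total = options['total'].to_i
    total.times do |i|
      # ... import record i ...
      incr_counter(:processed)                              # atomic Redis INCR, race-safe across workers
      tick("processed #{counter(:processed)} of #{total}")  # also honors pause/kill via the override above
    end
    self.overview_message = "imported #{counter(:processed)} records"
  end
end

# Enqueued with the overridden enqueue_to, e.g.:
# BatchImportJob.enqueue_to(:imports, BatchImportJob, 'total' => 500)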