resque_ui 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Gemfile +6 -0
- data/Gemfile.lock +102 -0
- data/History.txt +5 -0
- data/MIT-LICENSE +21 -0
- data/README.markdown +279 -0
- data/Rakefile +49 -0
- data/VERSION.yml +5 -0
- data/app/assets/images/idle.png +0 -0
- data/app/assets/images/poll.png +0 -0
- data/app/assets/images/working.png +0 -0
- data/app/assets/javascripts/resque/jquery-1.3.2.min.js +19 -0
- data/app/assets/javascripts/resque/jquery.relatize_date.js +95 -0
- data/app/assets/javascripts/resque/ranger.js +24 -0
- data/app/assets/stylesheets/resque/resque.css +93 -0
- data/app/assets/stylesheets/resque/resque_reset.css +48 -0
- data/app/controllers/resque_controller.rb +236 -0
- data/app/helpers/resque_helper.rb +107 -0
- data/app/views/layouts/resque.html.erb +39 -0
- data/app/views/resque/_key.html.erb +17 -0
- data/app/views/resque/_next_more.html.erb +10 -0
- data/app/views/resque/_queues.html.erb +52 -0
- data/app/views/resque/_status_styles.erb +98 -0
- data/app/views/resque/_workers.html.erb +110 -0
- data/app/views/resque/_working.html.erb +69 -0
- data/app/views/resque/delayed.html.erb +35 -0
- data/app/views/resque/delayed_timestamp.html.erb +26 -0
- data/app/views/resque/error.erb +1 -0
- data/app/views/resque/failed.html.erb +54 -0
- data/app/views/resque/overview.html.erb +4 -0
- data/app/views/resque/schedule.html.erb +96 -0
- data/app/views/resque/stats.html.erb +62 -0
- data/app/views/resque/status.html.erb +57 -0
- data/app/views/resque/statuses.html.erb +72 -0
- data/app/views/resque/workers.html.erb +1 -0
- data/lib/resque_ui/cap.rb +6 -0
- data/lib/resque_ui/cap_recipes.rb +106 -0
- data/lib/resque_ui/overrides/resque/failure/failure.rb +22 -0
- data/lib/resque_ui/overrides/resque/job.rb +12 -0
- data/lib/resque_ui/overrides/resque/resque.rb +8 -0
- data/lib/resque_ui/overrides/resque/worker.rb +230 -0
- data/lib/resque_ui/overrides/resque_scheduler/resque_scheduler.rb +58 -0
- data/lib/resque_ui/overrides/resque_status/chained_job_with_status.rb +24 -0
- data/lib/resque_ui/overrides/resque_status/job_with_status.rb +59 -0
- data/lib/resque_ui/overrides/resque_status/status.rb +53 -0
- data/lib/resque_ui.rb +26 -0
- data/lib/tasks/failure.rake +8 -0
- data/lib/tasks/scheduler.rake +11 -0
- data/lib/tasks/worker.rake +80 -0
- data/rdoc/Resque/ChainedJobWithStatus.html +284 -0
- data/rdoc/Resque/Failure/Base.html +229 -0
- data/rdoc/Resque/Failure.html +202 -0
- data/rdoc/Resque/Job.html +202 -0
- data/rdoc/Resque/JobWithStatus.html +410 -0
- data/rdoc/Resque/Status.html +368 -0
- data/rdoc/Resque/Worker.html +1104 -0
- data/rdoc/Resque.html +232 -0
- data/rdoc/ResqueScheduler.html +434 -0
- data/rdoc/ResqueUi/Cap.html +150 -0
- data/rdoc/ResqueUi/Engine.html +150 -0
- data/rdoc/ResqueUi.html +157 -0
- data/rdoc/created.rid +13 -0
- data/rdoc/images/brick.png +0 -0
- data/rdoc/images/brick_link.png +0 -0
- data/rdoc/images/bug.png +0 -0
- data/rdoc/images/bullet_black.png +0 -0
- data/rdoc/images/bullet_toggle_minus.png +0 -0
- data/rdoc/images/bullet_toggle_plus.png +0 -0
- data/rdoc/images/date.png +0 -0
- data/rdoc/images/find.png +0 -0
- data/rdoc/images/loadingAnimation.gif +0 -0
- data/rdoc/images/macFFBgHack.png +0 -0
- data/rdoc/images/package.png +0 -0
- data/rdoc/images/page_green.png +0 -0
- data/rdoc/images/page_white_text.png +0 -0
- data/rdoc/images/page_white_width.png +0 -0
- data/rdoc/images/plugin.png +0 -0
- data/rdoc/images/ruby.png +0 -0
- data/rdoc/images/tag_green.png +0 -0
- data/rdoc/images/wrench.png +0 -0
- data/rdoc/images/wrench_orange.png +0 -0
- data/rdoc/images/zoom.png +0 -0
- data/rdoc/index.html +163 -0
- data/rdoc/js/darkfish.js +116 -0
- data/rdoc/js/jquery.js +32 -0
- data/rdoc/js/quicksearch.js +114 -0
- data/rdoc/js/thickbox-compressed.js +10 -0
- data/rdoc/lib/resque_overrides_rb.html +54 -0
- data/rdoc/lib/resque_scheduler_overrides_rb.html +52 -0
- data/rdoc/lib/resque_status_overrides_rb.html +52 -0
- data/rdoc/lib/resque_ui/cap_rb.html +52 -0
- data/rdoc/lib/resque_ui/cap_recipes_rb.html +58 -0
- data/rdoc/lib/resque_ui/engine_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque/failure/failure_rb.html +54 -0
- data/rdoc/lib/resque_ui/overrides/resque/job_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque/resque_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque/worker_rb.html +54 -0
- data/rdoc/lib/resque_ui/overrides/resque_scheduler/resque_scheduler_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque_status/chained_job_with_status_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque_status/job_with_status_rb.html +52 -0
- data/rdoc/lib/resque_ui/overrides/resque_status/status_rb.html +52 -0
- data/rdoc/lib/resque_ui/resque_ui_rb.html +52 -0
- data/rdoc/lib/resque_ui/tasks_rb.html +64 -0
- data/rdoc/lib/resque_ui_rb.html +76 -0
- data/rdoc/rdoc.css +763 -0
- data/resque_ui.gemspec +153 -0
- data/test/resque_ui_test.rb +8 -0
- data/test/test_helper.rb +3 -0
- metadata +205 -0
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
<%# Statuses page: lists recent JobWithStatus jobs with progress and a kill link. %>
<%= render(:partial => 'status_styles') %>

<h1 class='wi'>Statuses</h1>
<%unless @statuses.empty?%>
  <form method="POST" action="clear_statuses" class='clear-failed'>
    <input type='submit' name='' value='Clear Statuses' />
  </form>
<%end%>
<p class='intro'>These are recent jobs created with the JobWithStatus class</p>
<%# Clamp the upper bound so the label never claims more rows than exist
    (pages are 20 rows; the last page is usually partial). %>
<p class='sub'>Showing <%=@start%> to <%= [@start + 20, @size].min %> of <b><%= @size %></b> statuses</p>
<table>
  <tr>
    <th>ID</th>
    <th style="width:40%">Name</th>
    <th>Status</th>
    <th>Last Updated</th>
    <th class="progress">% Complete</th>
    <th>Message</th>
    <th>Kill</th>
  </tr>
  <% unless @statuses.empty? %>
      <% @statuses.each do |status| %>
          <tr>
            <td><%= link_to(status.uuid, {:action => 'status', :id => status.uuid}) %></td>
            <td><%= status.name %></td>
            <td class="status status-<%= status.status %>"><%= status.status %></td>
            <td class="time"><%= format_time(Time.zone.parse(status.time.to_s)) %></td>
            <td class="progress">
              <div class="progress-bar" style="width:<%= status.pct_complete %>%"> </div>
              <div class="progress-pct"><%= status.pct_complete ? "#{status.pct_complete}%" : '' %></div>
            </td>
            <td><%= status.message.html_safe %></td>
            <td><% if status.killable? %><%= link_to('Kill', {:action => :kill, :id => status.uuid}, :class => 'kill') %><% end %></td>
          </tr>
      <% end %>
  <% else %>
      <tr>
        <td colspan="7" class='no-data'>No Statuses right now...</td>
      </tr>
  <% end %>
</table>

<% unless @statuses.empty? %>
    <%= render(:partial => 'next_more', :locals => {:start => @start, :size => @size}) %>
<% end %>

<%= status_poll(@start) %>

<script type="text/javascript" charset="utf-8">
  jQuery(function($) {

    // Confirm, then POST the kill URL via ajax and remove the link on success.
    $('a.kill').click(function(e) {
      e.preventDefault();
      var $link = $(this),
          url = $link.attr('href'),
          confirmed = confirm("Are you sure you want to kill this job? There is only do or do not. There is no undo.");
      if (confirmed) {
        $link.animate({opacity: 0.5});
        $.ajax({
          url: url,
          type: 'post',
          success: function() {
            $link.remove();
          }
        });
      } else {
        return false;
      }
    });

  });
</script>
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
<%# Workers page: delegates entirely to the shared _workers partial. %>
<%= render(:partial => 'workers') %>
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# ====================================
# Resque TASKS
# ====================================


#You must set the path to your rake task in your deploy.rb file.
#ex.
# set :rake, "/opt/ruby-enterprise-1.8.6-20090421/bin/rake"
Capistrano::Configuration.instance(:must_exist).load do
  namespace :resque do
    desc "start a resque worker. optional arg: host=ip queue=name"
    task :work, :roles => :app do
      default_run_options[:pty] = true
      hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
      queue = ENV['queue'] || '*'
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; nohup #{rake} RAILS_ENV=#{stage} QUEUE=#{queue} resque:work >> log/resque_worker.log 2>&1", :hosts => hosts)
    end

    desc "Gracefully kill a worker. If the worker is working, it will finish before shutting down. arg: host=ip pid=pid"
    task :quit_worker, :roles => :app do
      if ENV['host'].nil? || ENV['host'].empty? || ENV['pid'].nil? || ENV['pid'].empty?
        # Fixed message: the task is resque:quit_worker, not resque:quit.
        puts 'You must enter the host and pid to kill..cap resque:quit_worker host=ip pid=pid'
      else
        hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
        # SIGINT triggers Resque's graceful shutdown.
        run("kill -INT #{ENV['pid']}", :hosts => hosts)
      end
    end

    desc "Gracefully kill all workers on all servers. If the worker is working, it will finish before shutting down."
    task :quit_workers, :roles => :app, :only => {:resque_restart => true} do
      default_run_options[:pty] = true
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; #{rake} RAILS_ENV=#{stage} resque:quit_workers")
    end

    desc "Kill a rogue worker. If the worker is working, it will not finish and the job will go to the Failed queue as a DirtExit. arg: host=ip pid=pid"
    task :kill_worker_with_impunity, :roles => :app do
      if ENV['host'].nil? || ENV['host'].empty? || ENV['pid'].nil? || ENV['pid'].empty?
        # Fixed message: point at this task rather than resque:quit.
        puts 'You must enter the host and pid to kill..cap resque:kill_worker_with_impunity host=ip pid=pid'
      else
        hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
        run("kill -9 #{ENV['pid']}", :hosts => hosts)
      end
    end

    desc "Kill all rogue workers on all servers. If the worker is working, it will not finish and the job will go to the Failed queue as a DirtExit. arg: host=ip pid=pid"
    task :kill_workers_with_impunity, :roles => :app do
      default_run_options[:pty] = true
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; #{rake} RAILS_ENV=#{stage} resque:kill_workers_with_impunity")
    end

    desc "start multiple resque workers. arg:count=x optional arg: host=ip queue=name"
    task :workers, :roles => :app do
      default_run_options[:pty] = true
      hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
      queue = ENV['queue'] || '*'
      count = ENV['count'] || '1'
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; nohup #{rake} RAILS_ENV=#{stage} COUNT=#{count} QUEUE=#{queue} resque:work >> log/resque_worker.log 2>&1", :hosts => hosts)
    end

    desc "Restart all workers on all servers"
    task :restart_workers, :roles => :app, :only => {:resque_restart => true} do
      default_run_options[:pty] = true
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; nohup #{rake} RAILS_ENV=#{stage} resque:restart_workers")
    end

    # ====================================
    # ResqueScheduler TASKS
    # ====================================

    desc "start the resque scheduler. optional arg: host=ip"
    task :scheduler, :roles => :app do
      default_run_options[:pty] = true
      hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
      rake = fetch(:rake, "rake")
      run("cd #{current_path}; nohup #{rake} RAILS_ENV=#{stage} resque:scheduler", :hosts => hosts)
    end

    desc "Gracefully kill the scheduler on a server. arg: host=ip"
    task :quit_scheduler, :roles => :app do
      if ENV['host'].nil? || ENV['host'].empty?
        puts 'You must enter the host to kill..cap resque:quit_scheduler host=ip pid=pid'
      else
        hosts = ENV['host'] || find_servers_for_task(current_task).collect { |s| s.host }
        rake = fetch(:rake, "rake")
        run("cd #{current_path}; nohup #{rake} RAILS_ENV=#{stage} resque:quit_scheduler", :hosts => hosts)
      end
    end

    desc "Determine if the scheduler is running or not on a server"
    task :scheduler_status, :roles => :app do
      # Bug fix: the old expression `ENV['hosts'].to_s.split(',') || fallback`
      # never used the fallback, because String#split never returns nil —
      # with no ENV['hosts'] it produced an empty host list and ran nowhere.
      hosts = ENV['hosts'] ? ENV['hosts'].split(',') : find_servers_for_task(current_task).collect { |s| s.host }

      status = nil

      run("ps -eaf | grep resque | grep -v cap", :hosts => hosts) do |channel, stream, data|
        status = (data =~ /resque:scheduler/) ? 'up' : 'down'
        puts " ** [#{stream} :: #{channel[:host]}] resque:scheduler is #{status}"
      end
    end
  end
end
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
require 'resque/failure/redis'

module Resque
  module Failure

    # Requeues all failed jobs of a given class.
    #
    # Walks the Redis :failed list. Each entry whose payload class matches
    # +failed_class+ (or every entry, when +failed_class+ is blank) is
    # removed from the failed list and re-enqueued with its original
    # arguments; non-matching entries are skipped by advancing the index.
    #
    # failed_class - String class name to requeue, or nil/'' to requeue all.
    def self.requeue_class(failed_class)
      length = Resque.redis.llen(:failed)
      i = 0
      length.times do
        f = Resque.list_range(:failed, i, 1)
        if f && (failed_class.blank? || (f["payload"]["class"] == failed_class))
          Resque.redis.lrem(:failed, 0, f.to_json)
          args = f["payload"]["args"]
          # Resolve the stored class name with constantize instead of eval:
          # the payload comes out of Redis, and eval would execute arbitrary
          # Ruby if that stored string were ever tampered with. ActiveSupport
          # is already a dependency here (see `blank?` above).
          Resque.enqueue(f["payload"]["class"].constantize, *args)
        else
          i += 1
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
module Resque
  class Job
    # Attempts to perform the work represented by this job instance.
    # Calls #perform on the class given in the payload with the
    # arguments given in the payload (no arguments when the payload has
    # none). The block pushes status strings onto the worker so the UI
    # can display job progress.
    def perform
      job_args = args || []
      payload_class.perform(*job_args) { |status| self.worker.status = status }
    end

  end
end
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
require 'socket'

module Resque
  class Worker

    # Determines this machine's outward-facing IP address by "connecting"
    # a UDP socket toward a public address (UDP connect sends no packet)
    # and reading back the local address the OS selected.
    def local_ip
      orig, Socket.do_not_reverse_lookup = Socket.do_not_reverse_lookup, true # turn off reverse DNS resolution temporarily

      UDPSocket.open do |s|
        s.connect '64.233.187.99', 1
        s.addr.last
      end
    ensure
      Socket.do_not_reverse_lookup = orig
    end

    # The string representation is the same as the id for this worker
    # instance. Can be used with `Worker.find`.
    # Format: hostname(ip):pid:thread_object_id:queues — the thread
    # segments let multiple worker threads coexist in one process.
    def to_s
      @to_s || "#{hostname}(#{local_ip}):#{Process.pid}:#{Thread.current.object_id}:#{Thread.current[:queues]}"
    end

    alias_method :id, :to_s

    # Process-id segment of the worker id (ActiveSupport's Array#second).
    def pid
      to_s.split(':').second
    end

    # Thread object_id segment of the worker id.
    def thread
      to_s.split(':').third
    end

    # Queue-list segment of the worker id.
    def queue
      to_s.split(':').last
    end

    # All registered workers sharing this worker's IP and process id
    # (i.e. the sibling threads of this process).
    def workers_in_pid
      Array(redis.smembers(:workers)).select { |id| id =~ /\(#{ip}\):#{pid}/ }.map { |id| Resque::Worker.find(id) }.compact
    end

    # Extracts the dotted-quad IP from the host segment of the worker id.
    def ip
      to_s.split(':').first[/\b(?:\d{1,3}\.){3}\d{1,3}\b/]
    end

    # Queue lists of every worker thread in this process.
    def queues_in_pid
      workers_in_pid.collect { |w| w.queue }
    end

    #OVERRIDE for multithreaded workers
    # Queue list comes from the current thread; '*' means all known queues.
    def queues
      Thread.current[:queues] == "*" ? Resque.queues.sort : Thread.current[:queues].split(',')
    end

    # Runs all the methods needed when a worker begins its lifecycle.
    #OVERRIDE for multithreaded workers
    # Signal handlers and dead-worker pruning happen only on the main
    # thread; every thread still registers itself as a worker.
    def startup
      enable_gc_optimizations
      if Thread.current == Thread.main
        register_signal_handlers
        prune_dead_workers
      end
      run_hook :before_first_fork
      register_worker

      # Fix buffering so we can `rake resque:work > resque.log` and
      # get output from the child in there.
      $stdout.sync = true
    end

    # Schedule this worker for shutdown. Will finish processing the
    # current job.
    #OVERRIDE for multithreaded workers
    # Flags every thread (via t[:shutdown]) as well as this instance.
    def shutdown
      log 'Exiting...'
      Thread.list.each { |t| t[:shutdown] = true }
      @shutdown = true
    end

    # Looks for any workers which should be running on this server
    # and, if they're not, removes them from Redis.
    #
    # This is a form of garbage collection. If a server is killed by a
    # hard shutdown, power failure, or something else beyond our
    # control, the Resque workers will not die gracefully and therefor
    # will leave stale state information in Redis.
    #
    # By checking the current Redis state against the actual
    # environment, we can determine if Redis is old and clean it up a bit.
    def prune_dead_workers
      Worker.all.each do |worker|
        host, pid, thread, queues = worker.id.split(':')
        next unless host.include?(hostname)
        next if worker_pids.include?(pid)
        log! "Pruning dead worker: #{worker}"
        worker.unregister_worker
      end
    end

    # Sibling workers in this process that currently report a non-empty
    # job hash (i.e. are mid-job).
    def all_workers_in_pid_working
      workers_in_pid.select { |w| (hash = w.processing) && !hash.empty? }
    end

    # Jruby won't allow you to trap the QUIT signal, so we're changing the INT signal to replace it for Jruby.
    def register_signal_handlers
      trap('TERM') { shutdown! }
      trap('INT') { shutdown }

      begin
        s = trap('QUIT') { shutdown }
        warn "Signal QUIT not supported." unless s
        s = trap('USR1') { kill_child }
        warn "Signal USR1 not supported." unless s
        s = trap('USR2') { pause_processing }
        warn "Signal USR2 not supported." unless s
        s = trap('CONT') { unpause_processing }
        warn "Signal CONT not supported." unless s
      rescue ArgumentError
        warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
      end
    end

    # This is the main workhorse method. Called on a Worker instance,
    # it begins the worker life cycle.
    #
    # The following events occur during a worker's life cycle:
    #
    # 1. Startup: Signals are registered, dead workers are pruned,
    # and this worker is registered.
    # 2. Work loop: Jobs are pulled from a queue and processed.
    # 3. Teardown: This worker is unregistered.
    #
    # Can be passed an integer representing the polling frequency.
    # The default is 5 seconds, but for a semi-active site you may
    # want to use a smaller value.
    #
    # Also accepts a block which will be passed the job as soon as it
    # has completed processing. Useful for testing.
    #OVERRIDE for multithreaded workers
    # Also checks Thread.current[:shutdown] (set by #shutdown) and, after
    # the loop, lingers until every sibling worker thread is idle.
    def work(interval = 5, &block)
      $0 = "resque: Starting"
      startup

      loop do
        break if @shutdown || Thread.current[:shutdown]

        if not @paused and job = reserve
          log "got: #{job.inspect}"
          run_hook :before_fork
          working_on job

          if @child = fork
            rand # Reseeding
            procline "Forked #{@child} at #{Time.now.to_i}"
            Process.wait
          else
            procline "Processing #{job.queue} since #{Time.now.to_i}"
            perform(job, &block)
            exit! unless @cant_fork
          end

          done_working
          @child = nil
        else
          break if interval.to_i == 0
          log! "Sleeping for #{interval.to_i}"
          # NOTE(review): @queues may be unset in this threaded override —
          # queue lists live in Thread.current[:queues] (see #queues).
          # Confirm before relying on this procline message.
          procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
          sleep interval.to_i
        end
      end
      unregister_worker rescue nil
      loop do
        #hang onto the process until all threads are done
        break if all_workers_in_pid_working.blank?
        sleep interval.to_i
      end
    ensure
      unregister_worker
    end

    # logic for mappged_mget changed where it returns keys with nil values in latest redis gem.
    def self.working
      names = all
      return [] unless names.any?
      names.map! { |name| "worker:#{name}" }
      redis.mapped_mget(*names).map do |key, value|
        find key.sub("worker:", '') unless value.nil?
      end.compact
    end

    # Returns an array of string pids of all the other workers on this
    # machine. Useful when pruning dead workers on startup.
    def worker_pids
      `ps -A -o pid,command | grep [r]esque`.split("\n").map do |line|
        line.split(' ')[0]
      end
    end

    # Stores a free-form status string in this worker's Redis entry
    # (merged into the current job hash) so the UI can show progress.
    def status=(status)
      data = encode(job.merge('status' => status))
      redis.set("worker:#{self}", data)
    end

    # Reads back the status most recently written via #status=.
    def status
      job['status']
    end

    # Starts worker(s): in development/test, a background rake in-process;
    # otherwise shells out to the Capistrano resque:work task.
    def self.start(ips, queues)
      if Rails.env =~ /development|test/
        Thread.new(queues) { |queue| system("rake QUEUE=#{queue} resque:work") }
      else
        Thread.new(queues, ips) { |queue, ip_list| system("cd #{Rails.root}; #{ResqueUi::Cap.path} #{Rails.env} resque:work host=#{ip_list} queue=#{queue}") }
      end
    end

    # Gracefully stops this worker: SIGINT locally, or the Capistrano
    # resque:quit_worker task remotely.
    def quit
      if Rails.env =~ /development|test/
        system("kill -INT #{self.pid}")
      else
        system("cd #{Rails.root}; #{ResqueUi::Cap.path} #{Rails.env} resque:quit_worker pid=#{self.pid} host=#{self.ip}")
      end
    end

    # Restarts every queue handled by this pid: capture the queue list,
    # quit, then start again on the same IP ('#' joins multiple lists).
    def restart
      queues = self.queues_in_pid.join('#')
      quit
      self.class.start(self.ip, queues)
    end

  end
end
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
module ResqueScheduler
  # Writing the schedule back is intentionally unsupported in this
  # override; the schedule is managed in Redis directly.
  def schedule=(schedule_hash)
    raise 'not implemented'
  end

  # Returns the schedule hash
  def schedule
    #the scheduler gem expects a hash, but it's now stored in
    #redis as an array.
    hash = {}
    # NOTE(review): the -0 count evaluates to 0, which Resque.list_range
    # turns into an unbounded LRANGE (the whole list). Looks deliberate,
    # but confirm against the installed resque version before changing.
    Resque.list_range(:scheduled, 0, -0).each do |job|
      hash.merge! job
    end
    hash
  end

  # Starts the scheduler: background rake in development/test, else the
  # Capistrano resque:scheduler task against the given ips.
  def self.start(ips)
    if Rails.env =~ /development|test/
      Thread.new{system("rake resque:scheduler")}
    else
      Thread.new(ips){|ip_list|system("cd #{Rails.root}; #{ResqueUi::Cap.path} #{Rails.env} resque:scheduler host=#{ip_list}")}
    end
  end

  # Stops the scheduler on the given ips (rake locally, cap remotely).
  def self.quit(ips)
    if Rails.env =~ /development|test/
      system("rake resque:quit_scheduler")
    else
      system("cd #{Rails.root}; #{ResqueUi::Cap.path} #{Rails.env} resque:quit_scheduler host=#{ips}")
    end
  end

  # Convenience: quit then start on the same ips.
  def self.restart(ips)
    quit(ips)
    start(ips)
  end

  # Builds a {ip => 'Running'|'Stopped'} map. Remotely this shells out to
  # the cap resque:scheduler_status task and greps its output.
  def self.farm_status
    status = {}
    if Rails.env =~ /development|test/
      status['localhost'] = pids.present? ? 'Running' : 'Stopped'
    else
      Resque.schedule.values.collect{|job| job['ip']}.each do |ip|
        cap = `cd #{Rails.root}; #{ResqueUi::Cap.path} #{Rails.env} resque:scheduler_status hosts=#{ip}`
        status[ip] = cap =~ /resque:scheduler is up/ ? 'Running' : 'Stopped'
      end
    end
    status
  end

  # Returns an array of string pids of all the other workers on this
  # machine. Useful when pruning dead workers on startup.
  def self.pids
    `ps -A -o pid,command | grep [r]esque:scheduler`.split("\n").map do |line|
      line.split(' ')[0]
    end
  end
end
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
module Resque
  # A JobWithStatus that shares (chains onto) the status record of the
  # job that enqueued it, instead of creating its own.
  class ChainedJobWithStatus < JobWithStatus
    # Name comes from the shared status record; nil if it can't be read.
    def name
      status.name rescue nil
    end

    def completed(*messages)
      super(*messages)
      # "You must override this method to provide your own logic of when to actually call complete."
      # if counter(:processed) >= options['total']
      # super
      # end
    end

    # Enqueues +klass+ tied to an existing status: unlike the parent,
    # no new status is created — the caller must pass :uuid in +options+.
    # Returns the uuid. Raises ArgumentError when :uuid is missing.
    def self.enqueue(klass, options = {})
      #tie this job to the status of the calling job
      opts = HashWithIndifferentAccess.new(options)
      raise ArgumentError, "You must supply a :uuid attribute in your call to create." unless opts['uuid']
      uuid = opts['uuid']
      Resque.enqueue(klass, uuid, options)
      uuid
    end
  end
end
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
module Resque
  class JobWithStatus
    # Adds a job of type <tt>klass<tt> to the queue with <tt>options<tt>.
    # Returns the UUID of the job
    # override to pass actual parameters instead of a single hash, to make backward compatible with existing resque jobs.
    def self.enqueue(klass, options = {})
      uuid = Resque::Status.create :name => "#{self.name}: #{options.inspect}"
      Resque.enqueue(klass, uuid, options)
      uuid
    end

    # sets the status of the job for the current iteration. You should use
    # the <tt>at</tt> method if you have actual numbers to track the iteration count.
    # This will kill the job if it has been added to the kill list with
    # <tt>Resque::Status.kill()</tt>
    def tick(*messages)
      kill! if should_kill? || status.killed?
      set_status({'status' => 'working'}, *messages)
    end

    # Run by the Resque::Worker when processing this job. It wraps the <tt>perform</tt>
    # method ensuring that the final status of the job is set regardless of error.
    # If an error occurs within the job's work, it will set the status as failed and
    # re-raise the error.
    def safe_perform!
      unless should_kill? || (status && status.killed?)
        set_status({'status' => 'working'})
        perform
        # A kill requested during perform is honored before completion.
        kill! if should_kill?
        completed unless status && status.completed?
        on_success if respond_to?(:on_success)
      end
    rescue Killed
      logger.info "Job #{self} Killed at #{Time.now}"
      Resque::Status.killed(uuid)
      on_killed if respond_to?(:on_killed)
    rescue => e
      logger.error e
      failed("The task failed because of an error: #{e}")
      # Jobs without an on_failure hook re-raise so Resque records the
      # failure; jobs with the hook swallow the error after handling it.
      if respond_to?(:on_failure)
        on_failure(e)
      else
        raise e
      end
    end

    # Human-readable job name shown in the Statuses UI.
    def name
      "#{self.class.name}: #{options.inspect}"
    end

    #If multiple workers are running at once, you can't use the status' num
    #attribute as an incrementer because of race conditions; use these
    #Redis-backed counters instead (see Resque::Status.incr_counter).
    def incr_counter(counter)
      Resque::Status.incr_counter(counter, uuid)
    end

    # Current value of the named per-job counter.
    def counter(counter)
      Resque::Status.counter(counter, uuid)
    end
  end
end
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
module Resque
  class Status
    # Return the <tt>num</tt> most recent status/job UUIDs in reverse chronological order.
    #override the gem to fix the ordering
    def self.status_ids(range_start = nil, range_end = nil)
      unless range_end && range_start
        # Because we want a reverse chronological order, we need to get a range starting
        # by the higest negative number.
        redis.zrevrange(set_key, 0, -1) || []
      else
        # Because we want a reverse chronological order, we need to get a range starting
        # by the higest negative number. The ordering is transparent from the API user's
        # perspective so we need to convert the passed params
        if range_start == 0
          range_start = -1
        else
          range_start += 1
        end
        (redis.zrange(set_key, -(range_end.abs), -(range_start.abs)) || []).reverse
      end
    end

    # clear statuses from redis passing an optional range. See `statuses` for info
    # about ranges
    def self.clear(range_start = nil, range_end = nil)
      status_ids(range_start, range_end).each do |id|
        redis.zrem(set_key, id)
        # NOTE(review): KEYS is a blocking full scan of the Redis keyspace,
        # run once per cleared status — fine for small instances, slow on
        # large ones.
        Resque.redis.keys("*#{id}").each do |key|
          Resque.redis.del(key)
        end
      end
    end

    #If multiple workers are running at once and you need an incrementer, you can't use the status' num attribute because of race conditions.
    #You can use a counter and call incr on it instead
    def self.counter_key(counter, uuid)
      "#{counter}:#{uuid}"
    end

    # Current integer value of the counter (nil from Redis becomes 0).
    def self.counter(counter, uuid)
      redis[counter_key(counter, uuid)].to_i
    end

    # Atomically increments the counter via Redis INCR; re-applies the
    # configured TTL (expire_in) on each bump so the key tracks the job's
    # lifetime. Returns the new value.
    def self.incr_counter(counter, uuid)
      key = counter_key(counter, uuid)
      n = redis.incr(key)
      if expire_in
        redis.expire(key, expire_in)
      end
      n
    end
  end
end
|
data/lib/resque_ui.rb
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
# Entry point for the resque_ui gem: loads the Resque overrides and wires
# the gem's rake tasks into the host Rails app as an engine.
require 'resque/server'
require 'resque_ui/cap'
require 'resque_ui/overrides/resque/worker'
require 'resque_ui/overrides/resque/resque'
require 'resque_ui/overrides/resque/job'
require 'resque_ui/overrides/resque/failure/failure'
# Scheduler support is optional — only load it when resque-scheduler has
# been mixed into Resque (it adds the .schedule method).
if Resque.respond_to? :schedule
  require 'resque_scheduler/tasks'
  require 'resque_ui/overrides/resque_scheduler/resque_scheduler'
end
require 'resque/job_with_status'
require 'resque_ui/overrides/resque_status/status'
require 'resque_ui/overrides/resque_status/job_with_status'
require 'resque_ui/overrides/resque_status/chained_job_with_status'

# Add the Statuses tab to the resque-web UI.
Resque::Server.tabs << 'Statuses'

module ResqueUi
  class Engine < Rails::Engine
    # Expose the gem's rake tasks to the host application.
    rake_tasks do
      load 'tasks/worker.rake'
      load 'tasks/failure.rake'
      load 'tasks/scheduler.rake' if Resque.respond_to? :schedule
    end
  end
end
|