resque 1.24.1 → 1.25.0.pre


@@ -90,7 +90,9 @@ module Resque
       end
 
       def show_args(args)
-        Array(args).map { |a| a.inspect }.join("\n")
+        Array(args).map do |a|
+          a.to_yaml
+        end.join("\n")
       end
 
       def worker_hosts
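
The hunk above swaps inspect for to_yaml when the web UI renders job arguments. A minimal sketch of the difference in output, with a made-up argument hash:

    require 'yaml'

    args = [{ 'user_id' => 42, 'retries' => 3 }]

    # Old behaviour: one Ruby literal per argument.
    puts args.map { |a| a.inspect }.join("\n")
    # {"user_id"=>42, "retries"=>3}

    # New behaviour: YAML per argument, easier to scan for nested payloads.
    puts args.map { |a| a.to_yaml }.join("\n")
    # ---
    # user_id: 42
    # retries: 3
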
@@ -32,10 +32,14 @@ Resque::Server.helpers do
     if failed_start_at + failed_per_page > failed_size
       failed_size
     else
-      failed_start_at + failed_per_page
+      failed_start_at + failed_per_page
     end
   end
 
+  def failed_order
+    params[:order] || 'desc'
+  end
+
   def failed_class_counts(queue = params[:queue])
     classes = Hash.new(0)
     Resque::Failure.each(0, Resque::Failure.count(queue), queue) do |_, item|
@@ -45,4 +49,4 @@ Resque::Server.helpers do
     end
     classes
   end
-end
+end
@@ -18,8 +18,9 @@
 <% else %>
   <p class='sub'>Showing <%= failed_start_at %> to <%= failed_end_at %> of <b><%= failed_size %></b> jobs</p>
 
+
   <ul class='failed'>
-    <% Resque::Failure.each(failed_start_at, failed_per_page, params[:queue], params[:class]) do |id, job| %>
+    <% Resque::Failure.each(failed_start_at, failed_per_page, params[:queue], params[:class], failed_order) do |id, job| %>
      <%= partial :failed_job, :id => id, :job => job %>
     <% end %>
   </ul>
@@ -36,9 +36,9 @@
     </div>
 
     <div id="footer">
-      <p>Powered by <a href="http://github.com/defunkt/resque">Resque</a> v<%=Resque::Version%></p>
+      <p>Powered by <a href="http://github.com/resque/resque">Resque</a> v<%=Resque::Version%></p>
       <p>Connected to Redis namespace <%= Resque.redis.namespace %> on <%=Resque.redis_id%></p>
     </div>
 
   </body>
-</html>
+</html>
@@ -3,20 +3,20 @@
 <% per_page ||= 20 %>
 <%if start - per_page >= 0 || start + per_page <= size%>
   <p class='pagination'>
-    <% if start - per_page >= 0 %>
-      <a href="<%= current_page %>?start=<%= start - per_page %>" class='less'>&laquo; Previous</a>
+    <% if start + per_page <= size %>
+      <a href="<%= current_page %>?start=<%= start + per_page %>" class='more'>&laquo; Next</a>
     <% end %>
 
-    <% (0...(size / per_page.to_f).ceil).each do |page_num| %>
+    <% (size / per_page.to_f - 1).ceil.downto(0).each do |page_num| %>
       <% if start == page_num * per_page %>
-        <%= page_num + 1 %>
+        <%= page_num %>
       <% else %>
-        <a href="<%= current_page %>?start=<%= page_num * per_page %>"> <%= page_num + 1 %></a>
+        <a href="<%= current_page %>?start=<%= page_num * per_page %>"> <%= page_num %></a>
       <% end %>
     <% end %>
 
-    <% if start + per_page <= size %>
-      <a href="<%= current_page %>?start=<%= start + per_page %>" class='more'>Next &raquo;</a>
+    <% if start - per_page >= 0 %>
+      <a href="<%= current_page %>?start=<%= start - per_page %>" class='less'>Previous &raquo;</a>
     <% end %>
   </p>
 <%end%>
@@ -7,7 +7,11 @@ module Resque
   #   Kill a stat: Stat.clear(name)
   module Stat
     extend self
-    extend Helpers
+
+    # Direct access to the Redis instance.
+    def redis
+      Resque.redis
+    end
 
     # Returns the int value of a stat, given a string stat name.
     def get(stat)
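
With this change Resque::Stat reads Resque.redis directly instead of pulling in the old Helpers mixin; the public counter API is unchanged. A hedged usage sketch, assuming the resque gem is loaded and Redis is reachable (the counter name is made up):

    require 'resque'

    Resque::Stat.incr('imports')   # bump the counter by one
    Resque::Stat.get('imports')    # read the current integer value
    Resque::Stat.clear('imports')  # delete the stat entirely
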
@@ -29,7 +29,7 @@ namespace :resque do
       unless Process.respond_to?('daemon')
         abort "env var BACKGROUND is set, which requires ruby >= 1.9"
       end
-      Process.daemon(true)
+      Process.daemon(true, true)
     end
 
     if ENV['PIDFILE']
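
For reference, Process.daemon takes (nochdir, noclose); the second true added here stops Ruby from reopening stdout/stderr on /dev/null, so a backgrounded worker keeps writing its logs. A minimal sketch of the same guard, assuming Ruby >= 1.9 as the task requires:

    if ENV['BACKGROUND']
      abort "env var BACKGROUND is set, which requires ruby >= 1.9" unless Process.respond_to?(:daemon)
      # nochdir = true: stay in the current working directory
      # noclose = true: keep stdin/stdout/stderr attached instead of /dev/null
      Process.daemon(true, true)
    end
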
@@ -1,3 +1,3 @@
 module Resque
-  Version = VERSION = '1.24.1'
+  Version = VERSION = '1.25.0.pre'
 end
@@ -1,4 +1,5 @@
 require 'time'
+require 'set'
 
 module Resque
   # A Resque Worker processes jobs. On platforms that support fork(2),
@@ -9,10 +10,41 @@ module Resque
   # It also ensures workers are always listening to signals from you,
   # their master, and can react accordingly.
   class Worker
-    extend Resque::Helpers
-    include Resque::Helpers
     include Resque::Logging
 
+    def redis
+      Resque.redis
+    end
+
+    def self.redis
+      Resque.redis
+    end
+
+    # Given a Ruby object, returns a string suitable for storage in a
+    # queue.
+    def encode(object)
+      if MultiJson.respond_to?(:dump) && MultiJson.respond_to?(:load)
+        MultiJson.dump object
+      else
+        MultiJson.encode object
+      end
+    end
+
+    # Given a string, returns a Ruby object.
+    def decode(object)
+      return unless object
+
+      begin
+        if MultiJson.respond_to?(:dump) && MultiJson.respond_to?(:load)
+          MultiJson.load object
+        else
+          MultiJson.decode object
+        end
+      rescue ::MultiJson::DecodeError => e
+        raise DecodeException, e.message, e.backtrace
+      end
+    end
+
     # Boolean indicating whether this worker can or can not fork.
     # Automatically set if a fork(2) fails.
     attr_accessor :cant_fork
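
The encode/decode pair added above guards against the MultiJson API change: older multi_json releases only expose encode/decode, while newer ones prefer dump/load. A standalone sketch of the same feature check; the helper name and sample payload are illustrative:

    require 'multi_json'

    def to_json_string(object)
      # Newer MultiJson responds to dump/load; fall back to encode on old versions.
      if MultiJson.respond_to?(:dump)
        MultiJson.dump(object)
      else
        MultiJson.encode(object)
      end
    end

    to_json_string('class' => 'Archive', 'args' => [35, 'reasons'])
    # => "{\"class\":\"Archive\",\"args\":[35,\"reasons\"]}"
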
@@ -139,14 +171,8 @@ module Resque
         job.worker = self
         working_on job
 
-        procline "Processing #{job.queue} since #{Time.now.to_i} [#{job.payload_class}]"
-        if @child = fork(job) do
-          unregister_signal_handlers if term_child
-          reconnect
-          perform(job, &block)
-          exit! unless run_at_exit_hooks
-        end
-
+        procline "Processing #{job.queue} since #{Time.now.to_i} [#{job.payload_class_name}]"
+        if @child = fork(job)
           srand # Reseeding
           procline "Forked #{@child} at #{Time.now.to_i}"
           begin
@@ -156,9 +182,21 @@ module Resque
           end
           job.fail(DirtyExit.new($?.to_s)) if $?.signaled?
         else
-          reconnect
-          perform(job, &block)
+          unregister_signal_handlers if will_fork? && term_child
+          begin
+
+            reconnect
+            perform(job, &block)
+
+          rescue Exception => exception
+            report_failed_job(job,exception)
+          end
+
+          if will_fork?
+            run_at_exit_hooks ? exit : exit!
+          end
         end
+
         done_working
         @child = nil
       else
@@ -171,9 +209,11 @@ module Resque
 
       unregister_worker
     rescue Exception => exception
-      log "Failed to start worker : #{exception.inspect}"
+      unless exception.class == SystemExit && !@child && run_at_exit_hooks
+        log "Failed to start worker : #{exception.inspect}"
 
-      unregister_worker(exception)
+        unregister_worker(exception)
+      end
     end
 
     # DEPRECATED. Processes a single job. If none is given, it will
@@ -188,19 +228,28 @@ module Resque
       done_working
     end
 
+    # Reports the exception and marks the job as failed
+    def report_failed_job(job,exception)
+      log "#{job.inspect} failed: #{exception.inspect}"
+      begin
+        job.fail(exception)
+      rescue Object => exception
+        log "Received exception when reporting failure: #{exception.inspect}"
+      end
+      begin
+        failed!
+      rescue Object => exception
+        log "Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}"
+      end
+    end
+
     # Processes a given job in the child.
     def perform(job)
       begin
         run_hook :after_fork, job if will_fork?
         job.perform
       rescue Object => e
-        log "#{job.inspect} failed: #{e.inspect}"
-        begin
-          job.fail(e)
-        rescue Object => e
-          log "Received exception when reporting failure: #{e.inspect}"
-        end
-        failed!
+        report_failed_job(job,e)
       else
         log "done: #{job.inspect}"
       ensure
@@ -248,12 +297,25 @@ module Resque
     # A splat ("*") means you want every queue (in alpha order) - this
     # can be useful for dynamically adding new queues.
     def queues
-      @queues.map {|queue| queue == "*" ? Resque.queues.sort : queue }.flatten.uniq
+      @queues.map do |queue|
+        queue.strip!
+        if (matched_queues = glob_match(queue)).empty?
+          queue
+        else
+          matched_queues
+        end
+      end.flatten.uniq
+    end
+
+    def glob_match(pattern)
+      Resque.queues.select do |queue|
+        File.fnmatch?(pattern, queue)
+      end.sort
     end
 
     # Not every platform supports fork. Here we do our magic to
     # determine if yours does.
-    def fork(job,&block)
+    def fork(job)
       return if @cant_fork
 
       # Only run before_fork hooks if we're actually going to fork
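
The new glob_match helper relies on File.fnmatch?, so a worker can now be started with a shell-style pattern (as the "works with globs" test further down exercises) instead of naming every queue. A small sketch of the matching rule itself, with example queue names:

    queues  = %w[critical test_one test_two]
    pattern = 'test_*'

    queues.select { |q| File.fnmatch?(pattern, q) }.sort
    # => ["test_one", "test_two"]
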
@@ -263,7 +325,7 @@ module Resque
       begin
         # IronRuby doesn't support `Kernel.fork` yet
         if Kernel.respond_to?(:fork)
-          Kernel.fork &block if will_fork?
+          Kernel.fork if will_fork?
         else
           raise NotImplementedError
         end
@@ -324,7 +386,12 @@ module Resque
     end
 
     def unregister_signal_handlers
-      trap('TERM') { raise TermException.new("SIGTERM") }
+      trap('TERM') do
+        trap ('TERM') do
+          # ignore subsequent terms
+        end
+        raise TermException.new("SIGTERM")
+      end
       trap('INT', 'DEFAULT')
 
       begin
@@ -425,7 +492,16 @@ module Resque
       all_workers = Worker.all
       known_workers = worker_pids unless all_workers.empty?
       all_workers.each do |worker|
-        host, pid, queues = worker.id.split(':')
+        host, pid, worker_queues_raw = worker.id.split(':')
+        worker_queues = worker_queues_raw.split(",")
+        unless @queues.include?("*") || (worker_queues.to_set == @queues.to_set)
+          # If the worker we are trying to prune does not belong to the queues
+          # we are listening to, we should not touch it.
+          # Attempt to prune a worker from different queues may easily result in
+          # an unknown class exception, since that worker could easily be even
+          # written in different language.
+          next
+        end
         next unless host == hostname
         next if known_workers.include?(pid)
         log! "Pruning dead worker: #{worker}"
@@ -436,8 +512,10 @@ module Resque
     # Registers ourself as a worker. Useful when entering the worker
     # lifecycle on startup.
     def register_worker
-      redis.sadd(:workers, self)
-      started!
+      redis.pipelined do
+        redis.sadd(:workers, self)
+        started!
+      end
     end
 
     # Runs a named hook, passing along any arguments.
@@ -464,12 +542,14 @@ module Resque
         job.fail(exception || DirtyExit.new)
       end
 
-      redis.srem(:workers, self)
-      redis.del("worker:#{self}")
-      redis.del("worker:#{self}:started")
+      redis.pipelined do
+        redis.srem(:workers, self)
+        redis.del("worker:#{self}")
+        redis.del("worker:#{self}:started")
 
-      Stat.clear("processed:#{self}")
-      Stat.clear("failed:#{self}")
+        Stat.clear("processed:#{self}")
+        Stat.clear("failed:#{self}")
+      end
     end
 
     # Given a job, tells Redis we're working on it. Useful for seeing
@@ -485,8 +565,10 @@ module Resque
     # Called when we are done working - clears our `working_on` state
     # and tells Redis we processed a job.
     def done_working
-      processed!
-      redis.del("worker:#{self}")
+      redis.pipelined do
+        processed!
+        redis.del("worker:#{self}")
+      end
     end
 
     # How many jobs has this worker processed? Returns an int.
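
register_worker, unregister_worker and done_working now wrap their writes in redis.pipelined, which buffers the commands client-side and sends them to Redis in one round trip. A hedged sketch with the redis-rb gem, written in the current block-argument style (the diff uses the older implicit form that redis-rb accepted at the time); key names are illustrative:

    require 'redis'

    redis = Redis.new # assumes a local Redis server

    # Both writes are queued on the pipeline and flushed together,
    # costing a single network round trip instead of two.
    redis.pipelined do |pipeline|
      pipeline.sadd('workers', 'host:1234:high,low')
      pipeline.set('worker:host:1234:high,low:started', Time.now.to_s)
    end
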
@@ -578,6 +660,8 @@ module Resque
     def worker_pids
       if RUBY_PLATFORM =~ /solaris/
         solaris_worker_pids
+      elsif RUBY_PLATFORM =~ /mingw32/
+        windows_worker_pids
       else
         linux_worker_pids
       end
@@ -0,0 +1,15 @@
+require 'test_helper'
+require 'minitest/mock'
+
+require 'resque/failure/base'
+
+class TestFailure < Resque::Failure::Base
+end
+
+describe "Base failure class" do
+  it "allows calling all without throwing" do
+    with_failure_backend TestFailure do
+      assert_empty Resque::Failure.all
+    end
+  end
+end
@@ -3,6 +3,8 @@ require 'bundler/setup'
 require 'minitest/autorun'
 require 'redis/namespace'
 
+require 'mocha/setup'
+
 $dir = File.dirname(File.expand_path(__FILE__))
 $LOAD_PATH.unshift $dir + '/../lib'
 require 'resque'
@@ -72,6 +72,38 @@ context "Resque::Worker" do
 
   end
 
+  class ::RaiseExceptionOnFailure
+
+    def self.on_failure_trhow_exception(exception,*args)
+      $TESTING = true
+      raise "The worker threw an exception"
+    end
+
+    def self.perform
+      ""
+    end
+  end
+
+  test "should not treat SystemExit as an exception in the child with run_at_exit_hooks == true" do
+
+    if worker_pid = Kernel.fork
+      Process.waitpid(worker_pid)
+    else
+      # ensure we actually fork
+      $TESTING = false
+      Resque.redis.client.reconnect
+      Resque::Job.create(:not_failing_job, RaiseExceptionOnFailure)
+      worker = Resque::Worker.new(:not_failing_job)
+      worker.run_at_exit_hooks = true
+      suppress_warnings do
+        worker.work(0)
+      end
+      exit
+    end
+
+  end
+
+
   test "does not execute at_exit hooks by default" do
     tmpfile = File.join(Dir.tmpdir, "resque_at_exit_test_file")
     FileUtils.rm_f tmpfile
@@ -93,6 +125,12 @@ context "Resque::Worker" do
 
   end
 
+  test "does report failure for jobs with invalid payload" do
+    job = Resque::Job.new(:jobs, { 'class' => 'NotAValidJobClass', 'args' => '' })
+    @worker.perform job
+    assert_equal 1, Resque::Failure.count, 'failure not reported'
+  end
+
   test "register 'run_at' time on UTC timezone in ISO8601 format" do
     job = Resque::Job.new(:jobs, {'class' => 'GoodJob', 'args' => "blah"})
     now = Time.now.utc.iso8601
@@ -260,6 +298,19 @@ context "Resque::Worker" do
     assert_equal %w( jobs high critical blahblah ).sort, processed_queues
   end
 
+  test "works with globs" do
+    Resque::Job.create(:critical, GoodJob)
+    Resque::Job.create(:test_one, GoodJob)
+    Resque::Job.create(:test_two, GoodJob)
+
+    worker = Resque::Worker.new("test_*")
+
+    worker.work(0)
+    assert_equal 1, Resque.size(:critical)
+    assert_equal 0, Resque.size(:test_one)
+    assert_equal 0, Resque.size(:test_two)
+  end
+
   test "has a unique id" do
     assert_equal "#{`hostname`.chomp}:#{$$}:jobs", @worker.to_s
   end
@@ -453,7 +504,7 @@ context "Resque::Worker" do
     workerA.work(0)
     assert $BEFORE_FORK_CALLED
   end
-
+
   test "Will not call a before_fork hook when the worker can't fork" do
     Resque.redis.flushall
     $BEFORE_FORK_CALLED = false
@@ -618,7 +669,7 @@ context "Resque::Worker" do
     assert_not_equal original_connection, Resque.redis.client.connection.instance_variable_get("@sock")
   end
 
-  test "tries to reconnect three times before giving up" do
+  test "tries to reconnect three times before giving up and the failure does not unregister the parent" do
     begin
       class Redis::Client
         alias_method :original_reconnect, :reconnect
@@ -643,6 +694,9 @@ context "Resque::Worker" do
 
     assert_equal 3, stdout.scan(/retrying/).count
     assert_equal 1, stdout.scan(/quitting/).count
+    assert_equal 0, stdout.scan(/Failed to start worker/).count
+    assert_equal 1, stdout.scan(/Redis::BaseConnectionError: Redis::BaseConnectionError/).count
+
   ensure
     class Redis::Client
       alias_method :reconnect, :original_reconnect