onyx-resque-retry 0.1.0
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- data/HISTORY.md +33 -0
- data/LICENSE +21 -0
- data/README.md +289 -0
- data/Rakefile +25 -0
- data/lib/resque-retry.rb +6 -0
- data/lib/resque-retry/server.rb +51 -0
- data/lib/resque-retry/server/views/retry.erb +48 -0
- data/lib/resque-retry/server/views/retry_timestamp.erb +59 -0
- data/lib/resque/failure/multiple_with_retry_suppression.rb +93 -0
- data/lib/resque/plugins/exponential_backoff.rb +64 -0
- data/lib/resque/plugins/retry.rb +221 -0
- data/test/exponential_backoff_test.rb +62 -0
- data/test/multiple_failure_test.rb +86 -0
- data/test/redis-test.conf +132 -0
- data/test/resque_test.rb +18 -0
- data/test/retry_criteria_test.rb +75 -0
- data/test/retry_inheriting_checks_test.rb +33 -0
- data/test/retry_test.rb +173 -0
- data/test/test_helper.rb +78 -0
- data/test/test_jobs.rb +280 -0
- metadata +189 -0
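
The test files reproduced below exercise the plugin's class-level options (retry_limit, retry_delay, retry_exceptions, retry_criteria_check) and its redis_retry_key helper. For orientation, a job opts in by extending Resque::Plugins::Retry and setting those options as class instance variables; the job below is an illustrative sketch, not part of the package source.

require 'resque-retry'

# Illustrative only: the class name and option values are made up, but the
# options themselves are the ones the bundled tests exercise.
class ExampleRetryJob
  extend Resque::Plugins::Retry
  @queue = :example

  @retry_limit      = 3    # give up after three retry attempts
  @retry_delay      = 60   # wait 60 seconds before re-enqueuing
  @retry_exceptions = nil  # nil = retry on any exception

  def self.perform(*args)
    # real work goes here
  end
end
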
data/test/multiple_failure_test.rb
ADDED
@@ -0,0 +1,86 @@
require File.dirname(__FILE__) + '/test_helper'

class MultipleFailureTest < Test::Unit::TestCase
  def setup
    Resque.redis.flushall
    @worker = Resque::Worker.new(:testing)
    @worker.register_worker

    @old_failure_backend = Resque::Failure.backend
    MockFailureBackend.errors = []
    Resque::Failure::MultipleWithRetrySuppression.classes = [MockFailureBackend]
    Resque::Failure.backend = Resque::Failure::MultipleWithRetrySuppression
  end

  def failure_key_for(klass)
    args = []
    key = "failure_" + klass.redis_retry_key(args)
  end

  def test_last_failure_is_saved_in_redis
    Resque.enqueue(LimitThreeJob)
    perform_next_job(@worker)

    # I don't like this, but...
    key = failure_key_for(LimitThreeJob)
    assert Resque.redis.exists(key)
  end

  def test_last_failure_removed_from_redis_after_error_limit
    Resque.enqueue(LimitThreeJob)
    3.times do
      perform_next_job(@worker)
    end

    key = failure_key_for(LimitThreeJob)
    assert Resque.redis.exists(key)

    perform_next_job(@worker)
    assert !Resque.redis.exists(key)
  end

  def test_on_success_failure_log_removed_from_redis
    SwitchToSuccessJob.successful_after = 1
    Resque.enqueue(SwitchToSuccessJob)
    perform_next_job(@worker)

    key = failure_key_for(SwitchToSuccessJob)
    assert Resque.redis.exists(key)

    perform_next_job(@worker)
    assert !Resque.redis.exists(key), 'key removed on success'
  ensure
    SwitchToSuccessJob.reset_defaults
  end

  def test_errors_are_suppressed_up_to_retry_limit
    Resque.enqueue(LimitThreeJob)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, MockFailureBackend.errors.size
  end

  def test_errors_are_logged_after_retry_limit
    Resque.enqueue(LimitThreeJob)
    4.times do
      perform_next_job(@worker)
    end

    assert_equal 1, MockFailureBackend.errors.size
  end

  def test_jobs_without_retry_log_errors
    5.times do
      Resque.enqueue(NoRetryJob)
      perform_next_job(@worker)
    end

    assert_equal 5, MockFailureBackend.errors.size
  end

  def teardown
    Resque::Failure.backend = @old_failure_backend
  end
end

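The perform_next_job helper and MockFailureBackend used above come from data/test/test_helper.rb, which is not reproduced in this section. A minimal sketch of what they plausibly look like, assuming Resque 1.x's Worker#reserve/#perform/#done_working and Resque::Failure::Base APIs; the real definitions may differ.

# Sketch only: pop the next job off the worker's queues and run it.
def perform_next_job(worker)
  return unless job = worker.reserve
  worker.perform(job)
  worker.done_working
end

# Sketch only: a failure backend that records error messages in memory so
# the tests can count how many failures reached the "real" backend.
class MockFailureBackend < Resque::Failure::Base
  class << self
    attr_accessor :errors
  end
  self.errors = []

  def save
    self.class.errors << exception.to_s
  end
end
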
data/test/redis-test.conf
ADDED
@@ -0,0 +1,132 @@
# Redis configuration file example

# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes

# When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
# You can specify a custom pid file location here.
pidfile ./test/redis-test.pid

# Accept connections on the specified port, default is 6379
port 9736

# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for connections.
#
# bind 127.0.0.1

# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300

# Save the DB on disk:
#
#   save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
#   after 900 sec (15 min) if at least 1 key changed
#   after 300 sec (5 min) if at least 10 keys changed
#   after 60 sec if at least 10000 keys changed
save 900 1
save 300 10
save 60 10000

# The filename where to dump the DB
dbfilename dump.rdb

# For default save/load DB in/from the working directory
# Note that you must specify a directory not a file name.
dir ./test/

# Set server verbosity to 'debug'
# it can be one of:
# debug (a lot of information, useful for development/testing)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel debug

# Specify the log file name. Also 'stdout' can be used to force
# the daemon to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile stdout

# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16

################################# REPLICATION #################################

# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.

# slaveof <masterip> <masterport>

################################## SECURITY ###################################

# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).

# requirepass foobared

################################### LIMITS ####################################

# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
# is able to open. The special value '0' means no limits.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.

# maxclients 128

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with an
# EXPIRE set. It will try to start freeing keys that are going to expire
# in little time and preserve keys with a longer time to live.
# Redis will also try to remove objects from free lists if possible.
#
# If all this fails, Redis will start to reply with errors to commands
# that will use more memory, like SET, LPUSH, and so on, and will continue
# to reply to most read-only commands like GET.
#
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
# 'state' server or cache, not as a real DB. When Redis is used as a real
# database the memory usage will grow over the weeks, it will be obvious if
# it is going to use too much memory in the long run, and you'll have the time
# to upgrade. With maxmemory after the limit is reached you'll start to get
# errors for write operations, and this may even lead to DB inconsistency.

# maxmemory <bytes>

############################### ADVANCED CONFIG ###############################

# Glue small output buffers together in order to send small replies in a
# single TCP packet. Uses a bit more CPU but most of the times it is a win
# in terms of number of queries per second. Use 'yes' if unsure.
glueoutputbuf yes

# Use object sharing. Can save a lot of memory if you have many common
# strings in your dataset, but performs lookups against the shared objects
# pool so it uses more CPU and can be a bit slower. Usually it's a good
# idea.
#
# When object sharing is enabled (shareobjects yes) you can use
# shareobjectspoolsize to control the size of the pool used in order to try
# object sharing. A bigger pool size will lead to better sharing capabilities.
# In general you want this value to be at least the double of the number of
# very common strings you have in your dataset.
#
# WARNING: object sharing is experimental, don't enable this feature
# in production before Redis 1.0-stable. Still please try this feature in
# your development environment so that we can test it better.
# shareobjects no
# shareobjectspoolsize 1024

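This configuration runs a throwaway Redis on port 9736, daemonized with its pid file and dump directory under test/. How the suite boots and connects to it lives in the Rakefile and test_helper.rb (not shown here); a hedged sketch of the idea, assuming Resque's string form of the redis setter:

# Assumption: launch the private test Redis and point Resque at it.
# 'localhost:9736' matches the `port 9736` setting above.
`redis-server #{File.dirname(__FILE__)}/redis-test.conf`
Resque.redis = 'localhost:9736'
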
data/test/resque_test.rb
ADDED
@@ -0,0 +1,18 @@
require File.dirname(__FILE__) + '/test_helper'

# make sure the world's not fallen from beneath us.
class ResqueTest < Test::Unit::TestCase
  def test_resque_version
    major, minor, patch = Resque::Version.split('.')
    assert_equal 1, major.to_i, 'major version does not match'
    assert_operator minor.to_i, :>=, 8, 'minor version is too low'
  end

  def test_good_job
    clean_perform_job(GoodJob, 1234, { :cats => :maiow }, [true, false, false])

    assert_equal 0, Resque.info[:failed], 'failed jobs'
    assert_equal 1, Resque.info[:processed], 'processed job'
    assert_equal 0, Resque.delayed_queue_schedule_size
  end
end

data/test/retry_criteria_test.rb
ADDED
@@ -0,0 +1,75 @@
require File.dirname(__FILE__) + '/test_helper'

class RetryCriteriaTest < Test::Unit::TestCase
  def setup
    Resque.redis.flushall
    @worker = Resque::Worker.new(:testing)
    @worker.register_worker
  end

  def test_retry_criteria_check_should_retry
    Resque.enqueue(RetryModuleCustomRetryCriteriaCheck)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 2, Resque.info[:processed], 'processed job'
  end

  def test_retry_criteria_check_hierarchy_should_not_retry
    Resque.enqueue(CustomRetryCriteriaCheckDontRetry)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 1, Resque.info[:failed], 'failed jobs'
    assert_equal 1, Resque.info[:processed], 'processed job'
  end

  def test_retry_criteria_check_hierarchy_should_retry
    Resque.enqueue(CustomRetryCriteriaCheckDoRetry)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 2, Resque.info[:processed], 'processed job'
  end

  def test_retry_criteria_check_multiple_never_retry
    Resque.enqueue(CustomRetryCriteriaCheckMultipleFailTwice, 'dont')
    6.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 1, Resque.info[:failed], 'failed jobs'
    assert_equal 1, Resque.info[:processed], 'processed job'
  end

  def test_retry_criteria_check_multiple_do_retry
    Resque.enqueue(CustomRetryCriteriaCheckMultipleFailTwice, 'do')
    6.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 3, Resque.info[:processed], 'processed job'
  end

  def test_retry_criteria_check_multiple_do_retry_again
    Resque.enqueue(CustomRetryCriteriaCheckMultipleFailTwice, 'do_again')
    6.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 3, Resque.info[:processed], 'processed job'
  end
end

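The jobs driven by these tests (RetryModuleCustomRetryCriteriaCheck and friends) are defined in data/test/test_jobs.rb, not shown in this section. They register custom retry criteria through the plugin's retry_criteria_check hook; the job below is an illustrative sketch of that pattern, mirroring the 'dont'/'do' arguments used above.

# Illustrative only: the block decides, per failure, whether to retry.
class ExampleCriteriaJob
  extend Resque::Plugins::Retry
  @queue = :testing

  retry_criteria_check do |exception, *args|
    # retry only when the job was enqueued with the 'do' flag
    args.first == 'do'
  end

  def self.perform(*args)
    raise 'failing on purpose to trigger the criteria check'
  end
end
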
data/test/retry_inheriting_checks_test.rb
ADDED
@@ -0,0 +1,33 @@
require File.dirname(__FILE__) + '/test_helper'

class RetryInheritingChecksTest < Test::Unit::TestCase
  def setup
    Resque.redis.flushall
    @worker = Resque::Worker.new(:testing)
    @worker.register_worker
  end

  def test_default_job_has_one_exception
    assert_equal 0, RetryDefaultsJob.retry_criteria_checks.size
  end

  def test_inheriting_copies_exceptions
    assert_equal RetryDefaultsJob.retry_criteria_checks, InheritTestJob.retry_criteria_checks
  end

  def test_inheriting_adds_exceptions
    assert_equal 1, InheritTestWithExtraJob.retry_criteria_checks.size
  end

  def test_extending_with_resque_retry_doesnt_override_previously_defined_inherited_hook
    klass = InheritOrderingJobExtendLastSubclass
    assert_equal 1, klass.retry_criteria_checks.size
    assert_equal 'test', klass.test_value
  end

  def test_extending_with_resque_retry_then_defining_inherited_does_not_override_previous_hook
    klass = InheritOrderingJobExtendFirstSubclass
    assert_equal 1, klass.retry_criteria_checks.size
    assert_equal 'test', klass.test_value
  end
end

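These tests pin down two behaviours of the plugin's inherited hook: subclasses receive a copy of their parent's retry criteria checks, and extending the plugin neither clobbers nor is clobbered by an inherited hook defined elsewhere in the hierarchy. The first behaviour, sketched with hypothetical class names:

class ParentJob
  extend Resque::Plugins::Retry
  @queue = :testing

  retry_criteria_check do |exception, *args|
    false # never retry; the check itself is what gets inherited
  end
end

class ChildJob < ParentJob
end

ChildJob.retry_criteria_checks.size # => 1, copied from ParentJob
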
data/test/retry_test.rb
ADDED
@@ -0,0 +1,173 @@
require File.dirname(__FILE__) + '/test_helper'

class RetryTest < Test::Unit::TestCase
  def setup
    Resque.redis.flushall
    @worker = Resque::Worker.new(:testing)
    @worker.register_worker
  end

  def test_resque_plugin_lint
    assert_nothing_raised do
      Resque::Plugin.lint(Resque::Plugins::Retry)
    end
  end

  def test_default_settings
    assert_equal 1, RetryDefaultsJob.retry_limit, 'default retry limit'
    assert_equal 0, RetryDefaultsJob.retry_attempt, 'default number of retry attempts'
    assert_equal nil, RetryDefaultsJob.retry_exceptions, 'default retry exceptions; nil = any'
    assert_equal 0, RetryDefaultsJob.retry_delay, 'default seconds until retry'
  end

  def test_retry_once_by_default
    Resque.enqueue(RetryDefaultsJob)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 2, Resque.info[:processed], 'processed job'
  end

  def test_module_retry_defaults
    Resque.enqueue(RetryModuleDefaultsJob)
    3.times do
      perform_next_job(@worker)
    end

    assert_equal 0, Resque.info[:pending], 'pending jobs'
    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 2, Resque.info[:processed], 'processed job'
  end

  def test_job_args_are_maintained
    test_args = ['maiow', 'cat', [42, 84]]

    Resque.enqueue(RetryDefaultsJob, *test_args)
    perform_next_job(@worker)

    assert job = Resque.pop(:testing)
    assert_equal test_args, job['args']
  end

  def test_job_args_may_be_modified
    Resque.enqueue(RetryWithModifiedArgsJob, 'foo', 'bar')
    perform_next_job(@worker)

    assert job = Resque.pop(:testing)
    assert_equal ['foobar', 'barbar'], job['args']
  end

  def test_retry_never_give_up
    Resque.enqueue(NeverGiveUpJob)
    10.times do
      perform_next_job(@worker)
    end

    assert_equal 1, Resque.info[:pending], 'pending jobs'
    assert_equal 10, Resque.info[:failed], 'failed jobs'
    assert_equal 10, Resque.info[:processed], 'processed job'
  end

  def test_fail_five_times_then_succeed
    Resque.enqueue(FailFiveTimesJob)
    7.times do
      perform_next_job(@worker)
    end

    assert_equal 5, Resque.info[:failed], 'failed jobs'
    assert_equal 6, Resque.info[:processed], 'processed job'
    assert_equal 0, Resque.info[:pending], 'pending jobs'
  end

  def test_can_determine_if_exception_may_be_retried
    assert_equal true, RetryDefaultsJob.retry_exception?(StandardError), 'StandardError may retry'
    assert_equal true, RetryDefaultsJob.retry_exception?(CustomException), 'CustomException may retry'
    assert_equal true, RetryDefaultsJob.retry_exception?(HierarchyCustomException), 'HierarchyCustomException may retry'

    assert_equal true, RetryCustomExceptionsJob.retry_exception?(CustomException), 'CustomException may retry'
    assert_equal true, RetryCustomExceptionsJob.retry_exception?(HierarchyCustomException), 'HierarchyCustomException may retry'
    assert_equal false, RetryCustomExceptionsJob.retry_exception?(AnotherCustomException), 'AnotherCustomException may not retry'
  end

  def test_retry_if_failed_and_exception_may_retry
    Resque.enqueue(RetryCustomExceptionsJob, CustomException)
    Resque.enqueue(RetryCustomExceptionsJob, HierarchyCustomException)
    4.times do
      perform_next_job(@worker)
    end

    assert_equal 4, Resque.info[:failed], 'failed jobs'
    assert_equal 4, Resque.info[:processed], 'processed job'
    assert_equal 2, Resque.info[:pending], 'pending jobs'
  end

  def test_do_not_retry_if_failed_and_exception_does_not_allow_retry
    Resque.enqueue(RetryCustomExceptionsJob, AnotherCustomException)
    Resque.enqueue(RetryCustomExceptionsJob, RuntimeError)
    4.times do
      perform_next_job(@worker)
    end

    assert_equal 2, Resque.info[:failed], 'failed jobs'
    assert_equal 2, Resque.info[:processed], 'processed job'
    assert_equal 0, Resque.info[:pending], 'pending jobs'
  end

  def test_retry_failed_jobs_in_separate_queue
    Resque.enqueue(JobWithRetryQueue, 'arg1')

    perform_next_job(@worker)

    assert job_from_retry_queue = Resque.pop(:testing_retry)
    assert_equal ['arg1'], job_from_retry_queue['args']
  end

  def test_retry_delayed_failed_jobs_in_separate_queue
    Resque.enqueue(DelayedJobWithRetryQueue, 'arg1')
    Resque.expects(:enqueue_in).with(1, JobRetryQueue, 'arg1')

    perform_next_job(@worker)
  end

  def test_delete_redis_key_when_job_is_successful
    Resque.enqueue(GoodJob, 'arg1')

    assert_equal nil, Resque.redis.get(GoodJob.redis_retry_key('arg1'))
    perform_next_job(@worker)
    assert_equal nil, Resque.redis.get(GoodJob.redis_retry_key('arg1'))
  end

  def test_delete_redis_key_after_final_failed_retry
    Resque.enqueue(FailFiveTimesJob, 'yarrrr')
    assert_equal nil, Resque.redis.get(FailFiveTimesJob.redis_retry_key('yarrrr'))

    perform_next_job(@worker)
    assert_equal '0', Resque.redis.get(FailFiveTimesJob.redis_retry_key('yarrrr'))

    perform_next_job(@worker)
    assert_equal '1', Resque.redis.get(FailFiveTimesJob.redis_retry_key('yarrrr'))

    5.times do
      perform_next_job(@worker)
    end
    assert_equal nil, Resque.redis.get(FailFiveTimesJob.redis_retry_key('yarrrr'))

    assert_equal 5, Resque.info[:failed], 'failed jobs'
    assert_equal 6, Resque.info[:processed], 'processed job'
    assert_equal 0, Resque.info[:pending], 'pending jobs'
  end

  def test_job_without_args_has_no_ending_colon_in_redis_key
    assert_equal 'resque-retry:GoodJob:yarrrr', GoodJob.redis_retry_key('yarrrr')
    assert_equal 'resque-retry:GoodJob:foo', GoodJob.redis_retry_key('foo')
    assert_equal 'resque-retry:GoodJob', GoodJob.redis_retry_key
  end

  def test_redis_retry_key_removes_whitespace
    assert_equal 'resque-retry:GoodJob:arg1-removespace', GoodJob.redis_retry_key('arg1', 'remove space')
  end
end

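The custom-exception tests above rely on jobs from data/test/test_jobs.rb that whitelist retryable exceptions via @retry_exceptions; as test_can_determine_if_exception_may_be_retried shows, subclasses of a listed exception also qualify. A sketch of that configuration, with hypothetical names:

class NetworkError < StandardError; end

# Illustrative only: NetworkError (and its subclasses) will be retried up to
# three times; any other exception goes straight to the failure backend.
class ExampleCustomExceptionsJob
  extend Resque::Plugins::Retry
  @queue = :testing

  @retry_limit      = 3
  @retry_exceptions = [NetworkError]

  def self.perform(url)
    raise NetworkError, "could not reach #{url}"
  end
end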