resque-cleanerer 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +92 -0
- data/LICENSE +20 -0
- data/README.md +333 -0
- data/Rakefile +38 -0
- data/lib/resque-cleaner.rb +1 -0
- data/lib/resque_cleaner/server/views/_cleaner_styles.erb +63 -0
- data/lib/resque_cleaner/server/views/_limiter.erb +13 -0
- data/lib/resque_cleaner/server/views/_paginate.erb +54 -0
- data/lib/resque_cleaner/server/views/_stats.erb +44 -0
- data/lib/resque_cleaner/server/views/cleaner.erb +25 -0
- data/lib/resque_cleaner/server/views/cleaner_exec.erb +8 -0
- data/lib/resque_cleaner/server/views/cleaner_list.erb +179 -0
- data/lib/resque_cleaner/server.rb +258 -0
- data/lib/resque_cleaner.rb +309 -0
- data/test/redis-test.conf +115 -0
- data/test/resque_cleaner_test.rb +206 -0
- data/test/resque_web_test.rb +66 -0
- data/test/test_helper.rb +131 -0
- metadata +106 -0
@@ -0,0 +1,309 @@
|
|
1
|
+
require 'time'
|
2
|
+
require 'resque'
|
3
|
+
require 'resque/server'
|
4
|
+
|
5
|
+
module Resque
|
6
|
+
module Plugins
|
7
|
+
# ResqueCleaner class provides useful functionalities to retry or clean
|
8
|
+
# failed jobs. Let's clean up your failed list!
|
9
|
+
class ResqueCleaner
|
10
|
+
# ResqueCleaner fetches all elements from Redis and checks them
|
11
|
+
# by linear when filtering them. Since there is a performance concern,
|
12
|
+
# ResqueCleaner handles only the latest x(default 1000) jobs.
|
13
|
+
#
|
14
|
+
# You can change the value through limiter attribute.
|
15
|
+
# e.g. cleaner.limiter.maximum = 5000
|
16
|
+
attr_reader :limiter
|
17
|
+
|
18
|
+
# Set false if you don't show any message.
|
19
|
+
attr_accessor :print_message
|
20
|
+
|
21
|
+
# Initializes instance.
# Binds to the currently configured Resque failure backend, enables
# message printing, and builds the job-window limiter.
def initialize
  @print_message = true
  @failure = Resque::Failure.backend
  @limiter = Limiter.new(self)
end
|
27
|
+
|
28
|
+
# Returns redis instance.
|
29
|
+
def redis
|
30
|
+
Resque.redis
|
31
|
+
end
|
32
|
+
|
33
|
+
# Returns failure backend. Only supports redis backend.
|
34
|
+
def failure
|
35
|
+
@failure
|
36
|
+
end
|
37
|
+
|
38
|
+
# Stats by date.
# Tallies the failed jobs (optionally filtered by the given block) by the
# date portion (first 10 chars, "YYYY/MM/DD") of their failed_at stamp.
# Prints the table when printing is enabled; returns the stats hash.
def stats_by_date(&block)
  grouped = select(&block).group_by { |job| job["failed_at"][0, 10] }
  stats = grouped.each_with_object({}) { |(date, list), acc| acc[date] = list.size }

  print_stats(stats) if print?
  stats
end
|
50
|
+
|
51
|
+
# Stats by class.
# Tallies the failed jobs (optionally filtered by the given block) by
# their klass_name (see FailedJobEx#klass_name). Prints the table when
# printing is enabled; returns the stats hash.
def stats_by_class(&block)
  stats = {}
  select(&block).each do |job|
    name = job.klass_name
    stats[name] = stats.fetch(name, 0) + 1
  end

  print_stats(stats) if print?
  stats
end
|
62
|
+
|
63
|
+
# Stats by exception.
# Tallies the failed jobs (optionally filtered by the given block) by
# their recorded exception class name. Prints the table when printing is
# enabled; returns the stats hash.
def stats_by_exception(&block)
  stats = select(&block).each_with_object({}) do |job, acc|
    acc[job["exception"]] = acc.fetch(job["exception"], 0) + 1
  end

  print_stats(stats) if print?
  stats
end
|
75
|
+
|
76
|
+
# Print stats
# Logs a "key: count" table (keys sorted) followed by a grand total, with
# a leading warning when the limiter is hiding older jobs from the count.
def print_stats(stats)
  log too_many_message if @limiter.on?
  stats.keys.sort.each { |key| log format("%15s: %4d", key, stats[key]) }
  log format("%15s: %4d", "total", @limiter.count)
end
|
84
|
+
|
85
|
+
# Returns every jobs for which block evaluates to true.
# Without a block, returns all jobs within the limiter's window.
def select(&block)
  jobs = @limiter.jobs
  # Reuse the already-fetched list: calling @limiter.jobs a second time
  # (as the old code did) refetches the whole window from Redis whenever
  # the limiter is not locked.
  block_given? ? jobs.select(&block) : jobs
end
alias :failure_jobs :select
|
91
|
+
|
92
|
+
# Returns failed jobs whose string form matches the given Regexp.
# Non-Regexp arguments (nil, a String, etc.) select nothing instead of
# raising a TypeError from String#=~ — this is the behavior the gem's
# own test suite expects for bad input.
def select_by_regex(regex)
  return [] unless regex.is_a?(Regexp)
  select do |job|
    job.to_s =~ regex
  end
end
|
97
|
+
|
98
|
+
# Clears every jobs for which block evaluates to true.
# Without a block, clears ALL jobs within the limiter's window.
# Returns the number of jobs removed from the :failed list.
def clear(&block)
  cleared = 0
  # Lock the limiter so failures pushed while we iterate don't shift the
  # window underneath us.
  @limiter.lock do
    @limiter.jobs.each_with_index do |job,i|
      if !block_given? || block.call(job)
        # Every prior removal shifts the remaining entries left by one,
        # so subtract the running `cleared` count to get the job's
        # current position in the Redis list.
        index = @limiter.start_index + i - cleared
        # fetches again since you can't ensure that it is always true:
        # a == encode(decode(a))
        value = redis.lindex(:failed, index)
        redis.lrem(:failed, 1, value)
        cleared += 1
      end
    end
  end
  cleared
end
|
115
|
+
|
116
|
+
# Retries every jobs for which block evaluates to true.
# When clear_after_requeue is true the job is removed from the failed
# list after being re-enqueued; otherwise it stays and is marked with a
# 'retried_at' timestamp in place. options may supply a target queue
# (:queue symbol key or "queue" string key) overriding each job's
# original queue. Returns the number of jobs requeued.
def requeue(clear_after_requeue=false, options={}, &block)
  requeued = 0
  queue = options["queue"] || options[:queue]
  # Lock the limiter so concurrent failures don't shift the window.
  @limiter.lock do
    @limiter.jobs.each_with_index do |job,i|
      if !block_given? || block.call(job)
        # Offset by `requeued`: only meaningful when clearing, where each
        # prior removal shifted later entries left by one. (When not
        # clearing, `value` is fetched but unused — the lset below
        # addresses the unshifted position directly.)
        index = @limiter.start_index + i - requeued

        value = redis.lindex(:failed, index)
        # Enqueue + (remove or mark) atomically in one MULTI/EXEC.
        redis.multi do
          Job.create(queue||job['queue'], job['payload']['class'], *job['payload']['args'])

          if clear_after_requeue
            # remove job
            # TODO: should use ltrim. not sure why i used lrem here...
            redis.lrem(:failed, 1, value)
          else
            # mark retried
            job['retried_at'] = Time.now.strftime("%Y/%m/%d %H:%M:%S")
            redis.lset(:failed, @limiter.start_index+i, Resque.encode(job))
          end
        end

        requeued += 1
      end
    end
  end
  requeued
end
|
146
|
+
|
147
|
+
# Clears all jobs except the last X jobs
# Trims the :failed list down to the limiter's maximum. No-op (returns 0)
# when the list is already within bounds; otherwise returns the limiter
# maximum, i.e. the number of jobs KEPT (not the number removed).
def clear_stale
  return 0 unless @limiter.on?
  keep = @limiter.maximum
  redis.ltrim(:failed, -keep, -1)
  keep
end
|
154
|
+
|
155
|
+
# Extends job(Hash instance) with some helper methods for filtering
# failed jobs by state, time, class, exception, and queue.
module FailedJobEx
  # Returns true if the job has been already retried. Otherwise returns
  # false.
  def retried?
    !self['retried_at'].nil?
  end
  alias :requeued? :retried?

  # Returns true if the job processed(failed) before the given time.
  # Otherwise returns false.
  # You can pass Time object or String.
  def before?(time)
    time = Time.parse(time) if time.is_a?(String)
    Time.parse(self['failed_at']) < time
  end

  # Returns true if the job processed(failed) after the given time.
  # Otherwise returns false.
  # You can pass Time object or String.
  def after?(time)
    time = Time.parse(time) if time.is_a?(String)
    Time.parse(self['failed_at']) >= time
  end

  # Returns true if the class of the job matches. Otherwise returns false.
  # Accepts a Class or a String: the argument is stringified before
  # comparison. (The previous `klass_name == klass_or_name` never matched
  # when a Class was passed, since String#== returns false for non-String
  # arguments — the gem's own test passes Class objects here.)
  def klass?(klass_or_name)
    klass_name == klass_or_name.to_s
  end

  # Best-effort class name of the failed job: the ActiveJob wrapper's
  # job_class argument when present, then the raw Resque payload class,
  # else "UNKNOWN" for broken/empty payloads. Memoized per job.
  def klass_name
    @klass_name ||=
      (self["payload"]["args"] && self["payload"]["args"][0] && self["payload"]["args"][0]["job_class"]) || # ActiveJob
      self["payload"]["class"] || # Resque
      "UNKNOWN"
  end

  # Returns true if the exception raised by the failed job matches. Otherwise returns false.
  def exception?(exception)
    self["exception"] == exception.to_s
  end

  # Returns true if the queue of the job matches. Otherwise returns false.
  def queue?(queue)
    self["queue"] == queue.to_s
  end
end
|
202
|
+
|
203
|
+
# Through the Limiter class, you accesses only the last x(default 1000)
# jobs.
class Limiter
  # Class-wide default window size shared by all instances (and, being a
  # class variable, by any subclasses).
  @@default_maximum ||= 1000

  class << self
    # Current default window size used when constructing new limiters.
    def default_maximum
      @@default_maximum
    end

    # Changes the default window size for limiters created afterwards;
    # already-built instances keep their own @maximum.
    def default_maximum=(v)
      @@default_maximum = v
    end
  end

  # Per-instance window size; tunable after construction
  # (e.g. cleaner.limiter.maximum = 5000).
  attr_accessor :maximum
  def initialize(cleaner)
    @cleaner = cleaner
    @maximum = @@default_maximum
    @locked = false
  end

  # Returns true if limiter is ON: number of failed jobs is more than
  # maximum value.
  def on?
    @cleaner.failure.count > @maximum
  end

  # Returns limited count.
  # While locked, reports the size of the snapshot taken at lock time;
  # otherwise computes it live from the backend.
  def count
    if @locked
      @jobs.size
    else
      on? ? @maximum : @cleaner.failure.count
    end
  end

  # Returns jobs. If numbers of jobs is more than maximum, it returns only
  # the maximum.
  # While locked, returns the lock-time snapshot; otherwise fetches the
  # tail window (negative index counts from the end of the list).
  def jobs
    if @locked
      @jobs
    else
      all( - count, count)
    end
  end

  # Wraps Resque's all and returns always array.
  # Normalizes nil -> [] and a single job -> [job], and extends each job
  # with FailedJobEx so the filter helpers are available.
  def all(index=0,count=1)
    jobs = @cleaner.failure.all( index, count)
    jobs = [] unless jobs
    jobs = [jobs] unless jobs.is_a?(Array)
    jobs.each{|j| j.extend FailedJobEx}
    jobs
  end

  # Returns a start index of jobs in :failed list.
  # While locked, returns the index frozen at lock time.
  def start_index
    if @locked
      @start_index
    else
      on? ? @cleaner.failure.count-@maximum : 0
    end
  end

  # Assuming new failures pushed while cleaner is dealing with failures,
  # you need to lock the range.
  # Takes a one-time snapshot of the window (@jobs, @start_index) so that
  # concurrent pushes cannot shift indices mid-operation. Nested calls
  # reuse the outer snapshot; the previous locked state is restored on
  # exit, even when the block raises.
  def lock
    old = @locked

    unless @locked
      total_count = @cleaner.failure.count
      if total_count>@maximum
        @start_index = total_count-@maximum
        @jobs = all( @start_index, @maximum)
      else
        @start_index = 0
        @jobs = all( 0, total_count)
      end
    end

    @locked = true
    yield
  ensure
    @locked = old
  end
end
|
290
|
+
|
291
|
+
# Outputs message. Overrides this method when you want to change a output
# stream.
def log(msg)
  return unless print?
  puts msg
end

# True when messages should be printed (see #print_message).
def print?
  @print_message
end

# Warning shown when the limiter is hiding older failures.
def too_many_message
  "There are too many failed jobs(count=#{@failure.count}). This only looks at last #{@limiter.maximum} jobs."
end
|
304
|
+
end
|
305
|
+
end
|
306
|
+
end
|
307
|
+
|
308
|
+
require 'resque_cleaner/server'
|
309
|
+
|
@@ -0,0 +1,115 @@
|
|
1
|
+
# Redis configuration file example
|
2
|
+
|
3
|
+
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
4
|
+
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
5
|
+
daemonize yes
|
6
|
+
|
7
|
+
# When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
|
8
|
+
# You can specify a custom pid file location here.
|
9
|
+
pidfile ./test/redis-test.pid
|
10
|
+
|
11
|
+
# Accept connections on the specified port, default is 6379
|
12
|
+
port 9736
|
13
|
+
|
14
|
+
# If you want you can bind a single interface, if the bind option is not
|
15
|
+
# specified all the interfaces will listen for connections.
|
16
|
+
#
|
17
|
+
# bind 127.0.0.1
|
18
|
+
|
19
|
+
# Close the connection after a client is idle for N seconds (0 to disable)
|
20
|
+
timeout 300
|
21
|
+
|
22
|
+
# Save the DB on disk:
|
23
|
+
#
|
24
|
+
# save <seconds> <changes>
|
25
|
+
#
|
26
|
+
# Will save the DB if both the given number of seconds and the given
|
27
|
+
# number of write operations against the DB occurred.
|
28
|
+
#
|
29
|
+
# In the example below the behaviour will be to save:
|
30
|
+
# after 900 sec (15 min) if at least 1 key changed
|
31
|
+
# after 300 sec (5 min) if at least 10 keys changed
|
32
|
+
# after 60 sec if at least 10000 keys changed
|
33
|
+
save 900 1
|
34
|
+
save 300 10
|
35
|
+
save 60 10000
|
36
|
+
|
37
|
+
# The filename where to dump the DB
|
38
|
+
dbfilename dump.rdb
|
39
|
+
|
40
|
+
# For default save/load DB in/from the working directory
|
41
|
+
# Note that you must specify a directory not a file name.
|
42
|
+
dir ./test/
|
43
|
+
|
44
|
+
# Set server verbosity to 'debug'
|
45
|
+
# it can be one of:
|
46
|
+
# debug (a lot of information, useful for development/testing)
|
47
|
+
# notice (moderately verbose, what you want in production probably)
|
48
|
+
# warning (only very important / critical messages are logged)
|
49
|
+
loglevel debug
|
50
|
+
|
51
|
+
# Specify the log file name. Also 'stdout' can be used to force
|
52
|
+
# the daemon to log on the standard output. Note that if you use standard
|
53
|
+
# output for logging but daemonize, logs will be sent to /dev/null
|
54
|
+
logfile stdout
|
55
|
+
|
56
|
+
# Set the number of databases. The default database is DB 0, you can select
|
57
|
+
# a different one on a per-connection basis using SELECT <dbid> where
|
58
|
+
# dbid is a number between 0 and 'databases'-1
|
59
|
+
databases 16
|
60
|
+
|
61
|
+
################################# REPLICATION #################################
|
62
|
+
|
63
|
+
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
64
|
+
# another Redis server. Note that the configuration is local to the slave
|
65
|
+
# so for example it is possible to configure the slave to save the DB with a
|
66
|
+
# different interval, or to listen to another port, and so on.
|
67
|
+
|
68
|
+
# slaveof <masterip> <masterport>
|
69
|
+
|
70
|
+
################################## SECURITY ###################################
|
71
|
+
|
72
|
+
# Require clients to issue AUTH <PASSWORD> before processing any other
|
73
|
+
# commands. This might be useful in environments in which you do not trust
|
74
|
+
# others with access to the host running redis-server.
|
75
|
+
#
|
76
|
+
# This should stay commented out for backward compatibility and because most
|
77
|
+
# people do not need auth (e.g. they run their own servers).
|
78
|
+
|
79
|
+
# requirepass foobared
|
80
|
+
|
81
|
+
################################### LIMITS ####################################
|
82
|
+
|
83
|
+
# Set the max number of connected clients at the same time. By default there
|
84
|
+
# is no limit, and it's up to the number of file descriptors the Redis process
|
85
|
+
# is able to open. The special value '0' means no limits.
|
86
|
+
# Once the limit is reached Redis will close all the new connections sending
|
87
|
+
# an error 'max number of clients reached'.
|
88
|
+
|
89
|
+
# maxclients 128
|
90
|
+
|
91
|
+
# Don't use more memory than the specified amount of bytes.
|
92
|
+
# When the memory limit is reached Redis will try to remove keys with an
|
93
|
+
# EXPIRE set. It will try to start freeing keys that are going to expire
|
94
|
+
# in little time and preserve keys with a longer time to live.
|
95
|
+
# Redis will also try to remove objects from free lists if possible.
|
96
|
+
#
|
97
|
+
# If all this fails, Redis will start to reply with errors to commands
|
98
|
+
# that will use more memory, like SET, LPUSH, and so on, and will continue
|
99
|
+
# to reply to most read-only commands like GET.
|
100
|
+
#
|
101
|
+
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
|
102
|
+
# 'state' server or cache, not as a real DB. When Redis is used as a real
|
103
|
+
# database the memory usage will grow over the weeks, it will be obvious if
|
104
|
+
# it is going to use too much memory in the long run, and you'll have the time
|
105
|
+
# to upgrade. With maxmemory after the limit is reached you'll start to get
|
106
|
+
# errors for write operations, and this may even lead to DB inconsistency.
|
107
|
+
|
108
|
+
# maxmemory <bytes>
|
109
|
+
|
110
|
+
############################### ADVANCED CONFIG ###############################
|
111
|
+
|
112
|
+
# Glue small output buffers together in order to send small replies in a
|
113
|
+
# single TCP packet. Uses a bit more CPU but most of the times it is a win
|
114
|
+
# in terms of number of queries per second. Use 'yes' if unsure.
|
115
|
+
#glueoutputbuf yes
|
@@ -0,0 +1,206 @@
|
|
1
|
+
require File.expand_path(File.dirname(__FILE__) + '/test_helper')
|
2
|
+
require 'time'
|
3
|
+
describe "ResqueCleaner" do
|
4
|
+
before do
|
5
|
+
Resque.redis.flushall
|
6
|
+
|
7
|
+
@worker = Resque::Worker.new(:jobs,:jobs2)
|
8
|
+
|
9
|
+
# 3 BadJob at 2009-03-13
|
10
|
+
create_and_process_jobs :jobs, @worker, 3, Time.parse('2009-03-13'), BadJob
|
11
|
+
# 2 BadJob by Jason at 2009-03-13
|
12
|
+
create_and_process_jobs :jobs2, @worker, 2, Time.parse('2009-03-13'), BadJob, "Jason"
|
13
|
+
|
14
|
+
# 1 BadJob by Johnson at 2009-03-13
|
15
|
+
create_and_process_jobs :jobs, @worker, 1, Time.parse('2009-03-13'), BadJob, "Johnson"
|
16
|
+
|
17
|
+
# 7 BadJob at 2009-11-13
|
18
|
+
create_and_process_jobs :jobs, @worker, 7, Time.parse('2009-11-13'), BadJobWithSyntaxError
|
19
|
+
# 7 BadJob by Freddy at 2009-11-13
|
20
|
+
create_and_process_jobs :jobs2, @worker, 7, Time.parse('2009-11-13'), BadJob, "Freddy"
|
21
|
+
|
22
|
+
# 11 BadJob at 2010-08-13
|
23
|
+
create_and_process_jobs :jobs, @worker, 11, Time.parse('2010-08-13'), BadJob
|
24
|
+
# 11 BadJob by Jason at 2010-08-13
|
25
|
+
create_and_process_jobs :jobs2, @worker, 11, Time.parse('2010-08-13'), BadJob, "Jason"
|
26
|
+
|
27
|
+
@cleaner = Resque::Plugins::ResqueCleaner.new
|
28
|
+
@cleaner.print_message = false
|
29
|
+
end
|
30
|
+
|
31
|
+
it "#select_by_regex returns only Jason jobs" do
|
32
|
+
ret = @cleaner.select_by_regex(/Jason/)
|
33
|
+
assert_equal 13, ret.size
|
34
|
+
end
|
35
|
+
|
36
|
+
it "#select_by_regex returns an empty array if passed a non-regex" do
|
37
|
+
['string', nil, 13, Class.new].each do |non_regex|
|
38
|
+
ret = @cleaner.select_by_regex(nil)
|
39
|
+
assert_equal 0, ret.size
|
40
|
+
end
|
41
|
+
end
|
42
|
+
|
43
|
+
it "#select returns failure jobs" do
|
44
|
+
ret = @cleaner.select
|
45
|
+
assert_equal 42, ret.size
|
46
|
+
end
|
47
|
+
|
48
|
+
it "#select works with a limit" do
|
49
|
+
@cleaner.limiter.maximum = 10
|
50
|
+
ret = @cleaner.select
|
51
|
+
|
52
|
+
# only maximum number
|
53
|
+
assert_equal 10, ret.size
|
54
|
+
|
55
|
+
# latest one
|
56
|
+
assert_equal Time.parse(ret[0]['failed_at']), Time.parse('2010-08-13')
|
57
|
+
end
|
58
|
+
|
59
|
+
it "#select with a block returns failure jobs which the block evaluates true" do
|
60
|
+
ret = @cleaner.select {|job| job["payload"]["args"][0]=="Jason"}
|
61
|
+
assert_equal 13, ret.size
|
62
|
+
end
|
63
|
+
|
64
|
+
it "#clear deletes failure jobs" do
|
65
|
+
cleared = @cleaner.clear
|
66
|
+
assert_equal 42, cleared
|
67
|
+
assert_equal 0, @cleaner.select.size
|
68
|
+
end
|
69
|
+
|
70
|
+
it "#clear with a block deletes failure jobs which the block evaluates true" do
|
71
|
+
cleared = @cleaner.clear{|job| job["payload"]["args"][0]=="Jason"}
|
72
|
+
assert_equal 13, cleared
|
73
|
+
assert_equal 42-13, @cleaner.select.size
|
74
|
+
assert_equal 0, @cleaner.select{|job| job["payload"]["args"][0]=="Jason"}.size
|
75
|
+
end
|
76
|
+
|
77
|
+
it "#requeue retries failure jobs" do
|
78
|
+
assert_equal 0, queue_size(:jobs,:jobs2)
|
79
|
+
requeued = @cleaner.requeue
|
80
|
+
assert_equal 42, requeued
|
81
|
+
assert_equal 42, @cleaner.select.size # it doesn't clear jobs
|
82
|
+
assert_equal 42, queue_size(:jobs,:jobs2)
|
83
|
+
end
|
84
|
+
|
85
|
+
it "#requeue with a block retries failure jobs which the block evaluates true" do
|
86
|
+
requeued = @cleaner.requeue{|job| job["payload"]["args"][0]=="Jason"}
|
87
|
+
assert_equal 13, requeued
|
88
|
+
assert_equal 13, queue_size(:jobs,:jobs2)
|
89
|
+
end
|
90
|
+
|
91
|
+
it "#requeue with clear option requeues and deletes failure jobs" do
|
92
|
+
assert_equal 0, queue_size(:jobs,:jobs2)
|
93
|
+
requeued = @cleaner.requeue(true)
|
94
|
+
assert_equal 42, requeued
|
95
|
+
assert_equal 42, queue_size(:jobs,:jobs2)
|
96
|
+
assert_equal 0, @cleaner.select.size
|
97
|
+
end
|
98
|
+
|
99
|
+
it "#requeue with :queue option requeues the jobs to the queue" do
|
100
|
+
assert_equal 0, queue_size(:jobs,:jobs2,:retry)
|
101
|
+
requeued = @cleaner.requeue false, :queue => :retry
|
102
|
+
assert_equal 42, requeued
|
103
|
+
assert_equal 42, @cleaner.select.size # it doesn't clear jobs
|
104
|
+
assert_equal 0, queue_size(:jobs,:jobs2)
|
105
|
+
assert_equal 42, queue_size(:retry)
|
106
|
+
end
|
107
|
+
|
108
|
+
it "#clear_stale deletes failure jobs which is queued before the last x enqueued" do
|
109
|
+
@cleaner.limiter.maximum = 10
|
110
|
+
@cleaner.clear_stale
|
111
|
+
assert_equal 10, @cleaner.failure.count
|
112
|
+
assert_equal Time.parse(@cleaner.failure_jobs[0]['failed_at']), Time.parse('2010-08-13')
|
113
|
+
end
|
114
|
+
|
115
|
+
it "FailedJobEx module extends job and provides some useful methods" do
|
116
|
+
# before 2009-04-01
|
117
|
+
ret = @cleaner.select {|j| j.before?('2009-04-01')}
|
118
|
+
assert_equal 6, ret.size
|
119
|
+
|
120
|
+
# after 2010-01-01
|
121
|
+
ret = @cleaner.select {|j| j.after?('2010-01-01')}
|
122
|
+
assert_equal 22, ret.size
|
123
|
+
|
124
|
+
# filter by class
|
125
|
+
ret = @cleaner.select {|j| j.klass?(BadJobWithSyntaxError)}
|
126
|
+
assert_equal 7, ret.size
|
127
|
+
|
128
|
+
# filter by exception
|
129
|
+
ret = @cleaner.select {|j| j.exception?(SyntaxError)}
|
130
|
+
assert_equal 7, ret.size
|
131
|
+
|
132
|
+
# filter by queue
|
133
|
+
ret = @cleaner.select {|j| j.queue?(:jobs2)}
|
134
|
+
assert_equal 20, ret.size
|
135
|
+
|
136
|
+
# combination
|
137
|
+
ret = @cleaner.select {|j| j.queue?(:jobs2) && j.before?('2009-12-01')}
|
138
|
+
assert_equal 9, ret.size
|
139
|
+
|
140
|
+
# combination 2
|
141
|
+
ret = @cleaner.select {|j| j['payload']['args']==['Jason'] && j.queue?(:jobs2)}
|
142
|
+
assert_equal 13, ret.size
|
143
|
+
|
144
|
+
# retried?
|
145
|
+
requeued = @cleaner.requeue{|j| j["payload"]["args"][0]=="Johnson"}
|
146
|
+
ret = @cleaner.select {|j| j.retried?}
|
147
|
+
assert_equal 1, ret.size
|
148
|
+
end
|
149
|
+
|
150
|
+
it "#stats_by_date returns stats grouped by date" do
|
151
|
+
ret = @cleaner.stats_by_date
|
152
|
+
assert_equal 6, ret['2009/03/13']
|
153
|
+
assert_equal 14, ret['2009/11/13']
|
154
|
+
|
155
|
+
# with block
|
156
|
+
ret = @cleaner.stats_by_date{|j| j['payload']['args']==['Jason']}
|
157
|
+
assert_equal 2, ret['2009/03/13']
|
158
|
+
assert_equal nil, ret['2009/11/13']
|
159
|
+
assert_equal 11, ret['2010/08/13']
|
160
|
+
end
|
161
|
+
|
162
|
+
it "#stats_by_class returns stats grouped by class" do
|
163
|
+
ret = @cleaner.stats_by_class
|
164
|
+
assert_equal 35, ret['BadJob']
|
165
|
+
assert_equal 7, ret['BadJobWithSyntaxError']
|
166
|
+
end
|
167
|
+
|
168
|
+
it "#stats_by_class works with broken log" do
|
169
|
+
add_empty_payload_failure
|
170
|
+
ret = @cleaner.stats_by_class
|
171
|
+
assert_equal 1, ret['UNKNOWN']
|
172
|
+
end
|
173
|
+
|
174
|
+
it "#stats_by_exception returns stats grouped by exception" do
|
175
|
+
ret = @cleaner.stats_by_exception
|
176
|
+
assert_equal 35, ret['RuntimeError']
|
177
|
+
assert_equal 7, ret['SyntaxError']
|
178
|
+
end
|
179
|
+
|
180
|
+
it "#lock ensures that a new failure job doesn't affect in a limit mode" do
|
181
|
+
@cleaner.limiter.maximum = 23
|
182
|
+
@cleaner.limiter.lock do
|
183
|
+
first = @cleaner.select[0]
|
184
|
+
assert_equal "Freddy", first["payload"]["args"][0]
|
185
|
+
|
186
|
+
create_and_process_jobs :jobs, @worker, 30, Time.parse('2010-10-10'), BadJob, "Jack"
|
187
|
+
|
188
|
+
first = @cleaner.select[0]
|
189
|
+
assert_equal "Freddy", first["payload"]["args"][0]
|
190
|
+
end
|
191
|
+
first = @cleaner.select[0]
|
192
|
+
assert_equal "Jack", first["payload"]["args"][0]
|
193
|
+
end
|
194
|
+
|
195
|
+
it "allows you to configure limiter" do
|
196
|
+
c = Resque::Plugins::ResqueCleaner.new
|
197
|
+
refute_equal c.limiter.maximum, 10_000
|
198
|
+
|
199
|
+
module Resque::Plugins
|
200
|
+
ResqueCleaner::Limiter.default_maximum = 10_000
|
201
|
+
end
|
202
|
+
|
203
|
+
c = Resque::Plugins::ResqueCleaner.new
|
204
|
+
assert_equal c.limiter.maximum, 10_000
|
205
|
+
end
|
206
|
+
end
|
@@ -0,0 +1,66 @@
|
|
1
|
+
require File.expand_path(File.dirname(__FILE__) + '/test_helper')
|
2
|
+
|
3
|
+
require 'digest/sha1'
|
4
|
+
require 'json'
|
5
|
+
require 'rack/test'
|
6
|
+
|
7
|
+
# Wire Rack::Test into every spec so tests can issue get/post requests
# against the Resque web UI (with the cleaner tab mixed in).
class Minitest::Spec
  include Rack::Test::Methods
  # Rack app under test, required by Rack::Test::Methods.
  def app
    Resque::Server.new
  end
end
|
13
|
+
|
14
|
+
# Seeds a clean Redis with 11 failed jobs — 1 BadJobWithSyntaxError plus
# 10 BadJob("test_0".."test_9") — and builds a silent cleaner instance
# (@cleaner) for the web tests. Relies on create_and_process_jobs from
# test_helper.
def setup_some_failed_jobs
  Resque.redis.flushall

  @worker = Resque::Worker.new(:jobs,:jobs2)

  create_and_process_jobs :jobs, @worker, 1, Time.now, BadJobWithSyntaxError, "great_args"

  10.times {|i|
    create_and_process_jobs :jobs, @worker, 1, Time.now, BadJob, "test_#{i}"
  }

  @cleaner = Resque::Plugins::ResqueCleaner.new
  # Keep test output quiet.
  @cleaner.print_message = false
end
|
28
|
+
|
29
|
+
describe "resque-web" do
|
30
|
+
before do
|
31
|
+
setup_some_failed_jobs
|
32
|
+
end
|
33
|
+
|
34
|
+
it "#cleaner should respond with success" do
|
35
|
+
get "/cleaner"
|
36
|
+
assert last_response.body.include?('BadJob')
|
37
|
+
assert last_response.body =~ /\bException\b/
|
38
|
+
end
|
39
|
+
|
40
|
+
it "#cleaner_list should respond with success" do
|
41
|
+
get "/cleaner_list"
|
42
|
+
assert last_response.ok?, last_response.errors
|
43
|
+
end
|
44
|
+
|
45
|
+
it '#cleaner_list shows the failed jobs' do
|
46
|
+
get "/cleaner_list"
|
47
|
+
assert last_response.body.include?('BadJob')
|
48
|
+
end
|
49
|
+
|
50
|
+
it "#cleaner_list shows the failed jobs when we use a select_by_regex" do
|
51
|
+
get "/cleaner_list", :regex => "BadJob*"
|
52
|
+
assert last_response.body.include?('"BadJobWithSyntaxError"')
|
53
|
+
assert last_response.body.include?('"BadJob"')
|
54
|
+
end
|
55
|
+
|
56
|
+
|
57
|
+
it '#cleaner_exec clears job' do
|
58
|
+
post "/cleaner_exec", :action => "clear", :sha1 => Digest::SHA1.hexdigest(@cleaner.select[0].to_json)
|
59
|
+
assert_equal 10, @cleaner.select.size
|
60
|
+
end
|
61
|
+
it "#cleaner_dump should respond with success" do
|
62
|
+
get "/cleaner_dump"
|
63
|
+
assert last_response.ok?, last_response.errors
|
64
|
+
end
|
65
|
+
end
|
66
|
+
|