resque-cleaner 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +20 -0
- data/README.markdown +6 -0
- data/Rakefile +38 -0
- data/lib/resque_cleaner.rb +275 -0
- data/test/redis-test.conf +115 -0
- data/test/resque_cleaner_test.rb +165 -0
- data/test/test_helper.rb +122 -0
- metadata +88 -0
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
|
|
1
|
+
Copyright (c) 2010 Tatsuya Ono
|
2
|
+
|
3
|
+
Permission is hereby granted, free of charge, to any person obtaining
|
4
|
+
a copy of this software and associated documentation files (the
|
5
|
+
"Software"), to deal in the Software without restriction, including
|
6
|
+
without limitation the rights to use, copy, modify, merge, publish,
|
7
|
+
distribute, sublicense, and/or sell copies of the Software, and to
|
8
|
+
permit persons to whom the Software is furnished to do so, subject to
|
9
|
+
the following conditions:
|
10
|
+
|
11
|
+
The above copyright notice and this permission notice shall be
|
12
|
+
included in all copies or substantial portions of the Software.
|
13
|
+
|
14
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
15
|
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
16
|
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
17
|
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
18
|
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
19
|
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
20
|
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
data/README.markdown
ADDED
data/Rakefile
ADDED
@@ -0,0 +1,38 @@
|
|
1
|
+
#
# Setup
#

# load 'tasks/redis.rake'
#require 'rake/testtask'

$LOAD_PATH.unshift 'lib'
#require 'resque/tasks'

# True when `name` resolves to an executable or shell builtin.
def command?(name)
  system("type #{name} > /dev/null 2>&1")
end


#
# Tests
#

task :default => :test

desc "Run the test suite"
task :test do
  use_rg = command?(:rg)
  Dir['test/**/*_test.rb'].each do |test_file|
    use_rg ? sh("rg #{test_file}") : ruby(test_file)
  end
end

if command? :kicker
  desc "Launch Kicker (like autotest)"
  task :kicker do
    puts "Kicking... (ctrl+c to cancel)"
    exec "kicker -e rake test lib examples"
  end
end
|
37
|
+
|
38
|
+
|
@@ -0,0 +1,275 @@
|
|
1
|
+
require 'time'
|
2
|
+
module Resque
|
3
|
+
module Plugins
|
4
|
+
# ResqueCleaner class provides useful functionalities to retry or clean
|
5
|
+
# failed jobs. Let's clean up your failed list!
|
6
|
+
class ResqueCleaner
|
7
|
+
include Resque::Helpers
|
8
|
+
# ResqueCleaner fetches all elements from Redis and checks them
|
9
|
+
# by linear when filtering them. Since there is a performance concern,
|
10
|
+
# ResqueCleaner handles only the latest x(default 1000) jobs.
|
11
|
+
#
|
12
|
+
# You can change the value through limiter attribute.
|
13
|
+
# e.g. cleaner.limiter.maximum = 5000
|
14
|
+
attr_reader :limiter
|
15
|
+
|
16
|
+
# Set false if you don't show any message.
|
17
|
+
attr_accessor :print_message
|
18
|
+
|
19
|
+
# Builds a cleaner bound to Resque's current failure backend.
# Messages are printed by default; a fresh Limiter caps how many
# failed jobs are examined at once.
def initialize
  @print_message = true
  @failure       = Resque::Failure.backend
  @limiter       = Limiter.new(self)
end
|
25
|
+
|
26
|
+
# Returns the Redis connection Resque is configured with.
def redis
  Resque.redis
end
|
30
|
+
|
31
|
+
# Returns the failure backend captured at initialization.
# Only the redis-backed failure backend is supported.
def failure
  @failure
end
|
35
|
+
|
36
|
+
# Counts failed jobs grouped by failure date (YYYY/MM/DD).
# An optional block pre-filters which jobs are counted.
# Returns the {date => count} hash (missing dates are absent, not 0).
def stats_by_date(&block)
  summary = {}
  select(&block).each do |job|
    day = job["failed_at"][0, 10]
    summary[day] = (summary[day] || 0) + 1
  end

  if print?
    log too_many_message if @limiter.on?
    summary.keys.sort.each do |day|
      log "%s: %4d" % [day, summary[day]]
    end
    log "%10s: %4d" % ["total", @limiter.count]
  end
  summary
end
|
55
|
+
|
56
|
+
# Counts failed jobs grouped by job class name.
# An optional block pre-filters which jobs are counted.
# Returns the {class_name => count} hash.
def stats_by_class(&block)
  summary = {}
  select(&block).each do |job|
    name = job["payload"]["class"]
    summary[name] = (summary[name] || 0) + 1
  end

  if print?
    log too_many_message if @limiter.on?
    summary.keys.sort.each do |name|
      log "%15s: %4d" % [name, summary[name]]
    end
    log "%15s: %4d" % ["total", @limiter.count]
  end
  summary
end
|
75
|
+
|
76
|
+
# Returns every job for which the block evaluates to true
# (all jobs within the limiter window when no block is given).
# Fix: previously this fetched @limiter.jobs twice (two backend
# reads, with a race window between them); now it filters the
# already-captured snapshot.
def select(&block)
  jobs = @limiter.jobs
  block_given? ? jobs.select(&block) : jobs
end
alias :failure_jobs :select
|
82
|
+
|
83
|
+
# Clears every job for which the block evaluates to true
# (all jobs within the limiter window when no block is given).
# Returns the number of jobs cleared.
def clear(&block)
  cleared = 0
  @limiter.lock do
    @limiter.jobs.each_with_index do |job,i|
      if !block_given? || block.call(job)
        # Subtract the jobs already removed so the index still points
        # at this job within the live Redis list.
        index = @limiter.start_index + i - cleared
        # fetches again since you can't ensure that it is always true:
        # a == encode(decode(a))
        value = redis.lindex(:failed, index)
        redis.lrem(:failed, 1, value)
        cleared += 1
      end
    end
  end
  cleared
end
|
100
|
+
|
101
|
+
# Retries every job for which the block evaluates to true
# (all jobs within the limiter window when no block is given).
# When clear_after_requeue is true the job is removed from the
# failed list after requeueing; otherwise it is marked with a
# 'retried_at' timestamp. Returns the number of jobs requeued.
def requeue(clear_after_requeue=false, &block)
  requeued = 0
  @limiter.lock do
    @limiter.jobs.each_with_index do |job,i|
      if !block_given? || block.call(job)
        # Index within the live Redis list, adjusted for removals.
        index = @limiter.start_index + i - requeued

        if clear_after_requeue
          # remove job
          value = redis.lindex(:failed, index)
          redis.lrem(:failed, 1, value)
        else
          # mark retried; no elements have been removed in this mode,
          # so the unadjusted start_index+i is the correct position
          job['retried_at'] = Time.now.strftime("%Y/%m/%d %H:%M:%S")
          redis.lset(:failed, @limiter.start_index+i, Resque.encode(job))
        end

        Job.create(job['queue'], job['payload']['class'], *job['payload']['args'])
        requeued += 1
      end
    end
  end
  requeued
end
|
126
|
+
|
127
|
+
# Clears all jobs except the last X (limiter maximum) jobs.
# No-op (returns 0) when the failed list already fits the limit.
# NOTE: returns the number of jobs KEPT (the limiter maximum),
# not the number removed.
def clear_stale
  return 0 unless @limiter.on?
  c = @limiter.maximum
  redis.ltrim(:failed, -c, -1)
  c
end
|
134
|
+
|
135
|
+
# Returns a FilterProc, to which you can easily chain useful
# conditions.
# e.g.
#   cleaner.clear &cleaner.proc.retried
#   #=> Clears all jobs retried.
#   cleaner.select &cleaner.proc.after(10.days.ago).klass(EmailJob)
#   #=> Selects all EmailJob failed within 10 days.
#   cleaner.select &cleaner.proc{|j| j["exception"]=="RunTimeError"}.klass(EmailJob)
#   #=> Selects all EmailJob failed with RunTimeError.
def proc(&block)
  FilterProc.new(&block)
end
|
146
|
+
|
147
|
+
# Provides typical procs you can use to filter jobs. Every helper
# returns a new FilterProc, so conditions can be chained:
#   proc.queue(:mails).before('2010-01-01')
class FilterProc < Proc
  # Matches jobs which have already been retried (non-empty
  # 'retried_at'). Fix: the previous implementation used
  # ActiveSupport's blank? (NoMethodError in plain Ruby) and
  # selected the NOT-retried jobs, contradicting the documented
  # "Clears all jobs retried" behavior.
  def retried
    FilterProc.new {|job| self.call(job) && !job['retried_at'].nil? && !job['retried_at'].empty?}
  end
  alias :requeued :retried

  # Matches jobs which failed at or before the given time.
  # Accepts a Time or a Time.parse-able String.
  def before(time)
    time = Time.parse(time) if time.is_a?(String)
    FilterProc.new {|job| self.call(job) && Time.parse(job['failed_at']) <= time}
  end

  # Matches jobs which failed at or after the given time.
  # Accepts a Time or a Time.parse-able String.
  def after(time)
    time = Time.parse(time) if time.is_a?(String)
    FilterProc.new {|job| self.call(job) && Time.parse(job['failed_at']) >= time}
  end

  # Matches jobs whose payload class equals the given class or name.
  def klass(klass_or_name)
    FilterProc.new {|job| self.call(job) && job["payload"]["class"] == klass_or_name.to_s}
  end

  # Matches jobs which failed on the given queue.
  def queue(queue)
    FilterProc.new {|job| self.call(job) && job["queue"] == queue.to_s}
  end

  # With no block, builds a filter which matches every job.
  def self.new(&block)
    block ? super : super {|job| true}
  end
end
|
180
|
+
|
181
|
+
# Through the Limiter class, you access only the last x (default
# 1000) jobs, keeping linear scans over the failed list cheap.
class Limiter
  DEFAULT_MAX_JOBS = 1000

  # Maximum number of failed jobs this cleaner examines.
  attr_accessor :maximum

  def initialize(cleaner)
    @cleaner = cleaner
    @maximum = DEFAULT_MAX_JOBS
    @locked  = false
  end

  # Returns true if limiter is ON: number of failed jobs is more
  # than the maximum value.
  def on?
    @cleaner.failure.count > @maximum
  end

  # Returns the limited count (snapshot size when locked).
  def count
    if @locked
      @jobs.size
    else
      on? ? @maximum : @cleaner.failure.count
    end
  end

  # Returns jobs. If the number of jobs is more than the maximum,
  # it returns only the last maximum jobs.
  # Fix: count is now queried once instead of twice, avoiding a
  # second backend round-trip and a race between the two reads.
  def jobs
    if @locked
      @jobs
    else
      c = count
      all(-c, c)
    end
  end

  # Wraps the failure backend's all and always returns an Array
  # (the backend returns a bare hash when count is 1 and nil when
  # empty).
  def all(index=0, count=1)
    jobs = @cleaner.failure.all(index, count)
    jobs = [] unless jobs
    jobs = [jobs] unless jobs.is_a?(Array)
    jobs
  end

  # Returns the start index of the handled window in the :failed
  # list.
  def start_index
    if @locked
      @start_index
    else
      on? ? @cleaner.failure.count - @maximum : 0
    end
  end

  # Snapshots the current window so failures pushed while the block
  # runs don't shift the range. Restores the previous lock state on
  # exit, so locks may nest safely.
  def lock
    old = @locked

    unless @locked
      total_count = @cleaner.failure.count
      if total_count > @maximum
        @start_index = total_count - @maximum
        @jobs = all(@start_index, @maximum)
      else
        @start_index = 0
        @jobs = all(0, total_count)
      end
    end

    @locked = true
    yield
  ensure
    @locked = old
  end
end
|
256
|
+
|
257
|
+
# Outputs a message when printing is enabled. Override this method
# when you want to change the output stream.
def log(msg)
  return unless print?
  puts msg
end
|
262
|
+
|
263
|
+
# Whether informational messages should be printed
# (see #print_message).
def print?
  @print_message
end
|
266
|
+
|
267
|
+
# Warning shown when the failed list exceeds the limiter maximum
# and only a suffix of it is being examined.
def too_many_message
  "There are too many failed jobs(count=#{@failure.count}). This only looks at last #{@limiter.maximum} jobs."
end
|
270
|
+
end
|
271
|
+
end
|
272
|
+
end
|
273
|
+
|
274
|
+
|
275
|
+
|
@@ -0,0 +1,115 @@
|
|
1
|
+
# Redis configuration file example
|
2
|
+
|
3
|
+
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
4
|
+
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
5
|
+
daemonize yes
|
6
|
+
|
7
|
+
# When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
|
8
|
+
# You can specify a custom pid file location here.
|
9
|
+
pidfile ./test/redis-test.pid
|
10
|
+
|
11
|
+
# Accept connections on the specified port, default is 6379
|
12
|
+
port 9736
|
13
|
+
|
14
|
+
# If you want you can bind a single interface, if the bind option is not
|
15
|
+
# specified all the interfaces will listen for connections.
|
16
|
+
#
|
17
|
+
# bind 127.0.0.1
|
18
|
+
|
19
|
+
# Close the connection after a client is idle for N seconds (0 to disable)
|
20
|
+
timeout 300
|
21
|
+
|
22
|
+
# Save the DB on disk:
|
23
|
+
#
|
24
|
+
# save <seconds> <changes>
|
25
|
+
#
|
26
|
+
# Will save the DB if both the given number of seconds and the given
|
27
|
+
# number of write operations against the DB occurred.
|
28
|
+
#
|
29
|
+
# In the example below the behaviour will be to save:
|
30
|
+
# after 900 sec (15 min) if at least 1 key changed
|
31
|
+
# after 300 sec (5 min) if at least 10 keys changed
|
32
|
+
# after 60 sec if at least 10000 keys changed
|
33
|
+
save 900 1
|
34
|
+
save 300 10
|
35
|
+
save 60 10000
|
36
|
+
|
37
|
+
# The filename where to dump the DB
|
38
|
+
dbfilename dump.rdb
|
39
|
+
|
40
|
+
# For default save/load DB in/from the working directory
|
41
|
+
# Note that you must specify a directory not a file name.
|
42
|
+
dir ./test/
|
43
|
+
|
44
|
+
# Set server verbosity to 'debug'
|
45
|
+
# it can be one of:
|
46
|
+
# debug (a lot of information, useful for development/testing)
|
47
|
+
# notice (moderately verbose, what you want in production probably)
|
48
|
+
# warning (only very important / critical messages are logged)
|
49
|
+
loglevel debug
|
50
|
+
|
51
|
+
# Specify the log file name. Also 'stdout' can be used to force
|
52
|
+
# the daemon to log on the standard output. Note that if you use standard
|
53
|
+
# output for logging but daemonize, logs will be sent to /dev/null
|
54
|
+
logfile stdout
|
55
|
+
|
56
|
+
# Set the number of databases. The default database is DB 0, you can select
|
57
|
+
# a different one on a per-connection basis using SELECT <dbid> where
|
58
|
+
# dbid is a number between 0 and 'databases'-1
|
59
|
+
databases 16
|
60
|
+
|
61
|
+
################################# REPLICATION #################################
|
62
|
+
|
63
|
+
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
64
|
+
# another Redis server. Note that the configuration is local to the slave
|
65
|
+
# so for example it is possible to configure the slave to save the DB with a
|
66
|
+
# different interval, or to listen to another port, and so on.
|
67
|
+
|
68
|
+
# slaveof <masterip> <masterport>
|
69
|
+
|
70
|
+
################################## SECURITY ###################################
|
71
|
+
|
72
|
+
# Require clients to issue AUTH <PASSWORD> before processing any other
|
73
|
+
# commands. This might be useful in environments in which you do not trust
|
74
|
+
# others with access to the host running redis-server.
|
75
|
+
#
|
76
|
+
# This should stay commented out for backward compatibility and because most
|
77
|
+
# people do not need auth (e.g. they run their own servers).
|
78
|
+
|
79
|
+
# requirepass foobared
|
80
|
+
|
81
|
+
################################### LIMITS ####################################
|
82
|
+
|
83
|
+
# Set the max number of connected clients at the same time. By default there
|
84
|
+
# is no limit, and it's up to the number of file descriptors the Redis process
|
85
|
+
# is able to open. The special value '0' means no limits.
|
86
|
+
# Once the limit is reached Redis will close all the new connections sending
|
87
|
+
# an error 'max number of clients reached'.
|
88
|
+
|
89
|
+
# maxclients 128
|
90
|
+
|
91
|
+
# Don't use more memory than the specified amount of bytes.
|
92
|
+
# When the memory limit is reached Redis will try to remove keys with an
|
93
|
+
# EXPIRE set. It will try to start freeing keys that are going to expire
|
94
|
+
# in little time and preserve keys with a longer time to live.
|
95
|
+
# Redis will also try to remove objects from free lists if possible.
|
96
|
+
#
|
97
|
+
# If all this fails, Redis will start to reply with errors to commands
|
98
|
+
# that will use more memory, like SET, LPUSH, and so on, and will continue
|
99
|
+
# to reply to most read-only commands like GET.
|
100
|
+
#
|
101
|
+
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
|
102
|
+
# 'state' server or cache, not as a real DB. When Redis is used as a real
|
103
|
+
# database the memory usage will grow over the weeks, it will be obvious if
|
104
|
+
# it is going to use too much memory in the long run, and you'll have the time
|
105
|
+
# to upgrade. With maxmemory after the limit is reached you'll start to get
|
106
|
+
# errors for write operations, and this may even lead to DB inconsistency.
|
107
|
+
|
108
|
+
# maxmemory <bytes>
|
109
|
+
|
110
|
+
############################### ADVANCED CONFIG ###############################
|
111
|
+
|
112
|
+
# Glue small output buffers together in order to send small replies in a
|
113
|
+
# single TCP packet. Uses a bit more CPU but most of the times it is a win
|
114
|
+
# in terms of number of queries per second. Use 'yes' if unsure.
|
115
|
+
glueoutputbuf yes
|
@@ -0,0 +1,165 @@
|
|
1
|
+
require File.dirname(__FILE__) + '/test_helper'
require 'time'

# Integration tests for ResqueCleaner. The setup seeds a known set of
# 42 failed jobs across three dates so the counts asserted below are
# deterministic.
context "ResqueCleaner" do
  # Enqueues `num` jobs of class `job` on `queue` at the frozen `date`
  # and lets `worker` process (and fail) them.
  def create_and_process_jobs(queue,worker,num,date,job,*args)
    Timecop.freeze(date) do
      num.times do
        Resque::Job.create(queue, job, *args)
      end
      worker.work(0)
    end
  end

  # Total size of the given queues.
  def queue_size(*queues)
    queues.inject(0){|sum,queue| sum + Resque.size(queue).to_i}
  end

  setup do
    Resque.redis.flushall

    @worker = Resque::Worker.new(:jobs,:jobs2)

    # 3 BadJob at 2009-03-13
    create_and_process_jobs :jobs, @worker, 3, Time.parse('2009-03-13'), BadJob
    # 2 BadJob by Jason at 2009-03-13
    create_and_process_jobs :jobs2, @worker, 2, Time.parse('2009-03-13'), BadJob, "Jason"

    # 1 BadJob by Johnson at 2009-03-13
    create_and_process_jobs :jobs, @worker, 1, Time.parse('2009-03-13'), BadJob, "Johnson"

    # 7 BadJob at 2009-11-13
    create_and_process_jobs :jobs, @worker, 7, Time.parse('2009-11-13'), BadJobWithSyntaxError
    # 7 BadJob by Freddy at 2009-11-13
    create_and_process_jobs :jobs2, @worker, 7, Time.parse('2009-11-13'), BadJob, "Freddy"

    # 11 BadJob at 2010-08-13
    create_and_process_jobs :jobs, @worker, 11, Time.parse('2010-08-13'), BadJob
    # 11 BadJob by Jason at 2010-08-13
    create_and_process_jobs :jobs2, @worker, 11, Time.parse('2010-08-13'), BadJob, "Jason"

    @cleaner = Resque::Plugins::ResqueCleaner.new
    @cleaner.print_message = false
  end

  test "#select returns failure jobs" do
    ret = @cleaner.select
    assert_equal 42, ret.size
  end

  test "#select works with a limit" do
    @cleaner.limiter.maximum = 10
    ret = @cleaner.select

    # only maximum number
    assert_equal 10, ret.size

    # latest one
    assert_equal Time.parse(ret[0]['failed_at']), Time.parse('2010-08-13')
  end

  test "#select with a block returns failure jobs which the block evaluates true" do
    ret = @cleaner.select {|job| job["payload"]["args"][0]=="Jason"}
    assert_equal 13, ret.size
  end

  test "#clear deletes failure jobs" do
    cleared = @cleaner.clear
    assert_equal 42, cleared
    assert_equal 0, @cleaner.select.size
  end

  test "#clear with a block deletes failure jobs which the block evaluates true" do
    cleared = @cleaner.clear{|job| job["payload"]["args"][0]=="Jason"}
    assert_equal 13, cleared
    assert_equal 42-13, @cleaner.select.size
    assert_equal 0, @cleaner.select{|job| job["payload"]["args"][0]=="Jason"}.size
  end

  test "#requeue retries failure jobs" do
    assert_equal 0, queue_size(:jobs,:jobs2)
    requeued = @cleaner.requeue
    assert_equal 42, requeued
    assert_equal 42, @cleaner.select.size # it doesn't clear jobs
    assert_equal 42, queue_size(:jobs,:jobs2)
  end

  test "#requeue with a block retries failure jobs which the block evaluates true" do
    requeued = @cleaner.requeue{|job| job["payload"]["args"][0]=="Jason"}
    assert_equal 13, requeued
    assert_equal 13, queue_size(:jobs,:jobs2)
  end

  test "#requeue with clear option requeues and deletes failure jobs" do
    assert_equal 0, queue_size(:jobs,:jobs2)
    requeued = @cleaner.requeue(true)
    assert_equal 42, requeued
    assert_equal 42, queue_size(:jobs,:jobs2)
    assert_equal 0, @cleaner.select.size
  end

  test "#clear_stale deletes failure jobs which is queued before the last x enqueued" do
    @cleaner.limiter.maximum = 10
    @cleaner.clear_stale
    assert_equal 10, @cleaner.failure.count
    assert_equal Time.parse(@cleaner.failure_jobs[0]['failed_at']), Time.parse('2010-08-13')
  end

  test "#proc gives you handy proc definitions" do
    # before 2009-04-01
    ret = @cleaner.select &@cleaner.proc.before('2009-04-01')
    assert_equal 6, ret.size

    # after 2010-01-01
    ret = @cleaner.select &@cleaner.proc.after('2010-01-01')
    assert_equal 22, ret.size

    # filter by class
    ret = @cleaner.select &@cleaner.proc.klass(BadJobWithSyntaxError)
    assert_equal 7, ret.size

    # filter by queue
    ret = @cleaner.select &@cleaner.proc.queue(:jobs2)
    assert_equal 20, ret.size

    # you can chain
    ret = @cleaner.select &@cleaner.proc.queue(:jobs2).before('2009-12-01')
    assert_equal 9, ret.size

    # you can chain with your custom block
    ret = @cleaner.select &@cleaner.proc{|j| j['payload']['args']==['Jason']}.queue(:jobs2)
    assert_equal 13, ret.size
  end

  test "#stats_by_date returns stats grouped by date" do
    ret = @cleaner.stats_by_date
    assert_equal 6, ret['2009/03/13']
    assert_equal 14, ret['2009/11/13']

    # with block
    ret = @cleaner.stats_by_date{|j| j['payload']['args']==['Jason']}
    assert_equal 2, ret['2009/03/13']
    assert_equal nil, ret['2009/11/13']
    assert_equal 11, ret['2010/08/13']
  end

  test "#stats_by_class returns stats grouped by class" do
    ret = @cleaner.stats_by_class
    assert_equal 35, ret['BadJob']
    assert_equal 7, ret['BadJobWithSyntaxError']
  end

  test "#lock ensures that a new failure job doesn't affect in a limit mode" do
    @cleaner.limiter.maximum = 23
    @cleaner.limiter.lock do
      first = @cleaner.select[0]
      assert_equal "Freddy", first["payload"]["args"][0]

      create_and_process_jobs :jobs, @worker, 30, Time.parse('2010-10-10'), BadJob, "Jack"

      first = @cleaner.select[0]
      assert_equal "Freddy", first["payload"]["args"][0]
    end
    first = @cleaner.select[0]
    assert_equal "Jack", first["payload"]["args"][0]
  end
end
|
data/test/test_helper.rb
ADDED
@@ -0,0 +1,122 @@
|
|
1
|
+
# Mostly copied from Resque in order to have similar test environment.
# https://github.com/defunkt/resque/blob/master/test/test_helper.rb

dir = File.dirname(File.expand_path(__FILE__))
$LOAD_PATH.unshift dir + '/../lib'
$TESTING = true
require 'test/unit'
require 'rubygems'
require 'resque'
require 'timecop'

begin
  require 'leftright'
rescue LoadError
end
require 'resque'
require 'resque_cleaner'

#
# make sure we can run redis
#

if !system("which redis-server")
  puts '', "** can't find `redis-server` in your path"
  puts "** try running `sudo rake install`"
  abort ''
end


#
# start our own redis when the tests start,
# kill it when they end
#

at_exit do
  next if $!

  # run the suite explicitly so the exit code reflects test results
  if defined?(MiniTest)
    exit_code = MiniTest::Unit.new.run(ARGV)
  else
    exit_code = Test::Unit::AutoRunner.run
  end

  # find the test redis-server by its config name and shut it down
  pid = `ps -A -o pid,command | grep [r]edis-test`.split(" ")[0]
  puts "Killing test redis server..."
  `rm -f #{dir}/dump.rdb`
  Process.kill("KILL", pid.to_i)
  exit exit_code
end

puts "Starting redis for testing at localhost:9736..."
`redis-server #{dir}/redis-test.conf`
Resque.redis = 'localhost:9736'


##
# test/spec/mini 3
# http://gist.github.com/25455
# chris@ozmm.org
#
def context(*args, &block)
  return super unless (name = args.first) && block
  require 'test/unit'
  klass = Class.new(defined?(ActiveSupport::TestCase) ? ActiveSupport::TestCase : Test::Unit::TestCase) do
    def self.test(name, &block)
      define_method("test_#{name.gsub(/\W/,'_')}", &block) if block
    end
    def self.xtest(*args) end
    def self.setup(&block) define_method(:setup, &block) end
    def self.teardown(&block) define_method(:teardown, &block) end
  end
  (class << klass; self end).send(:define_method, :name) { name.gsub(/\W/,'_') }
  klass.class_eval &block
end

##
# Helper to perform job classes
#
module PerformJob
  def perform_job(klass, *args)
    resque_job = Resque::Job.new(:testqueue, 'class' => klass, 'args' => args)
    resque_job.perform
  end
end

#
# fixture classes
#

class SomeJob
  def self.perform(repo_id, path)
  end
end

class SomeIvarJob < SomeJob
  @queue = :ivar
end

class SomeMethodJob < SomeJob
  def self.queue
    :method
  end
end

# Always raises, so processing it lands it on the failed list.
class BadJob
  def self.perform(name=nil)
    msg = name ? "Bad job, #{name}" : "Bad job!"
    raise msg
  end
end

class GoodJob
  def self.perform(name)
    "Good job, #{name}"
  end
end

# Raises a non-StandardError to exercise failure handling.
class BadJobWithSyntaxError
  def self.perform
    raise SyntaxError, "Extra Bad job!"
  end
end
|
metadata
ADDED
@@ -0,0 +1,88 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: resque-cleaner
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
hash: 29
|
5
|
+
prerelease: false
|
6
|
+
segments:
|
7
|
+
- 0
|
8
|
+
- 0
|
9
|
+
- 1
|
10
|
+
version: 0.0.1
|
11
|
+
platform: ruby
|
12
|
+
authors:
|
13
|
+
- Tatsuya Ono
|
14
|
+
autorequire:
|
15
|
+
bindir: bin
|
16
|
+
cert_chain: []
|
17
|
+
|
18
|
+
date: 2010-11-18 00:00:00 +00:00
|
19
|
+
default_executable:
|
20
|
+
dependencies:
|
21
|
+
- !ruby/object:Gem::Dependency
|
22
|
+
name: resque
|
23
|
+
prerelease: false
|
24
|
+
requirement: &id001 !ruby/object:Gem::Requirement
|
25
|
+
none: false
|
26
|
+
requirements:
|
27
|
+
- - ~>
|
28
|
+
- !ruby/object:Gem::Version
|
29
|
+
hash: 15
|
30
|
+
segments:
|
31
|
+
- 1
|
32
|
+
- 0
|
33
|
+
version: "1.0"
|
34
|
+
type: :runtime
|
35
|
+
version_requirements: *id001
|
36
|
+
description: " ResqueCleaner is a Resque plugin which helps you clean up failure jobs. It provides the following functionalities.\n\n * Filters failure jobs with an easy and extensible way\n * Retries failure jobs\n * Removes failure jobs\n * Shows stats\n\n"
|
37
|
+
email: ononoma@gmail.com
|
38
|
+
executables: []
|
39
|
+
|
40
|
+
extensions: []
|
41
|
+
|
42
|
+
extra_rdoc_files:
|
43
|
+
- LICENSE
|
44
|
+
- README.markdown
|
45
|
+
files:
|
46
|
+
- README.markdown
|
47
|
+
- Rakefile
|
48
|
+
- LICENSE
|
49
|
+
- lib/resque_cleaner.rb
|
50
|
+
- test/redis-test.conf
|
51
|
+
- test/resque_cleaner_test.rb
|
52
|
+
- test/test_helper.rb
|
53
|
+
has_rdoc: true
|
54
|
+
homepage: http://github.com/ono/resque-cleaner
|
55
|
+
licenses: []
|
56
|
+
|
57
|
+
post_install_message:
|
58
|
+
rdoc_options:
|
59
|
+
- --charset=UTF-8
|
60
|
+
require_paths:
|
61
|
+
- lib
|
62
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
63
|
+
none: false
|
64
|
+
requirements:
|
65
|
+
- - ">="
|
66
|
+
- !ruby/object:Gem::Version
|
67
|
+
hash: 3
|
68
|
+
segments:
|
69
|
+
- 0
|
70
|
+
version: "0"
|
71
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
72
|
+
none: false
|
73
|
+
requirements:
|
74
|
+
- - ">="
|
75
|
+
- !ruby/object:Gem::Version
|
76
|
+
hash: 3
|
77
|
+
segments:
|
78
|
+
- 0
|
79
|
+
version: "0"
|
80
|
+
requirements: []
|
81
|
+
|
82
|
+
rubyforge_project:
|
83
|
+
rubygems_version: 1.3.7
|
84
|
+
signing_key:
|
85
|
+
specification_version: 3
|
86
|
+
summary: A Resque plugin cleaning up failure jobs.
|
87
|
+
test_files: []
|
88
|
+
|