resque-scheduler 2.0.0 → 2.0.1
- data/README.markdown +20 -12
- data/lib/resque/scheduler.rb +29 -11
- data/lib/resque/scheduler/lock.rb +3 -0
- data/lib/resque/scheduler/lock/base.rb +52 -0
- data/lib/resque/scheduler/lock/basic.rb +28 -0
- data/lib/resque/scheduler/lock/resilient.rb +69 -0
- data/lib/resque/scheduler_locking.rb +90 -0
- data/lib/resque_scheduler.rb +15 -4
- data/lib/resque_scheduler/version.rb +1 -1
- data/test/delayed_queue_test.rb +19 -0
- data/test/redis-test.conf +0 -7
- data/test/scheduler_locking_test.rb +180 -0
- data/test/scheduler_test.rb +6 -8
- data/test/support/redis_instance.rb +129 -0
- data/test/test_helper.rb +9 -12
- metadata +10 -3
data/README.markdown CHANGED

@@ -82,7 +82,7 @@ to know.
 # need to require your jobs. This can be an advantage since it's
 # less code that resque-scheduler needs to know about. But in a small
 # project, it's usually easier to just include your job classes here.
-# So,
+# So, something like this:
 require 'jobs'
 end
 end
@@ -99,17 +99,6 @@ any nonempty value, they will take effect. `VERBOSE` simply dumps more output
 to stdout. `MUTE` does the opposite and silences all output. `MUTE`
 supersedes `VERBOSE`.
 
-NOTE: You DO NOT want to run >1 instance of the scheduler. Doing so will
-result in the same job being queued more than once. You only need one
-instance of the scheduler running per resque instance (regardless of number
-of machines).
-
-If the scheduler process goes down for whatever reason, the delayed items
-that should have fired during the outage will fire once the scheduler process
-is started back up again (regardless of it being on a new machine). Missed
-scheduled jobs, however, will not fire upon recovery of the scheduler process.
-
-
 
 ### Delayed jobs
 
@@ -280,6 +269,25 @@ custom job class to support the #scheduled method:
 end
 end
 
+### Redundancy and Fail-Over
+
+*>= 2.0.1 only. Prior to 2.0.1, running multiple resque-scheduler processes is not recommended and will result in duplicate jobs.*
+
+You may want to have resque-scheduler running on multiple machines for
+redundancy. Electing a master and failover is built in and default. Simply
+run resque-scheduler on as many machines as you want pointing to the same
+redis instance and schedule. The scheduler processes will use redis to
+elect a master process and detect failover when the master dies. Precautions are
+taken to prevent jobs from potentially being queued twice during failover even
+when the clocks of the scheduler machines are slightly out of sync (or load affects
+scheduled job firing time). If you want the gory details, look at Resque::SchedulerLocking.
+
+If the scheduler process(es) goes down for whatever reason, the delayed items
+that should have fired during the outage will fire once the scheduler process
+is started back up again (regardless of it being on a new machine). Missed
+scheduled jobs, however, will not fire upon recovery of the scheduler process.
+Think of scheduled (recurring) jobs as cron jobs - if you stop cron, it doesn't fire
+missed jobs once it starts back up.
 
 
 ### resque-web Additions
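
To make the failover described above concrete, here is a minimal sketch of what every redundant scheduler process effectively does, using the Resque::SchedulerLocking module added in this release (the class name and the 5-second poll interval are illustrative, not the gem's actual values):

    require 'resque'
    require 'resque/scheduler_locking'

    # Run this same process on every machine; only the one holding the
    # master lock in redis does any queueing work.
    class ExampleSchedulerProcess
      extend Resque::SchedulerLocking

      def self.run
        loop do
          if is_master?
            # enqueue due delayed jobs and fire cron-like schedules here
          end
          sleep 5 # illustrative poll interval
        end
      end
    end
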

data/lib/resque/scheduler.rb CHANGED

@@ -1,11 +1,13 @@
 require 'rufus/scheduler'
 require 'thwait'
+require 'resque/scheduler_locking'
 
 module Resque
 
   class Scheduler
 
     extend Resque::Helpers
+    extend Resque::SchedulerLocking
 
     class << self
 
@@ -15,7 +17,7 @@ module Resque
       # If set, produces no output
       attr_accessor :mute
 
-      # If set, will try to update the
+      # If set, will try to update the schedule in the loop
       attr_accessor :dynamic
 
       # Amount of time in seconds to sleep between polls of the delayed
@@ -47,17 +49,20 @@ module Resque
 
        # Now start the scheduling part of the loop.
        loop do
-         begin
-           handle_delayed_items
-           update_schedule if dynamic
-         rescue Errno::EAGAIN, Errno::ECONNRESET => e
-           warn e.message
+         if is_master?
+           begin
+             handle_delayed_items
+             update_schedule if dynamic
+           rescue Errno::EAGAIN, Errno::ECONNRESET => e
+             warn e.message
+           end
          end
          poll_sleep
        end
 
        # never gets here.
      end
+
 
      # For all signals, set the shutdown flag and wait for current
      # poll/enqueuing to finish (should be almost instant). In the
@@ -133,8 +138,10 @@ module Resque
        if !config[interval_type].nil? && config[interval_type].length > 0
          args = optionizate_interval_value(config[interval_type])
          @@scheduled_jobs[name] = rufus_scheduler.send(interval_type, *args) do
-           log! "queueing #{config['class']} (#{name})"
-           handle_errors { enqueue_from_config(config) }
+           if is_master?
+             log! "queueing #{config['class']} (#{name})"
+             handle_errors { enqueue_from_config(config) }
+           end
          end
          interval_defined = true
          break
@@ -169,7 +176,8 @@ module Resque
        item = nil
        begin
          handle_shutdown do
-           if item = Resque.next_item_for_timestamp(timestamp)
+           # Continually check that it is still the master
+           if is_master? && item = Resque.next_item_for_timestamp(timestamp)
              log "queuing #{item['class']} [delayed]"
              handle_errors { enqueue_from_config(item) }
            end
@@ -218,7 +226,14 @@ module Resque
        # one app that schedules for another
        if Class === klass
          ResqueScheduler::Plugin.run_before_delayed_enqueue_hooks(klass, *params)
-         Resque.enqueue_to(queue, klass, *params)
+
+         # If the class is a custom job class, call self#scheduled on it. This allows you to do things like
+         # Resque.enqueue_at(timestamp, CustomJobClass). Otherwise, pass off to Resque.
+         if klass.respond_to?(:scheduled)
+           klass.scheduled(queue, klass_name, *params)
+         else
+           Resque.enqueue_to(queue, klass, *params)
+         end
        else
          # This will not run the before_hooks in rescue, but will at least
          # queue the job.
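
The `scheduled` hook wired in here lets a job class intercept its own delayed enqueueing. A sketch of such a class (`ArchiveJob` and its body are hypothetical; the hook signature is the one this hunk calls):

    class ArchiveJob
      @queue = :archive

      # Called by resque-scheduler instead of a plain enqueue when a
      # delayed timestamp for this class comes due.
      #   queue      - the queue resolved for this job
      #   klass_name - this class's name as a string
      #   args       - the arguments originally passed to enqueue_at
      def self.scheduled(queue, klass_name, *args)
        # custom bookkeeping could go here; then enqueue as usual
        Resque.enqueue_to(queue, self, *args)
      end

      def self.perform(*args)
        # the actual work
      end
    end

    # Resque.enqueue_at(Time.now + 3600, ArchiveJob, 'record-42')
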
@@ -281,7 +296,10 @@ module Resque
      # Sets the shutdown flag, exits if sleeping
      def shutdown
        @shutdown = true
-       exit if @sleeping
+       if @sleeping
+         release_master_lock!
+         exit
+       end
      end
 
      def log!(msg)
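
Releasing the master lock during a clean shutdown matters for failover latency: without the explicit release, standby schedulers would have to wait out the remaining lock TTL (up to the 3-minute default) before one of them could take over. A sketch using the lock classes added below (key and flow illustrative):

    lock = Resque::Scheduler::Lock::Basic.new(:resque_scheduler_master_lock)
    lock.acquire!   # this process is master
    lock.release!   # on shutdown: DEL the key, so another process's next
                    # is_master? call wins immediately instead of waiting
                    # for the TTL to lapse
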

data/lib/resque/scheduler/lock/base.rb ADDED

@@ -0,0 +1,52 @@
+module Resque
+  class Scheduler
+    module Lock
+      class Base
+        attr_reader :key
+        attr_accessor :timeout
+
+        def initialize(key, options = {})
+          @key = key
+
+          # 3 minute default timeout
+          @timeout = options[:timeout] || 60 * 3
+        end
+
+        # Attempts to acquire the lock. Returns true if successfully acquired.
+        def acquire!
+          raise NotImplementedError
+        end
+
+        def value
+          @value ||= [hostname, process_id].join(':')
+        end
+
+        # Returns true if you currently hold the lock.
+        def locked?
+          raise NotImplementedError
+        end
+
+        # Releases the lock.
+        def release!
+          Resque.redis.del(key) == 1
+        end
+
+        private
+
+        # Extends the lock by `timeout` seconds.
+        def extend_lock!
+          Resque.redis.expire(key, timeout)
+        end
+
+        def hostname
+          local_hostname = Socket.gethostname
+          Socket.gethostbyname(local_hostname).first rescue local_hostname
+        end
+
+        def process_id
+          Process.pid
+        end
+      end
+    end
+  end
+end

data/lib/resque/scheduler/lock/basic.rb ADDED

@@ -0,0 +1,28 @@
+require 'resque/scheduler/lock/base'
+
+module Resque
+  class Scheduler
+    module Lock
+      class Basic < Base
+        def acquire!
+          if Resque.redis.setnx(key, value)
+            extend_lock!
+            true
+          end
+        end
+
+        def locked?
+          if Resque.redis.get(key) == value
+            extend_lock!
+
+            if Resque.redis.get(key) == value
+              return true
+            end
+          end
+
+          false
+        end
+      end
+    end
+  end
+end
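
A quick illustration of the Basic lock's lifecycle (key name and timeout are arbitrary):

    lock = Resque::Scheduler::Lock::Basic.new('example:lock', :timeout => 120)

    lock.acquire!   # => true for the first caller (SETNX), then EXPIRE 120
    lock.locked?    # => true for the holder; also refreshes the TTL
    lock.release!   # => true; DELs the key so another process can acquire

Note the second GET after `extend_lock!` in `locked?`: GET and EXPIRE are separate round trips, so the key could expire and be re-acquired by another process in between. The re-check narrows that race; the Resilient lock below closes it entirely with Lua.
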

data/lib/resque/scheduler/lock/resilient.rb ADDED

@@ -0,0 +1,69 @@
+require 'resque/scheduler/lock/base'
+
+module Resque
+  class Scheduler
+    module Lock
+      class Resilient < Base
+        def acquire!
+          Resque.redis.evalsha(
+            acquire_sha,
+            :keys => [key],
+            :argv => [value]
+          ).to_i == 1
+        end
+
+        def locked?
+          Resque.redis.evalsha(
+            locked_sha,
+            :keys => [key],
+            :argv => [value]
+          ).to_i == 1
+        end
+
+        private
+
+        def locked_sha(refresh = false)
+          @locked_sha = nil if refresh
+
+          @locked_sha ||= begin
+            Resque.redis.script(
+              :load,
+              <<-EOF
+                if redis.call('GET', KEYS[1]) == ARGV[1]
+                then
+                  redis.call('EXPIRE', KEYS[1], #{timeout})
+
+                  if redis.call('GET', KEYS[1]) == ARGV[1]
+                  then
+                    return 1
+                  end
+                end
+
+                return 0
+              EOF
+            )
+          end
+        end
+
+        def acquire_sha(refresh = false)
+          @acquire_sha = nil if refresh
+
+          @acquire_sha ||= begin
+            Resque.redis.script(
+              :load,
+              <<-EOF
+                if redis.call('SETNX', KEYS[1], ARGV[1]) == 1
+                then
+                  redis.call('EXPIRE', KEYS[1], #{timeout})
+                  return 1
+                else
+                  return 0
+                end
+              EOF
+            )
+          end
+        end
+      end
+    end
+  end
+end
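
The Lua scripts make check-and-extend a single atomic server-side step, which is exactly the window the Basic lock can only narrow. A minimal sketch of the script round trip (key, value, and TTL illustrative; `script` and `evalsha` are the redis-rb calls used above):

    script = <<-LUA
      if redis.call('GET', KEYS[1]) == ARGV[1] then
        redis.call('EXPIRE', KEYS[1], 60)
        return 1
      end
      return 0
    LUA

    sha = Resque.redis.script(:load, script)  # cache once; returns the SHA1
    held = Resque.redis.evalsha(sha, :keys => ['example:lock'],
                                     :argv => ['host:1234']).to_i == 1
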

data/lib/resque/scheduler_locking.rb ADDED

@@ -0,0 +1,90 @@
+
+# ### Locking the scheduler process
+#
+# There are two places in resque-scheduler that need to be synchronized
+# in order to be able to run redundant scheduler processes while ensuring jobs don't
+# get queued multiple times when the master process changes.
+#
+# 1) Processing the delayed queues (jobs that are created from enqueue_at/enqueue_in, etc)
+# 2) Processing the scheduled (cron-like) jobs from rufus-scheduler
+#
+# Protecting the delayed queues (#1) is relatively easy. A simple SETNX in
+# redis would suffice. However, protecting the scheduled jobs is trickier
+# because the clocks on machines could be slightly off or actual firing times
+# could vary slightly due to load. If scheduler A's clock is slightly ahead
+# of scheduler B's clock (since they are on different machines), when
+# scheduler A dies, we need to ensure that scheduler B doesn't queue jobs
+# that A already queued before its death. (This all assumes that it is
+# better to miss a few scheduled jobs than it is to run them multiple times
+# for the same iteration.)
+#
+# To avoid queuing multiple jobs in the case of master fail-over, the master
+# should remain the master as long as it can rather than a simple SETNX which
+# would result in the master role being passed around frequently.
+#
+# Locking Scheme:
+# Each resque-scheduler process attempts to get the master lock via SETNX.
+# Once obtained, it sets the expiration for 3 minutes (configurable). The
+# master process continually updates the timeout on the lock key to be 3
+# minutes in the future in its loop(s) (see `run`) and when jobs come out of
+# rufus-scheduler (see `load_schedule_job`). That ensures that a minimum of
+# 3 minutes must pass since the last queuing operation before a new master is
+# chosen. If, for whatever reason, the master fails to update the expiration
+# for 3 minutes, the key expires and the lock is up for grabs. If
+# miraculously the original master comes back to life, it will realize it is
+# no longer the master and stop processing jobs.
+#
+# The clocks on the scheduler machines can then be up to 3 minutes off from
+# each other without the risk of queueing the same scheduled job twice during
+# a master change. The catch is, in the event of a master change, no
+# scheduled jobs will be queued during those 3 minutes. So, there is a trade
+# off: the higher the timeout, the less likely scheduled jobs will be fired
+# twice but greater chances of missing scheduled jobs. The lower the timeout,
+# less likely jobs will be missed, greater the chances of jobs firing twice. If
+# you don't care about jobs firing twice or are certain your machines' clocks
+# are well in sync, a lower timeout is preferable. One thing to keep in mind:
+# this only affects *scheduled* jobs - delayed jobs will never be lost or
+# skipped since eventually a master will come online and it will process
+# everything that is ready (no matter how old it is). Scheduled jobs work
+# like cron - if you stop cron, no jobs fire while it's stopped and it doesn't
+# fire jobs that were missed when it starts up again.
+
+require 'resque/scheduler/lock'
+
+module Resque
+  module SchedulerLocking
+    def master_lock
+      @master_lock ||= build_master_lock
+    end
+
+    def supports_lua?
+      redis_master_version >= 2.5
+    end
+
+    def is_master?
+      master_lock.acquire! || master_lock.locked?
+    end
+
+    def release_master_lock!
+      master_lock.release!
+    end
+
+    private
+
+    def build_master_lock
+      if supports_lua?
+        Resque::Scheduler::Lock::Resilient.new(master_lock_key)
+      else
+        Resque::Scheduler::Lock::Basic.new(master_lock_key)
+      end
+    end
+
+    def master_lock_key
+      :resque_scheduler_master_lock
+    end
+
+    def redis_master_version
+      Resque.redis.info['redis_version'].to_f
+    end
+  end
+end
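
In practice the module is simply extended into the scheduler (as the scheduler.rb hunk above does), and any extender gets the same election behavior. A sketch, with hostnames and pids illustrative:

    scheduler = Class.new { extend Resque::SchedulerLocking }

    # The first process to call this wins the SETNX and stores "host-a:100"
    # as the lock value; on every other host the same call finds the key
    # taken and answers false until the TTL lapses or the lock is released.
    scheduler.is_master?

    scheduler.release_master_lock!  # explicit handoff, e.g. at shutdown
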

data/lib/resque_scheduler.rb CHANGED

@@ -123,7 +123,13 @@ module ResqueScheduler
 
    if Resque.inline?
      # Just create the job and let resque perform it right away with inline.
-     Resque::Job.create(queue, klass, *args)
+     # If the class is a custom job class, call self#scheduled on it. This allows you to do things like
+     # Resque.enqueue_at(timestamp, CustomJobClass, :opt1 => val1). Otherwise, pass off to Resque.
+     if klass.respond_to?(:scheduled)
+       klass.scheduled(queue, klass.to_s(), *args)
+     else
+       Resque::Job.create(queue, klass, *args)
+     end
    else
      delayed_push(timestamp, job_to_hash_with_queue(queue, klass, args))
    end
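
In inline mode (commonly enabled in tests), a delayed enqueue now honors the same `scheduled` hook immediately instead of touching the delayed queue at all. Roughly, reusing the hypothetical ArchiveJob sketched earlier:

    Resque.inline = true
    Resque.enqueue_at(Time.now + 60, ArchiveJob, 'record-42')
    # => calls ArchiveJob.scheduled(:archive, 'ArchiveJob', 'record-42')
    #    right away; inline mode never waits for the timestamp
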
@@ -216,12 +222,13 @@ module ResqueScheduler
   # Given an encoded item, remove it from the delayed_queue
   #
   # This method is potentially very expensive since it needs to scan
-  # through the delayed queue for every timestamp
+  # through the delayed queue for every timestamp, but at least it
+  # doesn't kill Redis by calling redis.keys.
   def remove_delayed(klass, *args)
     destroyed = 0
     search = encode(job_to_hash(klass, args))
-    Array(redis.keys("delayed:*")).each do |key|
-      destroyed += redis.lrem key, 0, search
+    Array(redis.zrange(:delayed_queue_schedule, 0, -1)).each do |timestamp|
+      destroyed += redis.lrem "delayed:#{timestamp}", 0, search
     end
     destroyed
   end
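
For context, the delayed queue being scanned is one sorted set of timestamps plus a list of encoded jobs per timestamp, so iterating `delayed_queue_schedule` touches only keys that actually exist, where the previous `redis.keys` pattern scan blocked the whole server. Illustrative layout and call (timestamps invented):

    # delayed_queue_schedule (zset): 1364342400, 1364346000, ...
    # delayed:1364342400     (list): {"class":"SomeJob","args":[...],"queue":"..."}

    Resque.remove_delayed(SomeJob, 'repo_id', '/tmp')  # => number of entries removed
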
@@ -258,6 +265,9 @@ module ResqueScheduler
 
   def clean_up_timestamp(key, timestamp)
     # If the list is empty, remove it.
+
+    # Use a watch here to ensure nobody adds jobs to this delayed
+    # queue while we're removing it.
     redis.watch key
     if 0 == redis.llen(key).to_i
       redis.multi do
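
The WATCH here is the standard redis optimistic-locking pattern: if any other client writes the watched key between WATCH and EXEC, the queued MULTI block is discarded rather than deleting a list that just received a job. A stripped-down sketch (key name invented; the commands inside the gem's MULTI are abbreviated since this hunk doesn't show them):

    redis.watch('delayed:1364342400')
    if redis.llen('delayed:1364342400').to_i == 0
      redis.multi do
        redis.del('delayed:1364342400')  # cleanup commands, illustrative
      end                                # EXEC returns nil if the key changed
    else
      redis.unwatch
    end
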
@@ -268,6 +278,7 @@ module ResqueScheduler
       redis.unwatch
     end
   end
+
   def validate_job!(klass)
     if klass.to_s.empty?
       raise Resque::NoClassError.new("Jobs must be given a class.")

data/test/delayed_queue_test.rb CHANGED

@@ -184,6 +184,25 @@ context "DelayedQueue" do
     Resque::Scheduler.handle_delayed_items(t)
   end
 
+  test "calls klass#scheduled when enqueuing jobs if it exists" do
+    t = Time.now - 60
+    Resque.enqueue_at(t, FakeCustomJobClassEnqueueAt, :foo => "bar")
+    FakeCustomJobClassEnqueueAt.expects(:scheduled).once.with('test', FakeCustomJobClassEnqueueAt.to_s, {"foo" => "bar"})
+    Resque::Scheduler.handle_delayed_items
+  end
+
+  test "when Resque.inline = true, calls klass#scheduled when enqueuing jobs if it exists" do
+    old_val = Resque.inline
+    begin
+      Resque.inline = true
+      t = Time.now - 60
+      FakeCustomJobClassEnqueueAt.expects(:scheduled).once.with(:test, FakeCustomJobClassEnqueueAt.to_s, {:foo => "bar"})
+      Resque.enqueue_at(t, FakeCustomJobClassEnqueueAt, :foo => "bar")
+    ensure
+      Resque.inline = old_val
+    end
+  end
+
   test "enqueue_delayed_items_for_timestamp creates jobs and empties the delayed queue" do
     t = Time.now + 60
 

data/test/redis-test.conf CHANGED

@@ -106,10 +106,3 @@ databases 16
 # errors for write operations, and this may even lead to DB inconsistency.
 
 # maxmemory <bytes>
-
-############################### ADVANCED CONFIG ###############################
-
-# Glue small output buffers together in order to send small replies in a
-# single TCP packet. Uses a bit more CPU but most of the times it is a win
-# in terms of number of queries per second. Use 'yes' if unsure.
-glueoutputbuf yes

data/test/scheduler_locking_test.rb ADDED

@@ -0,0 +1,180 @@
+require File.dirname(__FILE__) + '/test_helper'
+
+module LockTestHelper
+  def lock_is_not_held(lock)
+    Resque.redis.set(lock.key, 'anothermachine:1234')
+  end
+end
+
+context 'Resque::SchedulerLocking' do
+  setup do
+    @subject = Class.new { extend Resque::SchedulerLocking }
+  end
+
+  teardown do
+    Resque.redis.del(@subject.master_lock.key)
+  end
+
+  test 'it should use the basic lock mechanism for <= Redis 2.4' do
+    Resque.redis.stubs(:info).returns('redis_version' => '2.4.16')
+
+    assert_equal @subject.master_lock.class, Resque::Scheduler::Lock::Basic
+  end
+
+  test 'it should use the resilient lock mechanism for > Redis 2.4' do
+    Resque.redis.stubs(:info).returns('redis_version' => '2.5.12')
+
+    assert_equal @subject.master_lock.class, Resque::Scheduler::Lock::Resilient
+  end
+
+  test 'it should be the master if the lock is held' do
+    @subject.master_lock.acquire!
+    assert @subject.is_master?, 'should be master'
+  end
+
+  test 'it should not be the master if the lock is held by someone else' do
+    Resque.redis.set(@subject.master_lock.key, 'somethingelse:1234')
+    assert !@subject.is_master?, 'should not be master'
+  end
+
+  test "release_master_lock should delegate to master_lock" do
+    @subject.master_lock.expects(:release!)
+    @subject.release_master_lock!
+  end
+end
+
+context 'Resque::Scheduler::Lock::Base' do
+  setup do
+    @lock = Resque::Scheduler::Lock::Base.new('test_lock_key')
+  end
+
+  test '#acquire! should be not implemented' do
+    assert_raise(NotImplementedError) do
+      @lock.acquire!
+    end
+  end
+
+  test '#locked? should be not implemented' do
+    assert_raise(NotImplementedError) do
+      @lock.locked?
+    end
+  end
+end
+
+context 'Resque::Scheduler::Lock::Basic' do
+  include LockTestHelper
+
+  setup do
+    @lock = Resque::Scheduler::Lock::Basic.new('test_lock_key')
+  end
+
+  teardown do
+    @lock.release!
+  end
+
+  test 'you should not have the lock if someone else holds it' do
+    lock_is_not_held(@lock)
+
+    assert !@lock.locked?
+  end
+
+  test 'you should not be able to acquire the lock if someone else holds it' do
+    lock_is_not_held(@lock)
+
+    assert !@lock.acquire!
+  end
+
+  test "the lock should receive a TTL on acquiring" do
+    @lock.acquire!
+
+    assert Resque.redis.ttl(@lock.key) > 0, "lock should expire"
+  end
+
+  test 'releasing should release the master lock' do
+    assert @lock.acquire!, 'should have acquired the master lock'
+    assert @lock.locked?, 'should be locked'
+
+    @lock.release!
+
+    assert !@lock.locked?, 'should not be locked'
+  end
+
+  test 'checking the lock should increase the TTL if we hold it' do
+    @lock.acquire!
+    Resque.redis.setex(@lock.key, 10, @lock.value)
+
+    @lock.locked?
+
+    assert Resque.redis.ttl(@lock.key) > 10, "TTL should have been updated"
+  end
+
+  test 'checking the lock should not increase the TTL if we do not hold it' do
+    Resque.redis.setex(@lock.key, 10, @lock.value)
+    lock_is_not_held(@lock)
+
+    @lock.locked?
+
+    assert Resque.redis.ttl(@lock.key) <= 10, "TTL should not have been updated"
+  end
+end
+
+context 'Resque::Scheduler::Lock::Resilient' do
+  include LockTestHelper
+
+  if !Resque::Scheduler.supports_lua?
+    puts "*** Skipping Resque::Scheduler::Lock::Resilient tests, as they require Redis >= 2.5."
+  else
+    setup do
+      @lock = Resque::Scheduler::Lock::Resilient.new('test_resilient_lock')
+    end
+
+    teardown do
+      @lock.release!
+    end
+
+    test 'you should not have the lock if someone else holds it' do
+      lock_is_not_held(@lock)
+
+      assert !@lock.locked?, 'you should not have the lock'
+    end
+
+    test 'you should not be able to acquire the lock if someone else holds it' do
+      lock_is_not_held(@lock)
+
+      assert !@lock.acquire!
+    end
+
+    test "the lock should receive a TTL on acquiring" do
+      @lock.acquire!
+
+      assert Resque.redis.ttl(@lock.key) > 0, "lock should expire"
+    end
+
+    test 'releasing should release the master lock' do
+      assert @lock.acquire!, 'should have acquired the master lock'
+      assert @lock.locked?, 'should be locked'
+
+      @lock.release!
+
+      assert !@lock.locked?, 'should not be locked'
+    end
+
+    test 'checking the lock should increase the TTL if we hold it' do
+      @lock.acquire!
+      Resque.redis.setex(@lock.key, 10, @lock.value)
+
+      @lock.locked?
+
+      assert Resque.redis.ttl(@lock.key) > 10, "TTL should have been updated"
+    end
+
+    test 'checking the lock should not increase the TTL if we do not hold it' do
+      Resque.redis.setex(@lock.key, 10, @lock.value)
+      lock_is_not_held(@lock)
+
+      @lock.locked?
+
+      assert Resque.redis.ttl(@lock.key) <= 10, "TTL should not have been updated"
+    end
+  end
+end

data/test/scheduler_test.rb CHANGED

@@ -4,8 +4,7 @@ context "Resque::Scheduler" do
 
   setup do
     Resque::Scheduler.dynamic = false
-    Resque.redis.del(:schedules)
-    Resque.redis.del(:schedules_changed)
+    Resque.redis.flushall
     Resque::Scheduler.mute = true
     Resque::Scheduler.clear_schedule!
     Resque::Scheduler.send(:class_variable_set, :@@scheduled_jobs, {})
@@ -72,15 +71,15 @@ context "Resque::Scheduler" do
     assert Resque::Scheduler.scheduled_jobs.include?("some_ivar_job2")
   end
 
-  test "load_schedule_job loads a schedule" do
+  test "load_schedule_job loads a schedule" do
     Resque::Scheduler.load_schedule_job("some_ivar_job", {'cron' => "* * * * *", 'class' => 'SomeIvarJob', 'args' => "/tmp"})
 
     assert_equal(1, Resque::Scheduler.rufus_scheduler.all_jobs.size)
     assert_equal(1, Resque::Scheduler.scheduled_jobs.size)
     assert Resque::Scheduler.scheduled_jobs.keys.include?("some_ivar_job")
   end
-
-  test "load_schedule_job with every with options" do
+
+  test "load_schedule_job with every with options" do
     Resque::Scheduler.load_schedule_job("some_ivar_job", {'every' => ['30s', {'first_in' => '60s'}], 'class' => 'SomeIvarJob', 'args' => "/tmp"})
 
     assert_equal(1, Resque::Scheduler.rufus_scheduler.all_jobs.size)
@@ -88,8 +87,8 @@ context "Resque::Scheduler" do
     assert Resque::Scheduler.scheduled_jobs.keys.include?("some_ivar_job")
     assert Resque::Scheduler.scheduled_jobs["some_ivar_job"].params.keys.include?(:first_in)
   end
-
-  test "load_schedule_job with cron with options" do
+
+  test "load_schedule_job with cron with options" do
     Resque::Scheduler.load_schedule_job("some_ivar_job", {'cron' => ['* * * * *', {'allow_overlapping' => 'true'}], 'class' => 'SomeIvarJob', 'args' => "/tmp"})
 
     assert_equal(1, Resque::Scheduler.rufus_scheduler.all_jobs.size)
@@ -243,5 +242,4 @@ context "Resque::Scheduler" do
     Resque::Plugin.lint(ResqueScheduler)
   end
 end
-
 end

data/test/support/redis_instance.rb ADDED

@@ -0,0 +1,129 @@
+require 'socket'
+require 'timeout'
+require 'fileutils'
+
+class RedisInstance
+  class << self
+    @running = false
+    @port = nil
+    @pid = nil
+
+    def run_if_needed!
+      run! unless running?
+    end
+
+    def run!
+      ensure_pid_directory
+      reassign_redis_clients
+      start_redis_server
+
+      if $?.success?
+        wait_for_pid
+        puts "Booted isolated Redis on port #{port} with PID #{pid}."
+
+        wait_for_redis_boot
+
+        # Ensure we tear down Redis on Ctrl+C / test failure.
+        at_exit do
+          RedisInstance.stop!
+        end
+      else
+        raise "Failed to start Redis on port #{port}."
+      end
+
+      @running = true
+    end
+
+    def stop!
+      $stdout.puts "Sending TERM to Redis (#{pid})..."
+      Process.kill('TERM', pid)
+
+      @port = nil
+      @running = false
+      @pid = nil
+    end
+
+    def running?
+      @running
+    end
+
+    private
+
+    def wait_for_redis_boot
+      Timeout::timeout(10) do
+        begin
+          while Resque.redis.ping != 'PONG'
+          end
+        rescue
+          # silence all errors
+        end
+      end
+    end
+
+    def ensure_pid_directory
+      FileUtils.mkdir_p(File.dirname(pid_file))
+    end
+
+    def reassign_redis_clients
+      Resque.redis = Redis.new(:hostname => '127.0.0.1', :port => port, :thread_safe => true)
+    end
+
+    def start_redis_server
+      IO.popen("redis-server -", "w+") do |server|
+        server.write(config)
+        server.close_write
+      end
+    end
+
+    def pid
+      @pid ||= File.read(pid_file).to_i
+    end
+
+    def wait_for_pid
+      Timeout::timeout(10) do
+        while !File.exist?(pid_file)
+        end
+      end
+    end
+
+    def port
+      @port ||= random_port
+    end
+
+    def pid_file
+      "/tmp/redis-scheduler-test.pid"
+    end
+
+    def config
+      <<-EOF
+        daemonize yes
+        pidfile #{pid_file}
+        port #{port}
+      EOF
+    end
+
+    # Returns a random port in the upper (10000-65535) range.
+    def random_port
+      ports = (10000..65535).to_a
+
+      loop do
+        port = ports[rand(ports.size)]
+        return port if port_available?('127.0.0.1', port)
+      end
+    end
+
+    def port_available?(ip, port, seconds=1)
+      Timeout::timeout(seconds) do
+        begin
+          TCPSocket.new(ip, port).close
+          false
+        rescue Errno::ECONNREFUSED, Errno::EHOSTUNREACH
+          true
+        end
+      end
+    rescue Timeout::Error
+      true
+    end
+  end
+end
+
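
Usage is a single call from the test helper (exactly what the test_helper.rb diff below adds): boot a throwaway redis-server on a random free port, point Resque at it, and let the at_exit hook tear it down.

    require File.dirname(__FILE__) + '/support/redis_instance'
    RedisInstance.run!
    # => Booted isolated Redis on port 23482 with PID 5091. (port/PID vary)
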

data/test/test_helper.rb CHANGED

@@ -23,10 +23,10 @@ if !system("which redis-server")
 end
 
 
-#
-#
-
-
+# Start our own Redis when the tests start. RedisInstance will take care of
+# starting and stopping.
+require File.dirname(__FILE__) + '/support/redis_instance'
+RedisInstance.run!
 
 at_exit do
   next if $!
@@ -37,17 +37,9 @@ at_exit do
   exit_code = Test::Unit::AutoRunner.run
 end
 
-pid = `ps -e -o pid,command | grep [r]edis-test`.split(" ")[0]
-puts "Killing test redis server..."
-`rm -f #{dir}/dump.rdb`
-Process.kill("KILL", pid.to_i)
 exit exit_code
 end
 
-puts "Starting redis for testing at localhost:9736..."
-`redis-server #{dir}/redis-test.conf`
-Resque.redis = 'localhost:9736'
-
 ##
 # test/spec/mini 3
 # http://gist.github.com/25455
@@ -72,6 +64,11 @@ class FakeCustomJobClass
   def self.scheduled(queue, klass, *args); end
 end
 
+class FakeCustomJobClassEnqueueAt
+  @queue = :test
+  def self.scheduled(queue, klass, *args); end
+end
+
 class SomeJob
   def self.perform(repo_id, path)
   end

metadata CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: resque-scheduler
 version: !ruby/object:Gem::Version
-  version: 2.0.0
+  version: 2.0.1
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2013-03-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -91,6 +91,11 @@ files:
 - README.markdown
 - Rakefile
 - lib/resque/scheduler.rb
+- lib/resque/scheduler/lock.rb
+- lib/resque/scheduler/lock/base.rb
+- lib/resque/scheduler/lock/basic.rb
+- lib/resque/scheduler/lock/resilient.rb
+- lib/resque/scheduler_locking.rb
 - lib/resque_scheduler.rb
 - lib/resque_scheduler/plugin.rb
 - lib/resque_scheduler/server.rb
@@ -106,7 +111,9 @@ files:
 - test/resque-web_test.rb
 - test/scheduler_args_test.rb
 - test/scheduler_hooks_test.rb
+- test/scheduler_locking_test.rb
 - test/scheduler_test.rb
+- test/support/redis_instance.rb
 - test/test_helper.rb
 homepage: http://github.com/bvandenbos/resque-scheduler
 licenses: []
@@ -128,7 +135,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: 1.3.6
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.23
 signing_key:
 specification_version: 3
 summary: Light weight job scheduling on top of Resque