resque-concurrent-restriction 0.5.0
- data/.gitignore +8 -0
- data/Gemfile +4 -0
- data/LICENSE +20 -0
- data/README.md +39 -0
- data/Rakefile +2 -0
- data/lib/resque-concurrent-restriction.rb +16 -0
- data/lib/resque/plugins/concurrent_restriction/concurrent_restriction_job.rb +382 -0
- data/lib/resque/plugins/concurrent_restriction/resque_worker_extension.rb +84 -0
- data/lib/resque/plugins/concurrent_restriction/version.rb +7 -0
- data/resque-concurrent-restriction.gemspec +27 -0
- data/spec/concurrent_restriction_job_spec.rb +487 -0
- data/spec/redis-test.conf +312 -0
- data/spec/resque_worker_extensions_spec.rb +195 -0
- data/spec/spec.opts +8 -0
- data/spec/spec_helper.rb +153 -0
- metadata +107 -0
data/.gitignore
ADDED
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2010 Matt Conway (matt@conwaysplace.com)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,39 @@
+resque-concurrent-restriction
+===============
+
+Resque Concurrent Restriction is a plugin for the [Resque][0] queueing system (http://github.com/defunkt/resque). It allows you to specify how many instances of a given job class can run concurrently.
+
+Resque Concurrent Restriction requires Resque 1.10 and a Redis server version 2.2 or later.
+
+Install
+-------
+
+    sudo gem install resque-concurrent-restriction
+
+To use
+------
+
+It is especially useful when a system has intensive jobs of which only a few should run at a time. To restrict such a job, extend Resque::Plugins::ConcurrentRestriction in its class and specify the concurrency limit (it defaults to 1). For example:
+
+    class IntensiveJob
+      extend Resque::Plugins::ConcurrentRestriction
+      concurrent 4
+
+      # rest of your class here
+    end
+
+This means that no more than 4 IntensiveJob jobs can run simultaneously.
+
+Author
+------
+Code was originally forked from the [resque-restriction][1] plugin (Richard Huang :: flyerhzm@gmail.com :: @flyerhzm), but diverged enough that it warranted being its own plugin to keep the code simple.
+
+Matt Conway :: matt@conwaysplace.com :: @mattconway
+
+Copyright
+---------
+Copyright (c) 2011 Matt Conway. See LICENSE for details.
+
+[0]: http://github.com/defunkt/resque
+[1]: http://github.com/flyerhzm/resque-restriction
+
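In addition to the per-class limit shown in the README above, the plugin's concurrent_identifier hook (defined in concurrent_restriction_job.rb below) lets a set of jobs share a limit keyed off the job arguments rather than just the class name. A minimal sketch, assuming a hypothetical job whose first argument is an account id:

    class AccountJob
      extend Resque::Plugins::ConcurrentRestriction
      concurrent 2

      # Restrict per account rather than per class: jobs enqueued with the
      # same account_id share one running count, different accounts do not.
      def self.concurrent_identifier(*args)
        args.first.to_s
      end

      def self.perform(account_id)
        # work for a single account here
      end
    end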
data/Rakefile
ADDED
data/lib/resque-concurrent-restriction.rb
ADDED
@@ -0,0 +1,16 @@
+require 'resque'
+require 'resque/plugins/concurrent_restriction/version'
+require 'resque/plugins/concurrent_restriction/resque_worker_extension'
+require 'resque/plugins/concurrent_restriction/concurrent_restriction_job'
+
+Resque::Worker.send(:include, Resque::Plugins::ConcurrentRestriction::Worker)
+Resque::Job.send(:extend, Resque::Plugins::ConcurrentRestriction::Job)
+
+unsupported_version = false
+begin
+  server_ver = Resque.redis.info["redis_version"].split('.').collect{|x| x.to_i}
+  unsupported_version = (server_ver <=> [2, 2, 0]) < 0
+rescue
+end
+
+raise "resque-concurrent-restriction requires a redis-server version >= 2.2.0" if unsupported_version
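The guard above compares version arrays with Ruby's element-wise Array#<=>, so any server older than 2.2.0 makes the require raise. Purely for illustration (not part of the gem):

    [2, 0, 4]  <=> [2, 2, 0]   # => -1, so a 2.0.4 server is rejected
    [2, 2, 12] <=> [2, 2, 0]   # =>  1, supported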
data/lib/resque/plugins/concurrent_restriction/concurrent_restriction_job.rb
ADDED
@@ -0,0 +1,382 @@
+module Resque
+  module Plugins
+    module ConcurrentRestriction
+
+      # Used by the user in their job class to set the concurrency limit
+      def concurrent(limit)
+        @concurrent = limit
+      end
+
+      # Allows the user to specify the unique key that identifies a set
+      # of jobs that share a concurrency limit. Defaults to the job class name
+      def concurrent_identifier(*args)
+      end
+
+      # Used to query the limit the user has set
+      def concurrent_limit
+        @concurrent ||= 1
+      end
+
+      # The key used to acquire a lock so we can operate on multiple
+      # redis structures (runnables set, running_count) atomically
+      def lock_key(tracking_key)
+        parts = tracking_key.split(":")
+        "concurrent:lock:#{parts[2..-1].join(':')}"
+      end
+
+      # The redis key used to store the number of currently running
+      # jobs for the restriction_identifier
+      def running_count_key(tracking_key)
+        parts = tracking_key.split(":")
+        "concurrent:count:#{parts[2..-1].join(':')}"
+      end
+
+      # The key for the redis list where restricted jobs for the given resque queue are stored
+      def restriction_queue_key(tracking_key, queue)
+        parts = tracking_key.split(":")
+        "concurrent:queue:#{queue}:#{parts[2..-1].join(':')}"
+      end
+
+      def restriction_queue_availability_key(tracking_key)
+        parts = tracking_key.split(":")
+        "concurrent:queue_availability:#{parts[2..-1].join(':')}"
+      end
+
+      # The key that groups all jobs of the same restriction_identifier together
+      # so that we can work on any of those jobs if they are runnable
+      # Stored in runnables set, and used to build keys for each queue where jobs
+      # for those queues are stored
+      def tracking_key(*args)
+        id = concurrent_identifier(*args)
+        id = ":#{id}" if id && id.strip.size > 0
+        "concurrent:tracking:#{self.to_s}#{id}"
+      end
+
+      def tracking_class(tracking_key)
+        Resque.constantize(tracking_key.split(":")[2])
+      end
+
+      # The key to the redis set where we keep a list of runnable tracking_keys
+      def runnables_key(queue=nil)
+        key = ":#{queue}" if queue
+        "concurrent:runnable#{key}"
+      end
+
+      # Encodes the job into the restriction queue
+      def encode(job)
+        item = {:queue => job.queue, :payload => job.payload}
+        Resque.encode(item)
+      end
+
+      # Decodes the job from the restriction queue
+      def decode(str)
+        item = Resque.decode(str)
+        Resque::Job.new(item['queue'], item['payload']) if item
+      end
+
+      # The restriction queues that have data for each tracking key
+      # Adds/Removes the queue to the list of queues for that tracking key
+      # so we can quickly tell in next_runnable_job if a runnable job exists on a
+      # specific restriction queue
+      def update_queues_available(tracking_key, queue, action)
+        availability_key = restriction_queue_availability_key(tracking_key)
+        case action
+          when :add then Resque.redis.send(:sadd, availability_key, queue)
+          when :remove then Resque.redis.send(:srem, availability_key, queue)
+          else raise "Invalid action to ConcurrentRestriction.track_queue"
+        end
+      end
+
+      def queues_available(tracking_key)
+        availability_key = restriction_queue_availability_key(tracking_key)
+        Resque.redis.smembers(availability_key)
+      end
+
+      # Pushes the job to the restriction queue
+      def push_to_restriction_queue(job, location=:back)
+        tracking_key = tracking_key(*job.args)
+
+        case location
+          when :back then Resque.redis.rpush(restriction_queue_key(tracking_key, job.queue), encode(job))
+          when :front then Resque.redis.lpush(restriction_queue_key(tracking_key, job.queue), encode(job))
+          else raise "Invalid location to ConcurrentRestriction.push_to_restriction_queue"
+        end
+
+        update_queues_available(tracking_key, job.queue, :add)
+        mark_runnable(tracking_key, false)
+      end
+
+      # Pops a job from the restriction queue
+      def pop_from_restriction_queue(tracking_key, queue)
+        queue_key = restriction_queue_key(tracking_key, queue)
+        str = Resque.redis.lpop(queue_key)
+        post_pop_size = Resque.redis.llen(queue_key)
+
+        if post_pop_size == 0
+          update_queues_available(tracking_key, queue, :remove)
+          clear_runnable(tracking_key, queue)
+        end
+
+        # increment by one to indicate that we are running
+        # do this before update_queues_available so that the current queue gets cleaned
+        increment_running_count(tracking_key) if str
+
+        decode(str)
+      end
+
+      # Grabs the raw data (undecoded) from the restriction queue
+      def restriction_queue_raw(tracking_key, queue)
+        Array(Resque.redis.lrange(restriction_queue_key(tracking_key, queue), 0, -1))
+      end
+
+      # Grabs the contents of the restriction queue (decoded)
+      def restriction_queue(tracking_key, queue)
+        restriction_queue_raw(tracking_key, queue).collect {|s| decode(s) }
+      end
+
+      # Returns the number of jobs currently running
+      def running_count(tracking_key)
+        Resque.redis.get(running_count_key(tracking_key)).to_i
+      end
+
+      # Sets the number of jobs currently running
+      def set_running_count(tracking_key, value)
+        count_key = running_count_key(tracking_key)
+        Resque.redis.set(count_key, value)
+        restricted = (value > concurrent_limit)
+        mark_runnable(tracking_key, !restricted)
+        return restricted
+      end
+
+      def restricted?(tracking_key)
+        count_key = running_count_key(tracking_key)
+        value = Resque.redis.get(count_key).to_i
+        restricted = (value >= concurrent_limit)
+        return restricted
+      end
+
+      def increment_running_count(tracking_key)
+        count_key = running_count_key(tracking_key)
+        value = Resque.redis.incr(count_key)
+        restricted = (value > concurrent_limit)
+        mark_runnable(tracking_key, !restricted)
+        return restricted
+      end
+
+      def decrement_running_count(tracking_key)
+        count_key = running_count_key(tracking_key)
+        value = Resque.redis.decr(count_key)
+        Resque.redis.set(count_key, 0) if value < 0
+        restricted = (value >= concurrent_limit)
+        mark_runnable(tracking_key, !restricted)
+        return restricted
+      end
+
+      def runnable?(tracking_key, queue)
+        Resque.redis.sismember(runnables_key(queue), tracking_key)
+      end
+
+      def get_next_runnable(queue)
+        Resque.redis.srandmember(runnables_key(queue))
+      end
+
+      # Returns the list of tracking_keys that have jobs waiting to run (are not over the concurrency limit)
+      def runnables(queue=nil)
+        Resque.redis.smembers(runnables_key(queue))
+      end
+
+      # Keeps track of which jobs are currently runnable, that is the
+      # tracking_key should have jobs on some restriction queue and
+      # also have less than concurrency_limit jobs running
+      #
+      def mark_runnable(tracking_key, runnable)
+        queues = queues_available(tracking_key)
+        queues.each do |queue|
+          runnable_queues_key = runnables_key(queue)
+          if runnable
+            Resque.redis.sadd(runnable_queues_key, tracking_key)
+          else
+            Resque.redis.srem(runnable_queues_key, tracking_key)
+          end
+        end
+        if runnable
+          Resque.redis.sadd(runnables_key, tracking_key) if queues.size > 0
+        else
+          Resque.redis.srem(runnables_key, tracking_key)
+        end
+      end
+
+      def clear_runnable(tracking_key, queue)
+        Resque.redis.srem(runnables_key(queue), tracking_key)
+        Resque.redis.srem(runnables_key, tracking_key)
+      end
+
+      # Acquires a lock using the given key and lock expiration time
+      def acquire_lock(lock_key, lock_expiration)
+        # acquire the lock to work on the restriction queue
+        expiration_time = lock_expiration + 1
+        acquired_lock = Resque.redis.setnx(lock_key, expiration_time)
+
+        # If we didn't acquire the lock, check the expiration as described
+        # at http://redis.io/commands/setnx
+        if ! acquired_lock
+          # If expiration time is in the future, then someone else beat us to getting the lock
+          old_expiration_time = Resque.redis.get(lock_key)
+          return false if old_expiration_time.to_i > Time.now.to_i
+
+          # if expiration time was in the future when we set it, then someone beat us to it
+          old_expiration_time = Resque.redis.getset(lock_key, expiration_time)
+          return false if old_expiration_time.to_i > Time.now.to_i
+        end
+
+        # expire the lock eventually so we clean up keys - not needed to timeout
+        # lock, just to keep redis clean for locks that aren't being used
+        Resque.redis.expireat(lock_key, expiration_time + 300)
+
+        return true
+      end
+
+      # Releases the lock acquired by #acquire_lock
+      def release_lock(lock_key, lock_expiration)
+        # Only delete the lock if the one we created hasn't expired
+        expiration_time = lock_expiration + 1
+        Resque.redis.del(lock_key) if expiration_time > Time.now.to_i
+      end
+
+
+      # Uses a lock to ensure that a sequence of redis operations happen atomically
+      # We don't use watch/multi/exec as it doesn't work in a DistributedRedis setup
+      def run_atomically(lock_key)
+        trying = true
+        exp_backoff = 1
+
+        while trying do
+          lock_expiration = Time.now.to_i + 10
+          if acquire_lock(lock_key, lock_expiration)
+            begin
+              yield
+            ensure
+              release_lock(lock_key, lock_expiration)
+            end
+            trying = false
+          else
+            sleep (rand(1000) * 0.0001 * exp_backoff)
+            exp_backoff *= 2
+          end
+        end
+
+      end
+
+      # Pushes the job to the restriction queue if it is restricted
+      # If the job is within the concurrency limit, and thus needs to be run, we
+      # keep the running count incremented so that other calls don't erroneously
+      # see a lower value and run their job. This count gets decremented by the call
+      # to release_restriction when the job completes
+      def stash_if_restricted(job)
+        restricted = false
+        tracking_key = tracking_key(*job.args)
+        lock_key = lock_key(tracking_key)
+
+        run_atomically(lock_key) do
+
+          restricted = restricted?(tracking_key)
+          if restricted
+            push_to_restriction_queue(job)
+          else
+            increment_running_count(tracking_key)
+          end
+
+        end
+
+        return restricted
+      end
+
+      # Returns the next job that is runnable
+      def next_runnable_job(queue)
+        tracking_key = get_next_runnable(queue)
+        return nil unless tracking_key
+
+        job = nil
+        lock_key = lock_key(tracking_key)
+
+        run_atomically(lock_key) do
+
+          # since we don't have a lock when we get the runnable,
+          # we need to check it again
+          still_runnable = runnable?(tracking_key, queue)
+          if still_runnable
+            klazz = tracking_class(tracking_key)
+            job = klazz.pop_from_restriction_queue(tracking_key, queue)
+          end
+
+        end
+
+        return job
+
+      end
+
+      # Decrements the running_count - to be called at the end of a job
+      def release_restriction(job)
+        tracking_key = tracking_key(*job.args)
+        lock_key = lock_key(tracking_key)
+
+        run_atomically(lock_key) do
+
+          # decrement the count after a job has run
+          decrement_running_count(tracking_key)
+
+        end
+      end
+
+      def stats
+        results = {}
+
+        queue_keys = Resque.redis.keys("concurrent:queue:*")
+
+        queue_sizes = {}
+        ident_sizes = {}
+        queue_keys.each do |k|
+          parts = k.split(":")
+          ident = parts[3..-1].join(":")
+          queue_name = parts[2]
+          size = Resque.redis.llen(k)
+          queue_sizes[queue_name] ||= 0
+          queue_sizes[queue_name] += size
+          ident_sizes[ident] ||= 0
+          ident_sizes[ident] += size
+        end
+
+        count_keys = Resque.redis.keys("concurrent:count:*")
+        running_counts = {}
+        count_keys.each do |k|
+          parts = k.split(":")
+          ident = parts[2..-1].join(":")
+          running_counts[ident] = Resque.redis.get(k).to_i
+        end
+
+        lock_keys = Resque.redis.keys("concurrent:lock:*")
+        lock_count = lock_keys.size
+
+        runnable_count = Resque.redis.scard(runnables_key)
+
+        return {
+          :queue_totals => {
+            :by_queue_name => queue_sizes,
+            :by_identifier => ident_sizes
+          },
+          :running_counts => running_counts,
+          :lock_count => lock_count,
+          :runnable_count => runnable_count,
+        }
+
+      end
+
+    end
+
+    # Allows users to subclass instead of extending in their job classes
+    class ConcurrentRestrictionJob
+      extend ConcurrentRestriction
+    end
+
+  end
+end
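As the closing comment notes, a job class can also subclass ConcurrentRestrictionJob instead of extending the module. A minimal sketch of that variant:

    class ReportJob < Resque::Plugins::ConcurrentRestrictionJob
      concurrent 2

      def self.perform(*args)
        # At most two ReportJobs run at once; see stash_if_restricted and
        # release_restriction above for how the running count is maintained.
      end
    end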