resque-concurrent-restriction 0.6.0 → 0.6.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: bbda1244cff78d5060249dffb53d58d9a8a84c10
-   data.tar.gz: 41cd25689487c7f2cc4fcba41491b9e146806606
+   metadata.gz: 15ae520632319f854b8b8975c4dfd83338a4e5c0
+   data.tar.gz: 44be3699fa06a36fd8626012133b3821957b67b4
  SHA512:
-   metadata.gz: 5a8aad211b3ee98acbe8cf55002ff7d8af401857fbf2f6af885d93547e529eb684c3ad968cefab487c78c859052c68938bc49d423fedac8e41806c1a600a14a4
-   data.tar.gz: 42111765e48f618fb806f386bedd51f112a267ec0855f52578b3ac7dd0a906fefae66c7ad320b9c4e3a3f9aee1b37fee6c52219925e93474b570ea36acb8cc1e
+   metadata.gz: 64a4aa1348e13b2d440c0e9ff00ab4af6ac0e6a530fd7073d3253efc57da70e8f1f5ef41b9ade01c51c32b28e7c7a0200b104879ea5fbaad962b8ccddcc1d0e5
+   data.tar.gz: 6d39969cee8167c013429ed0c4dd68716e5e7ee931a5ba41f102663d38cccc909fd819831678e87531bd31b44865d00ddcfe396953cb38b9a7c6c451e8acfc18
@@ -1,540 +1,539 @@
  [lines 1-17 of the 0.6.0 file are identical to lines 1-17 of the 0.6.1 file shown below]
- module Resque
-   module Plugins
-     module ConcurrentRestriction
-       # Warning: The helpers module will be gone in Resque 2.x
-       # Resque::Helpers removed from Resque in 1.25, see:
-       # https://github.com/resque/resque/issues/1150#issuecomment-27942972
-       include Resque::Helpers
-
  [lines 26-540 of the 0.6.0 file are identical to lines 25-539 of the 0.6.1 file shown below]
+ # To configure resque concurrent restriction, add something like the
2
+ # following to an initializer (defaults shown):
3
+ #
4
+ # Resque::Plugins::ConcurrentRestriction.configure do |config|
5
+ # # The lock timeout for the restriction queue lock
6
+ # config.lock_timeout = 60
7
+ # # How many times to try to get a lock before giving up
8
+ # # Worker stays busy for: 2^tries * rand(100) * 0.001 (~30s-3000s)
9
+ # config.lock_tries = 10
10
+ # # Try this many times to reserve a job from a queue. Also, the maximum
11
+ # # number of jobs to move to the restricted queue during this process
12
+ # # before giving up
13
+ # config.reserve_queued_job_attempts = 10
14
+ # # Try to pick jobs off of the restricted queue before normal queues
15
+ # config.restricted_before_queued = true
16
+ # end
17
+
18
+ require 'active_support'
19
+
20
+ module Resque
21
+ module Plugins
22
+ module ConcurrentRestriction
23
+ include ::ActiveSupport::Inflector
24
+
25
+ # Allows configuring via class accessors
26
+ class << self
27
+ # optional
28
+ attr_accessor :lock_timeout, :lock_tries, :reserve_queued_job_attempts, :restricted_before_queued
29
+ end
30
+
31
+ # default values
32
+ self.lock_timeout = 60
33
+ self.lock_tries = 15
34
+ self.reserve_queued_job_attempts = 1
35
+ self.restricted_before_queued = false
36
+
37
+ # Allows configuring via class accessors
38
+ def self.configure
39
+ yield self
40
+ end
41
+
42
+ # Redis Data Structures
43
+ #
44
+ # concurrent.lock.tracking_id => timestamp
45
+ # Maintains the distributed lock for the tracking_key to ensure
46
+ # atomic modification of other data structures
47
+ #
48
+ # concurrent.count.tracking_id => count
49
+ # The count of currently running jobs for the tracking_id
50
+ #
51
+ # concurrent.queue.queue_name.tracking_id => List[job1, job2, ...]
52
+ # The queue of items that is currently unable to run due to count being exceeded
53
+ #
54
+ # concurrent.queue_availability.tracking_key => Set[queue_name1, queue_name2, ...]
55
+ # Maintains the set of queues that currently have something
56
+ # runnable for each tracking_id
57
+ #
58
+ # concurrent.runnable[.queue_name] => Set[tracking_id1, tracking_id2, ...]
59
+ # Maintains the set of tracking_ids that have something
60
+ # runnable for each queue (globally without .queue_name postfix in key)
61
+ #
62
+ # The behavior has two points of entry:
63
+ #
64
+ # When the Resque::Worker is looking for a job to run from a restriction
65
+ # queue, we use the queue_name to look up the set of tracking IDs that
66
+ # are currently runnable for that queue. If we get a tracking id, we
67
+ # know that there is a restriction queue with something runnable in it,
68
+ # and we then use that tracking_id and queue to look up and pop something
69
+ # off of the restriction queue.
70
+ #
71
+ # When the Resque::Worker gets a job off of a normal resque queue, it uses
72
+ # the count to see if that job is currently restricted. If not, it runs it
73
+ # as normal, but if it is restricted, then it sticks it on a restriction queue.
74
+ #
75
+ # In both cases, before a job is handed off to resque to be run, we increment
76
+ # the count so we can keep tracking of how many are currently running. When
77
+ # the job finishes, we then decrement the count.
78
+
79
+ # Used by the user in their job class to set the concurrency limit
80
+ def concurrent(limit)
81
+ @concurrent = limit
82
+ end
83
+
84
+ # Allows the user to specify the unique key that identifies a set
85
+ # of jobs that share a concurrency limit. Defaults to the job class name
86
+ def concurrent_identifier(*args)
87
+ end
88
+
89
+ # Used to query what the limit the user has set
90
+ def concurrent_limit
91
+ @concurrent ||= 1
92
+ end
93
+
94
+ # The key used to acquire a lock so we can operate on multiple
95
+ # redis structures (runnables set, running_count) atomically
96
+ def lock_key(tracking_key)
97
+ parts = tracking_key.split(".")
98
+ "concurrent.lock.#{parts[2..-1].join('.')}"
99
+ end
100
+
101
+ # The redis key used to store the number of currently running
102
+ # jobs for the restriction_identifier
103
+ def running_count_key(tracking_key)
104
+ parts = tracking_key.split(".")
105
+ "concurrent.count.#{parts[2..-1].join('.')}"
106
+ end
107
+
108
+ # The key for the redis list where restricted jobs for the given resque queue are stored
109
+ def restriction_queue_key(tracking_key, queue)
110
+ parts = tracking_key.split(".")
111
+ "concurrent.queue.#{queue}.#{parts[2..-1].join('.')}"
112
+ end
113
+
114
+ # The redis key used to store the aggregate number of jobs
115
+ # in restriction queues by queue name
116
+ def queue_count_key
117
+ "concurrent.queue_counts"
118
+ end
119
+
120
+ def restriction_queue_availability_key(tracking_key)
121
+ parts = tracking_key.split(".")
122
+ "concurrent.queue_availability.#{parts[2..-1].join('.')}"
123
+ end
124
+
125
+ # The key that groups all jobs of the same restriction_identifier together
126
+ # so that we can work on any of those jobs if they are runnable
127
+ # Stored in runnables set, and used to build keys for each queue where jobs
128
+ # for those queues are stored
129
+ def tracking_key(*args)
130
+ id = concurrent_identifier(*args)
131
+ id = ".#{id}" if id && id.strip.size > 0
132
+ "concurrent.tracking.#{self.to_s}#{id}"
133
+ end
134
+
135
+ def tracking_class(tracking_key)
136
+ constantize(tracking_key.split(".")[2])
137
+ end
138
+
139
+ # The key to the redis set where we keep a list of runnable tracking_keys
140
+ def runnables_key(queue=nil)
141
+ key = ".#{queue}" if queue
142
+ "concurrent.runnable#{key}"
143
+ end
144
+
145
+ # Encodes the job into the restriction queue
146
+ def encode(job)
147
+ item = {:queue => job.queue, :payload => job.payload}
148
+ Resque.encode(item)
149
+ end
150
+
151
+ # Decodes the job from the restriction queue
152
+ def decode(str)
153
+ item = Resque.decode(str)
154
+ Resque::Job.new(item['queue'], item['payload']) if item
155
+ end
156
+
157
+ # The restriction queues that have data for each tracking key
158
+ # Adds/Removes the queue to the list of queues for that tracking key
159
+ # so we can quickly tell in next_runnable_job if a runnable job exists on a
160
+ # specific restriction queue
161
+ def update_queues_available(tracking_key, queue, action)
162
+ availability_key = restriction_queue_availability_key(tracking_key)
163
+ case action
164
+ when :add then Resque.redis.send(:sadd, availability_key, queue)
165
+ when :remove then Resque.redis.send(:srem, availability_key, queue)
166
+ else raise "Invalid action to ConcurrentRestriction.track_queue"
167
+ end
168
+ end
169
+
170
+ def queues_available(tracking_key)
171
+ availability_key = restriction_queue_availability_key(tracking_key)
172
+ Resque.redis.smembers(availability_key)
173
+ end
174
+
175
+ # Pushes the job to the restriction queue
176
+ def push_to_restriction_queue(job, location=:back)
177
+ tracking_key = tracking_key(*job.args)
178
+
179
+ case location
180
+ when :back then Resque.redis.rpush(restriction_queue_key(tracking_key, job.queue), encode(job))
181
+ when :front then Resque.redis.lpush(restriction_queue_key(tracking_key, job.queue), encode(job))
182
+ else raise "Invalid location to ConcurrentRestriction.push_to_restriction_queue"
183
+ end
184
+
185
+ increment_queue_count(job.queue)
186
+ update_queues_available(tracking_key, job.queue, :add)
187
+ mark_runnable(tracking_key, false)
188
+ end
189
+
190
+ # Pops a job from the restriction queue
191
+ def pop_from_restriction_queue(tracking_key, queue)
192
+ queue_key = restriction_queue_key(tracking_key, queue)
193
+ str = Resque.redis.lpop(queue_key)
194
+ post_pop_size = Resque.redis.llen(queue_key)
195
+
196
+ if post_pop_size == 0
197
+ update_queues_available(tracking_key, queue, :remove)
198
+ clear_runnable(tracking_key, queue)
199
+ end
200
+
201
+ decrement_queue_count(queue)
202
+
203
+ # increment by one to indicate that we are running
204
+ increment_running_count(tracking_key) if str
205
+
206
+ decode(str)
207
+ end
208
+
209
+ # Grabs the raw data (undecoded) from the restriction queue
210
+ def restriction_queue_raw(tracking_key, queue)
211
+ Array(Resque.redis.lrange(restriction_queue_key(tracking_key, queue), 0, -1))
212
+ end
213
+
214
+ # Grabs the contents of the restriction queue (decoded)
215
+ def restriction_queue(tracking_key, queue)
216
+ restriction_queue_raw(tracking_key, queue).collect {|s| decode(s) }
217
+ end
218
+
219
+ # Returns the number of jobs currently running
220
+ def running_count(tracking_key)
221
+ Resque.redis.get(running_count_key(tracking_key)).to_i
222
+ end
223
+
224
+ # Returns the number of jobs currently running
225
+ def set_running_count(tracking_key, value)
226
+ count_key = running_count_key(tracking_key)
227
+ Resque.redis.set(count_key, value)
228
+ restricted = (value > concurrent_limit)
229
+ mark_runnable(tracking_key, !restricted)
230
+ return restricted
231
+ end
232
+
233
+ def restricted?(tracking_key)
234
+ count_key = running_count_key(tracking_key)
235
+ value = Resque.redis.get(count_key).to_i
236
+ restricted = (value >= concurrent_limit)
237
+ return restricted
238
+ end
239
+
240
+ # The value in redis is the number of jobs currently running
241
+ # If we increment past that, we are restricted. Incrementing is only done
242
+ # after the job is cleared for execution due to checking the runnable
243
+ # state, and post increment we setup runnable for future jobs based on
244
+ # the new "restricted" value
245
+ def increment_running_count(tracking_key)
246
+ count_key = running_count_key(tracking_key)
247
+ value = Resque.redis.incr(count_key)
248
+ restricted = (value >= concurrent_limit)
249
+ mark_runnable(tracking_key, !restricted)
250
+ return restricted
251
+ end
252
+
253
+ def decrement_running_count(tracking_key)
254
+ count_key = running_count_key(tracking_key)
255
+ value = Resque.redis.decr(count_key)
256
+ Resque.redis.set(count_key, 0) if value < 0
257
+ restricted = (value >= concurrent_limit)
258
+ mark_runnable(tracking_key, !restricted)
259
+ return restricted
260
+ end
261
+
262
+ def increment_queue_count(queue, by=1)
263
+ value = Resque.redis.hincrby(queue_count_key, queue, by)
264
+ return value
265
+ end
266
+
267
+ def decrement_queue_count(queue, by=1)
268
+ value = Resque.redis.hincrby(queue_count_key, queue, -by)
269
+ return value
270
+ end
271
+
272
+ def queue_counts
273
+ value = Resque.redis.hgetall(queue_count_key)
274
+ value = Hash[*value.collect {|k, v| [k, v.to_i] }.flatten]
275
+ return value
276
+ end
277
+
278
+ def set_queue_count(queue, count)
279
+ Resque.redis.hset(queue_count_key, queue, count)
280
+ end
281
+
282
+ def runnable?(tracking_key, queue)
283
+ Resque.redis.sismember(runnables_key(queue), tracking_key)
284
+ end
285
+
286
+ def get_next_runnable(queue)
287
+ Resque.redis.srandmember(runnables_key(queue))
288
+ end
289
+
290
+ # Returns the list of tracking_keys that have jobs waiting to run (are not over the concurrency limit)
291
+ def runnables(queue=nil)
292
+ Resque.redis.smembers(runnables_key(queue))
293
+ end
294
+
295
+ # Keeps track of which jobs are currently runnable, that is the
296
+ # tracking_key should have jobs on some restriction queue and
297
+ # also have less than concurrency_limit jobs running
298
+ #
299
+ def mark_runnable(tracking_key, runnable)
300
+ queues = queues_available(tracking_key)
301
+ queues.each do |queue|
302
+ runnable_queues_key = runnables_key(queue)
303
+ if runnable
304
+ Resque.redis.sadd(runnable_queues_key, tracking_key)
305
+ else
306
+ Resque.redis.srem(runnable_queues_key, tracking_key)
307
+ end
308
+ end
309
+ if runnable
310
+ Resque.redis.sadd(runnables_key, tracking_key) if queues.size > 0
311
+ else
312
+ Resque.redis.srem(runnables_key, tracking_key)
313
+ end
314
+ end
315
+
316
+ def clear_runnable(tracking_key, queue)
317
+ Resque.redis.srem(runnables_key(queue), tracking_key)
318
+ Resque.redis.srem(runnables_key, tracking_key)
319
+ end
320
+
321
+ # Acquires a lock using the given key and lock expiration time
322
+ def acquire_lock(lock_key, lock_expiration)
323
+ # acquire the lock to work on the restriction queue
324
+ expiration_time = lock_expiration + 1
325
+ acquired_lock = Resque.redis.setnx(lock_key, expiration_time)
326
+
327
+ # If we didn't acquire the lock, check the expiration as described
328
+ # at http://redis.io/commands/setnx
329
+ if ! acquired_lock
330
+ # If expiration time is in the future, then someone else beat us to getting the lock
331
+ old_expiration_time = Resque.redis.get(lock_key)
332
+ return false if old_expiration_time.to_i > Time.now.to_i
333
+
334
+ # if expiration time was in the future when we set it, then someone beat us to it
335
+ old_expiration_time = Resque.redis.getset(lock_key, expiration_time)
336
+ return false if old_expiration_time.to_i > Time.now.to_i
337
+ end
338
+
339
+ # expire the lock eventually so we clean up keys - not needed to timeout
340
+ # lock, just to keep redis clean for locks that aren't being used'
341
+ Resque.redis.expireat(lock_key, expiration_time + 300)
342
+
343
+ return true
344
+ end
345
+
346
+ # Releases the lock acquired by #acquire_lock
347
+ def release_lock(lock_key, lock_expiration)
348
+ # Only delete the lock if the one we created hasn't expired
349
+ expiration_time = lock_expiration + 1
350
+ Resque.redis.del(lock_key) if expiration_time > Time.now.to_i
351
+ end
352
+
353
+
354
+ # Uses a lock to ensure that a sequence of redis operations happen atomically
355
+ # We don't use watch/multi/exec as it doesn't work in a DistributedRedis setup
356
+ def run_atomically(lock_key, tries=ConcurrentRestriction.lock_tries)
357
+ acquired_lock = false
358
+ exp_backoff = 1
359
+
360
+ tries.times do
361
+ lock_expiration = Time.now.to_i + ConcurrentRestriction.lock_timeout
362
+ if acquire_lock(lock_key, lock_expiration)
363
+ acquired_lock = true
364
+ begin
365
+ yield
366
+ ensure
367
+ release_lock(lock_key, lock_expiration)
368
+ end
369
+ break
370
+ else
371
+ sleep(rand(100) * 0.001 * exp_backoff)
372
+ exp_backoff *= 2
373
+ end
374
+ end
375
+
376
+ return acquired_lock
377
+ end
378
+
379
+ # Pushes the job to restriction queue if it is restricted
380
+ # If the job is within the concurrency limit, thus needs to be run, we
381
+ # keep the running count incremented so that other calls don't erroneously
382
+ # see a lower value and run their job. This count gets decremented by call
383
+ # to release_restriction when job completes
384
+ def stash_if_restricted(job)
385
+ restricted = nil
386
+ tracking_key = tracking_key(*job.args)
387
+ lock_key = lock_key(tracking_key)
388
+
389
+ did_run = run_atomically(lock_key) do
390
+
391
+ restricted = restricted?(tracking_key)
392
+ if restricted
393
+ push_to_restriction_queue(job)
394
+ else
395
+ increment_running_count(tracking_key)
396
+ end
397
+
398
+ end
399
+
400
+ # if run_atomically fails to acquire the lock, we need to put
401
+ # the job back on the queue for processing later and act restricted
402
+ # upstack so nothing gets run
403
+ if !did_run
404
+ restricted = true
405
+ job.recreate
406
+ end
407
+
408
+ return restricted
409
+ end
410
+
411
+ # Returns the next job that is runnable
412
+ def next_runnable_job(queue)
413
+ tracking_key = get_next_runnable(queue)
414
+ return nil unless tracking_key
415
+
416
+ job = nil
417
+ lock_key = lock_key(tracking_key)
418
+
419
+ run_atomically(lock_key) do
420
+
421
+ # since we don't have a lock when we get the runnable,
422
+ # we need to check it again
423
+ still_runnable = runnable?(tracking_key, queue)
424
+ if still_runnable
425
+ klazz = tracking_class(tracking_key)
426
+ job = klazz.pop_from_restriction_queue(tracking_key, queue)
427
+ end
428
+
429
+ end
430
+
431
+ return job
432
+
433
+ end
434
+
435
+ # Decrements the running_count - to be called at end of job
436
+ def release_restriction(job)
437
+ tracking_key = tracking_key(*job.args)
438
+ lock_key = lock_key(tracking_key)
439
+
440
+ run_atomically(lock_key) do
441
+
442
+ # decrement the count after a job has run
443
+ decrement_running_count(tracking_key)
444
+
445
+ end
446
+ end
447
+
448
+ # Resets everything to be runnable
449
+ def reset_restrictions
450
+
451
+ counts_reset = 0
452
+ count_keys = Resque.redis.keys("concurrent.count.*")
453
+ if count_keys.size > 0
454
+ count_keys.each_slice(10000) do |key_slice|
455
+ counts_reset += Resque.redis.del(*key_slice)
456
+ end
457
+ end
458
+
459
+ runnable_keys = Resque.redis.keys("concurrent.runnable*")
460
+ if runnable_keys.size > 0
461
+ runnable_keys.each_slice(10000) do |runnable_slice|
462
+ Resque.redis.del(*runnable_slice)
463
+ end
464
+ end
465
+
466
+ Resque.redis.del(queue_count_key)
467
+ queues_enabled = 0
468
+ queue_keys = Resque.redis.keys("concurrent.queue.*")
469
+ queue_keys.each do |k|
470
+ len = Resque.redis.llen(k)
471
+ if len > 0
472
+ parts = k.split(".")
473
+ queue = parts[2]
474
+ ident = parts[3..-1].join('.')
475
+ tracking_key = "concurrent.tracking.#{ident}"
476
+
477
+ increment_queue_count(queue, len)
478
+ update_queues_available(tracking_key, queue, :add)
479
+ mark_runnable(tracking_key, true)
480
+ queues_enabled += 1
481
+ end
482
+ end
483
+
484
+ return counts_reset, queues_enabled
485
+
486
+ end
487
+
488
+ def stats(extended=false)
489
+ result = {}
490
+
491
+ result[:queues] = queue_counts
492
+
493
+ if extended
494
+ ident_sizes = {}
495
+ queue_keys = Resque.redis.keys("concurrent.queue.*")
496
+ queue_keys.each do |k|
497
+ parts = k.split(".")
498
+ ident = parts[3..-1].join(".")
499
+ queue_name = parts[2]
500
+ size = Resque.redis.llen(k)
501
+ ident_sizes[ident] ||= {}
502
+ ident_sizes[ident][queue_name] ||= 0
503
+ ident_sizes[ident][queue_name] += size
504
+ end
505
+
506
+ count_keys = Resque.redis.keys("concurrent.count.*")
507
+ running_counts = {}
508
+ count_keys.each do |k|
509
+ parts = k.split(".")
510
+ ident = parts[2..-1].join(".")
511
+ ident_sizes[ident] ||= {}
512
+ ident_sizes[ident]["running"] = Resque.redis.get(k).to_i
513
+ end
514
+
515
+ result[:identifiers] = ident_sizes
516
+ else
517
+ result[:identifiers] = {}
518
+ end
519
+
520
+
521
+ lock_keys = Resque.redis.keys("concurrent.lock.*")
522
+ result[:lock_count] = lock_keys.size
523
+
524
+ runnable_count = Resque.redis.scard(runnables_key)
525
+ result[:runnable_count] = runnable_count
526
+
527
+ return result
528
+
529
+ end
530
+
531
+ end
532
+
533
+ # Allows users to subclass instead of extending in their job classes
534
+ class ConcurrentRestrictionJob
535
+ extend ConcurrentRestriction
536
+ end
537
+
538
+ end
539
+ end
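
The only functional change to this library file in 0.6.1 is the swap visible at the top of the listing: the deprecated Resque::Helpers include (removed from Resque after 1.25) is replaced by an explicit require of active_support and an include of ::ActiveSupport::Inflector, so tracking_class can keep resolving class names with constantize. For orientation, below is a minimal sketch of how a job class opts into the plugin; the class name, queue, and identifier logic are illustrative assumptions, not part of the gem.

# Minimal usage sketch (illustrative names). `concurrent` sets the limit and
# `concurrent_identifier` scopes it, per the module methods shown above; the
# gem's worker integration then stashes over-limit jobs on a restriction queue
# (stash_if_restricted) and decrements the running count when a job finishes
# (release_restriction).
class UpdateAccountJob
  extend Resque::Plugins::ConcurrentRestriction

  @queue = :accounts

  # Allow at most two of these jobs to run at once per identifier
  concurrent 2

  # Jobs whose arguments yield the same identifier share the limit;
  # omit this method to restrict on the job class as a whole
  def self.concurrent_identifier(account_id, *_args)
    account_id.to_s
  end

  def self.perform(account_id, *_args)
    # ... do the work for this account ...
  end
end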
@@ -1,7 +1,7 @@
- module Resque
-   module Plugins
-     module ConcurrentRestriction
-       VERSION = "0.6.0"
-     end
-   end
- end
+ module Resque
+   module Plugins
+     module ConcurrentRestriction
+       VERSION = "0.6.1"
+     end
+   end
+ end
@@ -1,30 +1,30 @@
- # -*- encoding: utf-8 -*-
- $:.push File.expand_path("../lib", __FILE__)
- require 'resque/plugins/concurrent_restriction/version'
-
-
- Gem::Specification.new do |s|
-   s.name = "resque-concurrent-restriction"
-   s.version = Resque::Plugins::ConcurrentRestriction::VERSION
-   s.platform = Gem::Platform::RUBY
-   s.authors = ["Matt Conway"]
-   s.email = ["matt@conwaysplace.com"]
-   s.homepage = "http://github.com/wr0ngway/resque-concurrent-restriction"
-   s.summary = %q{A resque plugin for limiting how many of a specific job can run concurrently}
-   s.description = %q{A resque plugin for limiting how many of a specific job can run concurrently}
-
-   s.rubyforge_project = "resque-concurrent-restriction"
-
-   s.files = `git ls-files`.split("\n")
-   s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
-   s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
-   s.require_paths = ["lib"]
-
-   s.add_dependency("resque", '~> 1.25')
-
-   s.add_development_dependency('rspec', '~> 2.5')
-   s.add_development_dependency('awesome_print')
-
-   # Needed for testing newer resque on ruby 1.8.7
-   s.add_development_dependency('json')
- end
+ # -*- encoding: utf-8 -*-
+ $:.push File.expand_path("../lib", __FILE__)
+ require 'resque/plugins/concurrent_restriction/version'
+
+ Gem::Specification.new do |s|
+   s.name = "resque-concurrent-restriction"
+   s.version = Resque::Plugins::ConcurrentRestriction::VERSION
+   s.platform = Gem::Platform::RUBY
+   s.authors = ["Matt Conway"]
+   s.email = ["matt@conwaysplace.com"]
+   s.homepage = "http://github.com/wr0ngway/resque-concurrent-restriction"
+   s.summary = %q{A resque plugin for limiting how many of a specific job can run concurrently}
+   s.description = %q{A resque plugin for limiting how many of a specific job can run concurrently}
+
+   s.rubyforge_project = "resque-concurrent-restriction"
+
+   s.files = `git ls-files`.split("\n")
+   s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
+   s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
+   s.require_paths = ["lib"]
+
+   s.add_dependency("resque", '~> 1.25')
+   s.add_dependency("activesupport", '~> 3.2')
+
+   s.add_development_dependency('rspec', '~> 2.5')
+   s.add_development_dependency('awesome_print')
+
+   # Needed for testing newer resque on ruby 1.8.7
+   s.add_development_dependency('json')
+ end
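
The gemspec and gem-metadata hunks mirror each other: 0.6.1 adds activesupport (~> 3.2) as a runtime dependency alongside resque (~> 1.25), matching the new require 'active_support' in the library. A rough Gemfile sketch for an application picking up this release follows; the version constraints are the ones the gemspec itself declares, while the source line is only the usual default and an assumption here.

# Gemfile sketch — Bundler resolves activesupport ~> 3.2 transitively
# because resque-concurrent-restriction 0.6.1 declares it as a runtime dependency
source 'https://rubygems.org'

gem 'resque', '~> 1.25'
gem 'resque-concurrent-restriction', '~> 0.6.1'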
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: resque-concurrent-restriction
  version: !ruby/object:Gem::Version
-   version: 0.6.0
+   version: 0.6.1
  platform: ruby
  authors:
  - Matt Conway
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-02-25 00:00:00.000000000 Z
+ date: 2016-02-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: resque
@@ -24,6 +24,20 @@ dependencies:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.25'
+ - !ruby/object:Gem::Dependency
+   name: activesupport
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.2'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.2'
  - !ruby/object:Gem::Dependency
    name: rspec
    requirement: !ruby/object:Gem::Requirement