resque-concurrent-restriction 0.5.5 → 0.5.6
@@ -4,6 +4,9 @@
     # Resque::Plugins::ConcurrentRestriction.configure do |config|
     #   # The lock timeout for the restriction queue lock
     #   config.lock_timeout = 60
+    #   # How many times to try to get a lock before giving up
+    #   # Worker stays busy for: 2^tries * rand(100) * 0.001 (~30s-3000s)
+    #   config.lock_tries = 10
     #   # Try to pick jobs off of the restricted queue before normal queues
     #   config.restricted_before_queued = true
     # end
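The commented block above is the plugin's documented configuration example, now extended with the new lock_tries setting. As a rough illustration only (an assumed initializer, not part of this diff), the same settings could be applied like this:

    Resque::Plugins::ConcurrentRestriction.configure do |config|
      # Seconds before a stale restriction-queue lock expires
      config.lock_timeout = 60
      # Attempts to acquire the lock before run_atomically gives up
      config.lock_tries = 10
      # Pull work from restriction queues before the normal queues
      config.restricted_before_queued = true
    end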
@@ -15,11 +18,12 @@ module Resque
       # Allows configuring via class accessors
       class << self
         # optional
-        attr_accessor :lock_timeout, :restricted_before_queued
+        attr_accessor :lock_timeout, :lock_tries, :restricted_before_queued
       end
 
       # default values
       self.lock_timeout = 60
+      self.lock_tries = 15
       self.restricted_before_queued = false
 
       # Allows configuring via class accessors
@@ -341,25 +345,30 @@ module Resque
 
       # Uses a lock to ensure that a sequence of redis operations happen atomically
       # We don't use watch/multi/exec as it doesn't work in a DistributedRedis setup
-      def run_atomically(lock_key)
-
+      def run_atomically(lock_key, tries=ConcurrentRestriction.lock_tries)
+        acquired_lock = false
         exp_backoff = 1
 
-
+        tries.times do
           lock_expiration = Time.now.to_i + ConcurrentRestriction.lock_timeout
+          p [Time.now.to_f, Process.pid, :start]
           if acquire_lock(lock_key, lock_expiration)
+            p [Time.now.to_f, Process.pid, :acquired]
+            acquired_lock = true
             begin
               yield
+              p [Time.now.to_f, Process.pid, :end]
             ensure
               release_lock(lock_key, lock_expiration)
             end
-
+            break
           else
-            sleep(rand(
+            sleep(rand(100) * 0.001 * exp_backoff)
             exp_backoff *= 2
           end
         end
-
+
+        return acquired_lock
       end
 
       # Pushes the job to restriction queue if it is restricted
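The new comment in the configuration block ("Worker stays busy for: 2^tries * rand(100) * 0.001 (~30s-3000s)") summarizes the retry budget introduced here: each failed attempt sleeps rand(100) * 0.001 * exp_backoff, and exp_backoff doubles, so the total wait grows geometrically with the number of tries. A small sketch of the worst-case arithmetic (the helper name is ours, purely illustrative):

    # Worst-case seconds spent retrying the lock in run_atomically:
    # attempt i sleeps at most 99 * 0.001 * 2**i, so the total is about 0.099 * (2**tries - 1).
    def max_lock_backoff(tries)
      (0...tries).inject(0) { |total, i| total + 99 * 0.001 * 2**i }
    end

    max_lock_backoff(10)  # => ~101 seconds
    max_lock_backoff(15)  # => ~3244 seconds, roughly the ~3000s upper bound quoted in the comment

Since run_atomically now returns acquired_lock, callers such as stash_if_restricted below can detect when the block never ran.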
@@ -368,11 +377,11 @@ module Resque
       # see a lower value and run their job. This count gets decremented by call
       # to release_restriction when job completes
       def stash_if_restricted(job)
-        restricted =
+        restricted = nil
         tracking_key = tracking_key(*job.args)
         lock_key = lock_key(tracking_key)
 
-        run_atomically(lock_key) do
+        did_run = run_atomically(lock_key) do
 
           restricted = restricted?(tracking_key)
           if restricted
@@ -382,7 +391,15 @@ module Resque
           end
 
         end
-
+
+        # if run_atomically fails to acquire the lock, we need to put
+        # the job back on the queue for processing later and act restricted
+        # upstack so nothing gets run
+        if !did_run
+          restricted = true
+          job.recreate
+        end
+
         return restricted
       end
 
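The comment added above spells out the fallback: if run_atomically never acquires the lock, stash_if_restricted puts the job back on its queue via job.recreate and reports it as restricted so nothing runs out of turn. A console-style illustration of that path, mirroring the spec added further below (it assumes a test environment where Resque and ConcurrentRestrictionJob are set up):

    # Allow zero lock attempts to force the lock-failure path.
    Resque::Plugins::ConcurrentRestriction.lock_tries = 0

    job = Resque::Job.new("somequeue", {"class" => "ConcurrentRestrictionJob", "args" => []})

    ConcurrentRestrictionJob.stash_if_restricted(job)  # => true (treated as restricted)
    Resque.peek("somequeue")                           # => {"class" => "ConcurrentRestrictionJob", "args" => []}

Because the job is re-enqueued rather than dropped, a contended lock only delays it until a later reserve attempt.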
@@ -132,6 +132,37 @@ describe Resque::Plugins::ConcurrentRestriction do
       t1.join
       counter.should == "second"
     end
+
+    it "should fail if can't acquire lock within tries" do
+      did_run1 = did_run2 = false
+
+      t1 = Thread.new do
+        ConcurrentRestrictionJob.run_atomically("some_lock_key") do
+          sleep 0.2
+        end
+      end
+
+      sleep 0.1
+      t1.alive?.should == true
+
+      t2 = Thread.new do
+        ConcurrentRestrictionJob.run_atomically("some_lock_key", 2) do
+          did_run1 = true
+        end
+      end
+      t2.join
+
+      t3 = Thread.new do
+        ConcurrentRestrictionJob.run_atomically("some_lock_key", 100) do
+          did_run2 = true
+        end
+      end
+      t3.join
+
+      t1.join
+      did_run1.should be_false
+      did_run2.should be_true
+    end
 
   end
 
@@ -402,6 +433,20 @@ describe Resque::Plugins::ConcurrentRestriction do
       ConcurrentRestrictionJob.runnables("somequeue2").sort.should == [ConcurrentRestrictionJob.tracking_key]
 
     end
+
+    it "should repush job and return true if it can't acquire a lock" do
+      old = Resque::Plugins::ConcurrentRestriction.lock_tries
+      begin
+        Resque::Plugins::ConcurrentRestriction.lock_tries = 0
+
+        job = Resque::Job.new("somequeue", {"class" => "ConcurrentRestrictionJob", "args" => []})
+        ConcurrentRestrictionJob.stash_if_restricted(job).should == true
+        Resque.peek("somequeue").should == {"class" => "ConcurrentRestrictionJob", "args" => []}
+      ensure
+        Resque::Plugins::ConcurrentRestriction.lock_tries = old
+      end
+
+    end
 
   end
 
@@ -411,6 +456,23 @@ describe Resque::Plugins::ConcurrentRestriction do
       ConcurrentRestrictionJob.next_runnable_job('somequeue').should be_nil
     end
 
+    it "should return nil and not pop from queue if cannot acquire lock" do
+      job1 = Resque::Job.new("somequeue", {"class" => "ConcurrentRestrictionJob", "args" => []})
+      ConcurrentRestrictionJob.set_running_count(ConcurrentRestrictionJob.tracking_key, 99)
+      ConcurrentRestrictionJob.stash_if_restricted(job1)
+      ConcurrentRestrictionJob.set_running_count(ConcurrentRestrictionJob.tracking_key, 0)
+
+      old = Resque::Plugins::ConcurrentRestriction.lock_tries
+      begin
+        Resque::Plugins::ConcurrentRestriction.lock_tries = 0
+
+        ConcurrentRestrictionJob.next_runnable_job('somequeue').should be_nil
+        ConcurrentRestrictionJob.restriction_queue(ConcurrentRestrictionJob.tracking_key, "somequeue").should == [job1]
+      ensure
+        Resque::Plugins::ConcurrentRestriction.lock_tries = old
+      end
+    end
+
     it "should not get a job if nothing runnable" do
       job1 = Resque::Job.new("somequeue", {"class" => "ConcurrentRestrictionJob", "args" => []})
       ConcurrentRestrictionJob.set_running_count(ConcurrentRestrictionJob.tracking_key, 99)
@@ -467,6 +529,21 @@ describe Resque::Plugins::ConcurrentRestriction do
       ConcurrentRestrictionJob.running_count(ConcurrentRestrictionJob.tracking_key).should == 0
     end
 
+    it "should do nothing if cannot acquire lock" do
+      ConcurrentRestrictionJob.set_running_count(ConcurrentRestrictionJob.tracking_key, 1)
+      job = Resque::Job.new("somequeue", {"class" => "ConcurrentRestrictionJob", "args" => []})
+
+      old = Resque::Plugins::ConcurrentRestriction.lock_tries
+      begin
+        Resque::Plugins::ConcurrentRestriction.lock_tries = 0
+
+        ConcurrentRestrictionJob.release_restriction(job)
+        ConcurrentRestrictionJob.running_count(ConcurrentRestrictionJob.tracking_key).should == 1
+      ensure
+        Resque::Plugins::ConcurrentRestriction.lock_tries = old
+      end
+    end
+
   end
 
   context "#reset_restrictions" do
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: resque-concurrent-restriction
 version: !ruby/object:Gem::Version
-  version: 0.5.5
+  version: 0.5.6
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-04-
+date: 2012-04-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: resque