resque-concurrent-restriction 0.5.8 → 0.5.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +6 -0
- data/lib/resque/plugins/concurrent_restriction/concurrent_restriction_job.rb +8 -3
- data/lib/resque/plugins/concurrent_restriction/resque_worker_extension.rb +14 -10
- data/lib/resque/plugins/concurrent_restriction/version.rb +1 -1
- data/spec/concurrent_restriction_job_spec.rb +15 -5
- data/spec/resque_worker_extensions_spec.rb +15 -0
- metadata +3 -3
data/CHANGELOG
CHANGED
@@ -1,3 +1,9 @@
+0.5.9 (06/04/2013)
+------------------
+
+Lower slice-size used during "reset_restrictions" from 100K to 10K (it was causing segfaults when testing) <b9a7b57>
+Merge pull request #5 from jzaleski/stash_restrictions_more_aggressively <a0c051f>
+
 0.5.8 (06/07/2012)
 ------------------

data/lib/resque/plugins/concurrent_restriction/concurrent_restriction_job.rb
CHANGED
@@ -7,6 +7,10 @@
 # # How many times to try to get a lock before giving up
 # # Worker stays busy for: 2^tries * rand(100) * 0.001 (~30s-3000s)
 # config.lock_tries = 10
+# # Try this many times to reserve a job from a queue. Also, the maximum
+# # number of jobs to move to the restricted queue during this process
+# # before giving up
+# config.reserve_queued_job_attempts = 10
 # # Try to pick jobs off of the restricted queue before normal queues
 # config.restricted_before_queued = true
 # end
@@ -18,12 +22,13 @@ module Resque
 # Allows configuring via class accessors
 class << self
 # optional
-attr_accessor :lock_timeout, :lock_tries, :restricted_before_queued
+attr_accessor :lock_timeout, :lock_tries, :reserve_queued_job_attempts, :restricted_before_queued
 end

 # default values
 self.lock_timeout = 60
 self.lock_tries = 15
+self.reserve_queued_job_attempts = 1
 self.restricted_before_queued = false

 # Allows configuring via class accessors
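The commented sample config and the new accessor above show how the option is meant to be set. A minimal sketch, assuming the gem's usual require path (the value 10 here is arbitrary; the default shipped in 0.5.9 is 1):

  require 'resque/plugins/concurrent_restriction'  # assumed require path

  Resque::Plugins::ConcurrentRestriction.configure do |config|
    # Try this many times to reserve a job per pass; this also caps how
    # many restricted jobs get stashed in that pass before giving up.
    config.reserve_queued_job_attempts = 10
  end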
@@ -443,14 +448,14 @@ module Resque
 counts_reset = 0
 count_keys = Resque.redis.keys("concurrent.count.*")
 if count_keys.size > 0
-count_keys.each_slice(100000) do |key_slice|
+count_keys.each_slice(10000) do |key_slice|
 counts_reset += Resque.redis.del(*key_slice)
 end
 end

 runnable_keys = Resque.redis.keys("concurrent.runnable*")
 if runnable_keys.size > 0
-runnable_keys.each_slice(100000) do |runnable_slice|
+runnable_keys.each_slice(10000) do |runnable_slice|
 Resque.redis.del(*runnable_slice)
 end
 end
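The only behavioral change in this hunk is the batch size passed to each_slice: the keys are splatted into a single DEL call per slice, so smaller slices keep any one call's argument list bounded (the changelog ties the old 100K size to segfaults under test). A standalone sketch of the same batching pattern with redis-rb, assuming a local connection:

  require 'redis'

  redis = Redis.new  # assumes a local Redis on the default port

  # Delete a potentially huge key set in 10K batches so no single DEL
  # receives an enormous splatted argument list.
  deleted = 0
  redis.keys("concurrent.count.*").each_slice(10_000) do |slice|
    deleted += redis.del(*slice)
  end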
data/lib/resque/plugins/concurrent_restriction/resque_worker_extension.rb
CHANGED
@@ -67,23 +67,27 @@ module Resque
 end

 def get_queued_job(queue)
-
+# Bounded retry
+1.upto(ConcurrentRestriction.reserve_queued_job_attempts) do |i|
+resque_job = reserve_without_restriction(queue)

-
-
+# Short-curcuit if a job was not found
+return if resque_job.nil?

+# If there is a job on regular queues, then only run it if its not restricted
 job_class = resque_job.payload_class
 job_args = resque_job.args

-#
-
-
-
-
-
+# Return to work on job if not a restricted job
+return resque_job unless job_class.is_a?(ConcurrentRestriction)
+
+# Keep trying if job is restricted. If job is runnable, we keep the lock until
+# done_working
+return resque_job unless job_class.stash_if_restricted(resque_job)
 end

-
+# Safety net, here in case we hit the upper bound and there are still queued items
+return nil
 end

 end
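The rewrite turns get_queued_job into a bounded loop: each iteration reserves one job, returns it if it is unrestricted or turns out to be runnable, and otherwise stashes it on the restriction queue and tries again, up to reserve_queued_job_attempts times. A simplified control-flow sketch; the lambda arguments are hypothetical stand-ins for reserve_without_restriction, the ConcurrentRestriction check, and stash_if_restricted, not the plugin's real API:

  def bounded_reserve(attempts, reserve, restricted, stash)
    1.upto(attempts) do
      job = reserve.call
      return nil if job.nil?                  # queue drained: stop immediately
      return job unless restricted.call(job)  # plain job: work on it
      return job unless stash.call(job)       # runnable after all: work on it
      # otherwise the restricted job was stashed; reserve the next one
    end
    nil  # bound hit with only restricted jobs; the safety net gives up
  end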
data/spec/concurrent_restriction_job_spec.rb
CHANGED
@@ -28,6 +28,16 @@ describe Resque::Plugins::ConcurrentRestriction do
 Resque::Plugins::ConcurrentRestriction.lock_timeout.should == 60
 end

+it "should allow setting/getting global config for reserve_queued_job_attempts" do
+Resque::Plugins::ConcurrentRestriction.reserve_queued_job_attempts.should == 1
+Resque::Plugins::ConcurrentRestriction.configure do |config|
+config.reserve_queued_job_attempts = 5
+end
+Resque::Plugins::ConcurrentRestriction.reserve_queued_job_attempts.should == 5
+Resque::Plugins::ConcurrentRestriction.reserve_queued_job_attempts = 3
+Resque::Plugins::ConcurrentRestriction.reserve_queued_job_attempts.should == 3
+end
+
 it "should allow setting/getting global config for restricted_before_queued" do
 Resque::Plugins::ConcurrentRestriction.restricted_before_queued.should == false
 Resque::Plugins::ConcurrentRestriction.configure do |config|
@@ -591,13 +601,13 @@ describe Resque::Plugins::ConcurrentRestriction do
 # It might be better to actually populate redis with a bunch keys but that makes the test pretty slow

 # we have to keep this splat limitation in mind when populating test data, too
-concurrent_count_keys =
-concurrent_count_keys.each_slice(
+concurrent_count_keys = 20001.times.collect{ |i| ["concurrent.count.#{i}", "#{i}"] }.flatten
+concurrent_count_keys.each_slice(10000) do |slice|
 Resque.redis.mset *slice
 end

-concurrent_runnable_keys =
-concurrent_runnable_keys.each_slice(
+concurrent_runnable_keys = 20001.times.collect{ |i| ["concurrent.runnable.#{i}", "#{i}"] }.flatten
+concurrent_runnable_keys.each_slice(10000) do |slice|
 Resque.redis.mset *slice
 end
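Since Resque.redis.mset takes alternating keys and values, the test data is built as 20001 two-element pairs and flattened into a 40002-element array; slicing that by 10000 yields five mset batches, which is what exercises the splat limit the comment mentions. Checking that arithmetic in plain Ruby:

  pairs = 20001.times.collect { |i| ["concurrent.count.#{i}", "#{i}"] }
  flat  = pairs.flatten
  flat.size                        # => 40002 (keys and values interleaved)
  flat.each_slice(10_000).count    # => 5 batches handed to mset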
@@ -605,7 +615,7 @@ describe Resque::Plugins::ConcurrentRestriction do

 lambda{ return_value = ConcurrentRestrictionJob.reset_restrictions }.should_not raise_exception

-return_value.should == [
+return_value.should == [20001, 0]
 end

 end
data/spec/resque_worker_extensions_spec.rb
CHANGED
@@ -259,4 +259,19 @@ describe Resque::Plugins::ConcurrentRestriction::Worker do
 RestrictionJob.total_run_count.should == 3
 end

+it "should move multiple items to the restricted queue each iteration" do
+RestrictionJob.set_running_count(RestrictionJob.tracking_key, 99)
+
+5.times {|i| Resque.enqueue(RestrictionJob, :queue => :normal)}
+Resque.size(:normal).should == 5
+RestrictionJob.restriction_queue(RestrictionJob.tracking_key, :normal).size.should == 0
+
+Resque::Plugins::ConcurrentRestriction.reserve_queued_job_attempts = 3
+
+run_resque_queue(:normal)
+RestrictionJob.run_count.should == 0
+Resque.size(:normal).should == 2
+RestrictionJob.restriction_queue(RestrictionJob.tracking_key, :normal).size.should == 3
+end
+
 end
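The expected numbers in this example follow directly from the new bound: the running count is pinned at 99 so every reserved job is restricted, and with reserve_queued_job_attempts = 3 a single pass stashes exactly three of the five queued jobs, runs none, and leaves two on :normal. Restated as arithmetic (a hypothetical walk-through, assuming one get_queued_job pass per run_resque_queue call):

  enqueued = 5                         # jobs pushed onto :normal
  attempts = 3                         # reserve_queued_job_attempts
  stashed  = [enqueued, attempts].min  # => 3 moved to the restriction queue
  left     = enqueued - stashed        # => 2 still on :normal
  ran      = 0                         # nothing runnable while the count is 99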
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: resque-concurrent-restriction
 version: !ruby/object:Gem::Version
-  version: 0.5.8
+  version: 0.5.9
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2013-06-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: resque
@@ -135,7 +135,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project: resque-concurrent-restriction
-rubygems_version: 1.8.
+rubygems_version: 1.8.25
 signing_key:
 specification_version: 3
 summary: A resque plugin for limiting how many of a specific job can run concurrently