chore-core 1.10.0 → 4.0.0
- checksums.yaml +5 -13
- data/LICENSE.txt +1 -1
- data/README.md +172 -153
- data/chore-core.gemspec +3 -3
- data/lib/chore.rb +29 -5
- data/lib/chore/cli.rb +22 -4
- data/lib/chore/configuration.rb +1 -1
- data/lib/chore/consumer.rb +54 -12
- data/lib/chore/fetcher.rb +12 -7
- data/lib/chore/hooks.rb +2 -1
- data/lib/chore/job.rb +19 -0
- data/lib/chore/manager.rb +17 -2
- data/lib/chore/publisher.rb +18 -2
- data/lib/chore/queues/filesystem/consumer.rb +126 -64
- data/lib/chore/queues/filesystem/filesystem_queue.rb +19 -0
- data/lib/chore/queues/filesystem/publisher.rb +10 -16
- data/lib/chore/queues/sqs.rb +22 -13
- data/lib/chore/queues/sqs/consumer.rb +64 -51
- data/lib/chore/queues/sqs/publisher.rb +26 -17
- data/lib/chore/strategies/consumer/batcher.rb +6 -6
- data/lib/chore/strategies/consumer/single_consumer_strategy.rb +5 -5
- data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +7 -6
- data/lib/chore/strategies/consumer/throttled_consumer_strategy.rb +120 -0
- data/lib/chore/strategies/worker/forked_worker_strategy.rb +5 -6
- data/lib/chore/strategies/worker/helpers/ipc.rb +87 -0
- data/lib/chore/strategies/worker/helpers/preforked_worker.rb +163 -0
- data/lib/chore/strategies/worker/helpers/work_distributor.rb +65 -0
- data/lib/chore/strategies/worker/helpers/worker_info.rb +13 -0
- data/lib/chore/strategies/worker/helpers/worker_killer.rb +40 -0
- data/lib/chore/strategies/worker/helpers/worker_manager.rb +183 -0
- data/lib/chore/strategies/worker/preforked_worker_strategy.rb +150 -0
- data/lib/chore/unit_of_work.rb +2 -1
- data/lib/chore/util.rb +5 -1
- data/lib/chore/version.rb +2 -2
- data/lib/chore/worker.rb +30 -3
- data/spec/chore/cli_spec.rb +2 -2
- data/spec/chore/consumer_spec.rb +1 -5
- data/spec/chore/duplicate_detector_spec.rb +17 -5
- data/spec/chore/fetcher_spec.rb +0 -11
- data/spec/chore/manager_spec.rb +7 -0
- data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +74 -16
- data/spec/chore/queues/sqs/consumer_spec.rb +117 -78
- data/spec/chore/queues/sqs/publisher_spec.rb +49 -60
- data/spec/chore/queues/sqs_spec.rb +32 -41
- data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +3 -3
- data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +6 -6
- data/spec/chore/strategies/consumer/throttled_consumer_strategy_spec.rb +165 -0
- data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +6 -1
- data/spec/chore/strategies/worker/helpers/ipc_spec.rb +127 -0
- data/spec/chore/strategies/worker/helpers/preforked_worker_spec.rb +236 -0
- data/spec/chore/strategies/worker/helpers/work_distributor_spec.rb +131 -0
- data/spec/chore/strategies/worker/helpers/worker_info_spec.rb +14 -0
- data/spec/chore/strategies/worker/helpers/worker_killer_spec.rb +97 -0
- data/spec/chore/strategies/worker/helpers/worker_manager_spec.rb +304 -0
- data/spec/chore/strategies/worker/preforked_worker_strategy_spec.rb +183 -0
- data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +1 -1
- data/spec/chore/worker_spec.rb +70 -15
- data/spec/spec_helper.rb +1 -1
- data/spec/support/queues/sqs/fake_objects.rb +18 -0
- metadata +53 -29
data/lib/chore/unit_of_work.rb
CHANGED
@@ -2,13 +2,14 @@ module Chore
   # Simple class to hold job processing information.
   # Has six attributes:
   # * +:id+ The queue implementation specific identifier for this message.
+  # * +:receipt_handle+ The queue implementation specific identifier for the receipt of this message.
   # * +:queue_name+ The name of the queue the job came from
   # * +:queue_timeout+ The time (in seconds) before the job will get re-enqueued if not processed
   # * +:message+ The actual data of the message.
   # * +:previous_attempts+ The number of times the work has been attempted previously.
   # * +:consumer+ The consumer instance used to fetch this message. Most queue implementations won't need access to this, but some (RabbitMQ) will. So we
   #   make sure to pass it along with each message. This instance will be used by the Worker for things like <tt>complete</tt> and </tt>reject</tt>.
-  class UnitOfWork < Struct.new(:id
+  class UnitOfWork < Struct.new(:id, :receipt_handle, :queue_name, :queue_timeout, :message, :previous_attempts, :consumer, :decoded_message, :klass)
     # The time at which this unit of work was created
     attr_accessor :created_at

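For reference, a minimal sketch of constructing a unit of work against the 4.0 struct above. The values are hypothetical placeholders; the positional order simply follows the Struct definition shown in this diff, with the receipt handle now in the second slot.

```ruby
require 'chore'

work = Chore::UnitOfWork.new(
  'msg-123',                        # :id
  'receipt-handle-abc',             # :receipt_handle (new in 4.x)
  'test-queue',                     # :queue_name
  60,                               # :queue_timeout in seconds
  '{"class":"TestJob","args":[]}',  # :message
  0,                                # :previous_attempts
  nil                               # :consumer (normally the consumer that fetched the message)
)
work.created_at = Time.now
```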
data/lib/chore/util.rb
CHANGED
@@ -2,7 +2,7 @@ module Chore

   # Collection of utilities and helpers used by Chore internally
   module Util
-
+
     # To avoid bringing in all of active_support, we implemented constantize here
     def constantize(camel_cased_word)
       names = camel_cased_word.split('::')
@@ -14,5 +14,9 @@ module Chore
       end
       constant
     end
+
+    def procline(str)
+      $0 = str
+    end
   end
 end
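The new Chore::Util#procline helper only assigns to $0, which renames the running process in ps output. A minimal sketch, assuming chore-core 4.x is installed; the mixin class below is hypothetical.

```ruby
require 'chore'

# Chore::Util is a module of instance helpers (constantize, procline), so it can be mixed in.
class ProclineExample
  include Chore::Util
end

ProclineExample.new.procline("chore-example: idle since #{Time.now.to_i}")
puts $0   # prints the new process title
```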
data/lib/chore/version.rb
CHANGED
data/lib/chore/worker.rb
CHANGED
@@ -42,6 +42,28 @@ module Chore
       @started_at + total_timeout
     end

+    def duplicate_work?(item)
+      # if we've got a duplicate, remove the message from the queue by not actually running and also not reporting any errors
+      payload = options[:payload_handler].payload(item.decoded_message)
+
+      # if we're hitting the custom dedupe key, we want to remove this message from the queue
+      if item.klass.has_dedupe_lambda?
+        dedupe_key = item.klass.dedupe_key(*payload)
+        if dedupe_key.nil? || dedupe_key.strip.empty? # if the dedupe key is nil, don't continue with the rest of the dedupe lambda logic
+          Chore.logger.info { "#{item.klass} dedupe key nil, skipping memcached lookup." }
+          return false
+        end
+
+        if item.consumer.duplicate_message?(dedupe_key, item.klass, item.queue_timeout)
+          Chore.logger.info { "Found and deleted duplicate job #{item.klass}"}
+          item.consumer.complete(item.id, item.receipt_handle)
+          return true
+        end
+      end
+
+      return false
+    end
+
     # The workhorse. Do the work, all of it. This will block for an entirely unspecified amount
     # of time based on the work to be performed. This will:
     # * Decode each message.
@@ -58,12 +80,16 @@ module Chore
       begin
         item.decoded_message = options[:payload_handler].decode(item.message)
         item.klass = options[:payload_handler].payload_class(item.decoded_message)
+
+        next if duplicate_work?(item)
+
+        Chore.run_hooks_for(:worker_to_start, item)
         start_item(item)
       rescue => e
         Chore.logger.error { "Failed to run job for #{item.message} with error: #{e.message} #{e.backtrace * "\n"}" }
         if item.current_attempt >= Chore.config.max_attempts
           Chore.run_hooks_for(:on_permanent_failure,item.queue_name,item.message,e)
-          item.consumer.complete(item.id)
+          item.consumer.complete(item.id, item.receipt_handle)
         else
           Chore.run_hooks_for(:on_failure,item.message,e)
           item.consumer.reject(item.id)
@@ -86,9 +112,10 @@ module Chore
       begin
         Chore.logger.info { "Running job #{klass} with params #{message}"}
         perform_job(klass,message)
-        item.consumer.complete(item.id)
+        item.consumer.complete(item.id, item.receipt_handle)
         Chore.logger.info { "Finished job #{klass} with params #{message}"}
         klass.run_hooks_for(:after_perform, message)
+        Chore.run_hooks_for(:worker_ended, item)
       rescue Job::RejectMessageException
         item.consumer.reject(item.id)
         Chore.logger.error { "Failed to run job for #{item.message} with error: Job raised a RejectMessageException" }
@@ -114,7 +141,7 @@ module Chore
       Chore.logger.error { "Failed to run job #{item.message} with error: #{e.message} at #{e.backtrace * "\n"}" }
       if item.current_attempt >= klass.options[:max_attempts]
         klass.run_hooks_for(:on_permanent_failure,item.queue_name,message,e)
-        item.consumer.complete(item.id)
+        item.consumer.complete(item.id, item.receipt_handle)
       else
         klass.run_hooks_for(:on_failure, message, e)
         item.consumer.reject(item.id)
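The new duplicate_work? path above depends on the job class answering has_dedupe_lambda? and dedupe_key, and the run loop now fires :worker_to_start and :worker_ended global hooks around each unit of work. A hedged sketch of a job that opts into deduplication; the :dedupe_lambda option name is an assumption inferred from those calls, so check data/lib/chore/job.rb in this release for the exact DSL.

```ruby
require 'chore'

class EmailJob
  include Chore::Job
  queue_options :name      => 'email',
                :publisher => Chore::Queues::SQS::Publisher,
                # Assumed option name: jobs producing the same key within the queue
                # timeout are completed as duplicates without being run.
                :dedupe_lambda => lambda { |recipient, template| "#{recipient}:#{template}" }

  def perform(recipient, template)
    # deliver the email here
  end
end
```

Registering the new lifecycle hooks looks the same as the existing global hooks (assuming Chore.add_hook, the registration API alongside data/lib/chore/hooks.rb):

```ruby
# Each hook receives the UnitOfWork for the message being processed.
Chore.add_hook(:worker_to_start) do |work|
  Chore.logger.info "starting #{work.klass} from #{work.queue_name}"
end

Chore.add_hook(:worker_ended) do |work|
  Chore.logger.info "finished #{work.klass} after #{work.previous_attempts + 1} attempt(s)"
end
```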
data/spec/chore/cli_spec.rb
CHANGED
data/spec/chore/consumer_spec.rb
CHANGED
@@ -22,10 +22,6 @@ describe Chore::Consumer do
     Chore::Consumer.should respond_to :reset_connection!
   end

-  it 'should have a class level cleanup method' do
-    Chore::Consumer.should respond_to :cleanup
-  end
-
   it 'should not have an implemented consume method' do
     expect { consumer.consume }.to raise_error(NotImplementedError)
   end
@@ -35,6 +31,6 @@ describe Chore::Consumer do
   end

   it 'should not have an implemented complete method' do
-    expect { consumer.complete(message) }.to raise_error(NotImplementedError)
+    expect { consumer.complete(message, nil) }.to raise_error(NotImplementedError)
   end
 end
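As the spec now shows, the base consumer contract passes a receipt handle to complete alongside the message id, and (per the queue specs further down) yields six values per message: id, receipt handle, queue name, timeout, body, previous attempts. A minimal in-memory consumer sketch against that contract; the class and its backing queue are purely illustrative and not part of chore-core.

```ruby
require 'chore'

class InMemoryConsumer < Chore::Consumer
  def initialize(queue_name, opts = {})
    super
    @name     = queue_name
    @messages = Queue.new   # Thread::Queue standing in for a real backend
  end

  def push(body)
    @messages << [rand(100_000).to_s, body]
  end

  def consume
    until @messages.empty?
      id, body = @messages.pop
      # id, receipt_handle, queue_name, queue_timeout, body, previous_attempts
      yield id, id, @name, 60, body, 0
    end
  end

  def complete(id, receipt_handle)
    # a real consumer deletes the message from its backend here
  end

  def reject(id)
    # a real consumer makes the message visible again here
  end
end
```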
data/spec/chore/duplicate_detector_spec.rb
CHANGED
@@ -2,7 +2,21 @@ require 'spec_helper'
 require 'securerandom'

 describe Chore::DuplicateDetector do
-
+  class FakeDalli
+    def initialize
+      @store = {}
+    end
+    def add(id, val, ttl=0)
+      if @store[id] && @store[id][:inserted] + @store[id][:ttl] > Time.now.to_i
+        return false
+      else
+        @store[id] = {:val => val, :ttl => ttl, :inserted => Time.now.to_i}
+        return true
+      end
+    end
+  end
+
+  let(:memcache) { FakeDalli.new }
   let(:dupe_on_cache_failure) { false }
   let(:dedupe_params) { { :memcached_client => memcache, :dupe_on_cache_failure => dupe_on_cache_failure } }
   let(:dedupe) { Chore::DuplicateDetector.new(dedupe_params)}
@@ -15,12 +29,11 @@ describe Chore::DuplicateDetector do

   describe "#found_duplicate" do
     it 'should not return true if the message has not already been seen' do
-      expect(memcache).to receive(:add).and_return(true)
       expect(dedupe.found_duplicate?(message_data)).to_not be true
     end

     it 'should return true if the message has already been seen' do
-
+      memcache.add(message_data[:id], 1, message_data[:visibility_timeout])
       expect(dedupe.found_duplicate?(message_data)).to be true
     end

@@ -34,13 +47,12 @@ describe Chore::DuplicateDetector do
     end

     it "should set the timeout to be the queue's " do
-      expect(memcache).to receive(:add).with(id,"1",timeout).
+      expect(memcache).to receive(:add).with(id,"1",timeout).and_call_original
       expect(dedupe.found_duplicate?(message_data)).to be false
     end

     it "should call #visibility_timeout once and only once" do
       expect(queue).to receive(:visibility_timeout).once
-      expect(memcache).to receive(:add).at_least(3).times.and_return(true)
       3.times { dedupe.found_duplicate?(message_data) }
     end

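Outside the spec, the same check runs against a real memcached client. A sketch assuming the Dalli gem and the message-data keys (:id, :queue, :visibility_timeout) implied by the lets above; verify the exact hash shape against data/lib/chore/duplicate_detector.rb.

```ruby
require 'chore'
require 'dalli'

detector = Chore::DuplicateDetector.new(
  :memcached_client      => Dalli::Client.new('localhost:11211'),
  :dupe_on_cache_failure => false
)

# Placeholder message data; keys inferred from the spec above.
message_data = { :id => 'msg-1', :queue => 'test-queue', :visibility_timeout => 60 }

detector.found_duplicate?(message_data) # => false the first time this id is seen
detector.found_duplicate?(message_data) # => true while the visibility timeout holds
```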
data/spec/chore/fetcher_spec.rb
CHANGED
@@ -35,15 +35,4 @@ describe Chore::Fetcher do
       fetcher.start
     end
   end
-
-  describe "cleaning up" do
-    before(:each) do
-      manager.stub(:assign)
-    end
-
-    it "should run cleanup on each queue" do
-      consumer.should_receive(:cleanup).with('test')
-      fetcher.start
-    end
-  end
 end
data/spec/chore/manager_spec.rb
CHANGED
@@ -35,6 +35,13 @@ describe Chore::Manager do
       manager.assign(work)
     end
   end
+
+  describe 'returning work' do
+    it 'should return work to the fetcher' do
+      expect(fetcher).to receive(:return_work).with([work])
+      manager.return_work([work])
+    end
+  end
 end

 end
data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb
CHANGED
@@ -8,10 +8,16 @@ describe Chore::Queues::Filesystem::Consumer do
   let(:publisher) { Chore::Queues::Filesystem::Publisher.new }
   let(:test_queues_dir) { "test-queues" }
   let(:test_queue) { "test-queue" }
+  let(:default_timeout) { 60 }
+  let(:timeout) { nil }

   before do
     Chore.config.fs_queue_root = test_queues_dir
-
+    if timeout
+      File.open("#{config_dir}/timeout", "w") {|f| f << timeout.to_s}
+    else
+      expect(Chore.config).to receive(:default_queue_timeout).and_return(default_timeout)
+    end
     allow(consumer).to receive(:sleep)
   end

@@ -22,35 +28,79 @@ describe Chore::Queues::Filesystem::Consumer do
   let(:test_job_hash) {{:class => "TestClass", :args => "test-args"}}
   let(:new_dir) { described_class.new_dir(test_queue) }
   let(:in_progress_dir) { described_class.in_progress_dir(test_queue) }
+  let(:config_dir) { described_class.config_dir(test_queue) }

   describe ".cleanup" do
-    it "should move in_progress jobs to new dir" do
+    it "should move expired in_progress jobs to new dir" do
+      timestamp = Time.now.to_i - 1
+
+      FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+      described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
+      expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
+    end
+
+    it "should move non-timestamped jobs from in_progress_dir to new dir" do
       FileUtils.touch("#{in_progress_dir}/foo.1.job")
-      described_class.cleanup(
+      described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
       expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
     end
+
+    it "should not affect non-expired jobs" do
+      timestamp = Time.now.to_i - 1
+
+      FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+      described_class.cleanup(Time.now.to_i - 2, new_dir, in_progress_dir)
+      expect(File.exist?("#{new_dir}/foo.2.job")).to eq(false)
+    end
   end

   describe ".make_in_progress" do
-    it "should move job to in_progress dir" do
+    it "should move non-empty job to in_progress dir" do
+      now = Time.now
+
+      Timecop.freeze(now) do
+        File.open("#{new_dir}/foo.1.job", "w") {|f| f << "{}"}
+        described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+        expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(true)
+      end
+    end
+
+    it "should not move empty jobs to in_progress dir" do
+      now = Time.now
+
+      Timecop.freeze(now) do
+        FileUtils.touch("#{new_dir}/foo.1.job")
+        described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+        expect(File.exist?("#{new_dir}/foo.1.job")).to eq(true)
+        expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
+      end
+    end
+
+    it "should delete expired empty jobs" do
       FileUtils.touch("#{new_dir}/foo.1.job")
-
-
+
+      now = Time.now + default_timeout
+      Timecop.freeze(now) do
+        described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+        expect(File.exist?("#{new_dir}/foo.1.job")).to eq(false)
+        expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
+      end
     end
   end

   describe ".make_new_again" do
     it "should move job to new dir" do
-
-
+      timestamp = Time.now.to_i
+      FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+      described_class.make_new_again("foo.1.#{timestamp}.job", new_dir, in_progress_dir)
       expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
     end
   end

-  describe ".
+  describe ".each_file" do
     it "should list jobs in dir" do
       FileUtils.touch("#{new_dir}/foo.1.job")
-      expect
+      expect {|b| described_class.each_file(new_dir, &b) }.to yield_with_args("foo.1.job")
     end
   end

@@ -71,7 +121,7 @@ describe Chore::Queues::Filesystem::Consumer do
     end

     it "should consume a published job and yield the job to the handler block" do
-      expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
+      expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 0)
     end

     context "rejecting a job" do
@@ -85,7 +135,9 @@ describe Chore::Queues::Filesystem::Consumer do
       end
       expect(rejected).to be true

-
+      Timecop.freeze(Time.now + 61) do
+        expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 1)
+      end
     end
   end

@@ -93,16 +145,23 @@ describe Chore::Queues::Filesystem::Consumer do
     let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }

     it "should remove job on completion" do
-
+
       consumer.consume do |job_id, queue_name, job_hash|
+        expect(File).to receive(:delete).with(kind_of(String))
         consumer.complete(job_id)
-        completed = true
       end
-      expect(completed).to be true

       expect { |b| consumer.consume(&b) }.to_not yield_control
     end
   end
+
+  context "with queue-specific timeout config" do
+    let(:timeout) { 30 }
+
+    it "should consume a published job and yield the job to the handler block" do
+      expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 30, test_job_hash.to_json, 0)
+    end
+  end
   end

   context "not finding a published job" do
@@ -112,4 +171,3 @@ describe Chore::Queues::Filesystem::Consumer do
     end
   end
 end
-
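The directory layout these specs exercise can be reproduced directly: each queue lives under Chore.config.fs_queue_root with new/, in_progress/ and config/ subdirectories, and a config/timeout file overrides default_queue_timeout for that queue. A hedged sketch; the paths mirror the spec, and the publish call assumes the standard Chore publisher signature (publish(queue_name, job)).

```ruby
require 'chore'
require 'chore/queues/filesystem/consumer'
require 'chore/queues/filesystem/publisher'
require 'fileutils'

Chore.config.fs_queue_root = '/tmp/chore-queues'   # placeholder root directory

queue      = 'test-queue'
consumer   = Chore::Queues::Filesystem::Consumer
config_dir = consumer.config_dir(queue)

# Give this queue a 30s timeout instead of Chore.config.default_queue_timeout.
FileUtils.mkdir_p(config_dir)
File.write(File.join(config_dir, 'timeout'), '30')

# Publishing drops a numbered .job file into new/; consuming renames it into
# in_progress/ with a timestamp suffix until it is completed or re-queued.
Chore::Queues::Filesystem::Publisher.new.publish(queue, :class => 'TestJob', :args => ['test-args'])
puts Dir.glob(File.join(consumer.new_dir(queue), '*.job'))
```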
data/spec/chore/queues/sqs/consumer_spec.rb
CHANGED
@@ -1,133 +1,170 @@
 require 'spec_helper'

 describe Chore::Queues::SQS::Consumer do
-
-
-  let(:queues) { double("queues") }
-  let(:queue) { double("test_queue", :visibility_timeout=>10, :url=>"test_queue", :name=>"test_queue") }
+  include_context 'fake objects'
+
   let(:options) { {} }
   let(:consumer) { Chore::Queues::SQS::Consumer.new(queue_name) }
-  let(:
-  let(:
-
-  let(:
-
+  let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']} }
+  let(:backoff_func) { Proc.new { 2 + 2 } }
+
+  let(:receive_message_result) { Aws::SQS::Message::Collection.new([message], size: 1) }
+
+  let(:message) do
+    Aws::SQS::Message.new(
+      message_id: 'message id',
+      receipt_handle: "receipt_handle",
+      body: job.to_json,
+      data: job,
+      queue: queue,
+      queue_url: queue_url,
+    )
+  end

-
-
-
+  # Since a message handler is required (but not validated), this convenience method lets us
+  # effectively stub the block.
+  def consume(&block)
+    block = Proc.new{} unless block_given?
+    consumer.consume(&block)
+  end

-
-  allow(
-  allow(
-  allow(
+  before do
+    allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
+    allow(Aws::SQS::Queue).to receive(:new).and_return(queue)
+    allow(queue).to receive(:receive_messages).and_return(receive_message_result)
+    allow(message).to receive(:attributes).and_return({ 'ApproximateReceiveCount' => rand(10) })
   end

   describe "consuming messages" do
-
-
-    let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message) }
-
-    it 'should configure sqs' do
-      allow(Chore.config).to receive(:aws_access_key).and_return('key')
-      allow(Chore.config).to receive(:aws_secret_key).and_return('secret')
-
-      expect(AWS::SQS).to receive(:new).with(
-        :access_key_id => 'key',
-        :secret_access_key => 'secret',
-        :logger => Chore.logger,
-        :log_level => :debug
-      ).and_return(sqs)
-      consumer.consume
+    before do
+      allow(consumer).to receive(:running?).and_return(true, false)
     end

-
-
+    context "should create objects for interacting with the SQS API" do
+      it 'should create an sqs client' do
+        expect(queue).to receive(:receive_messages)
+        consume
+      end

-
-
-
+      it "should only create an sqs client when one doesn't exist" do
+        allow(consumer).to receive(:running?).and_return(true, true, true, true, false, true, true)
+        expect(Aws::SQS::Client).to receive(:new).exactly(:once)
+        consume
+      end

-
-
-
-
+      it 'should look up the queue url based on the queue name' do
+        expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
+        consume
+      end

-
-
-
+      it 'should create a queue object' do
+        expect(consumer.send(:queue)).to_not be_nil
+        consume
+      end
     end

     context "should receive a message from the queue" do
-
       it 'should use the default size of 10 when no queue_polling_size is specified' do
-        expect(queue).to receive(:receive_messages).with(
-
+        expect(queue).to receive(:receive_messages).with(
+          :max_number_of_messages => 10,
+          :attribute_names => ['ApproximateReceiveCount']
+        ).and_return(message)
+        consume
       end

       it 'should respect the queue_polling_size when specified' do
         allow(Chore.config).to receive(:queue_polling_size).and_return(5)
-        expect(queue).to receive(:receive_messages).with(
-
+        expect(queue).to receive(:receive_messages).with(
+          :max_number_of_messages => 5,
+          :attribute_names => ['ApproximateReceiveCount']
+        )
+        consume
       end
     end

-    it "should check the uniqueness of the message" do
-      allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(false)
-      consumer.consume
-    end
-
-    it "should yield the message to the handler block" do
-      expect { |b| consumer.consume(&b) }.to yield_with_args('handle', queue_name, 10, 'message body', 0)
-    end
-
-    it 'should not yield for a dupe message' do
-      allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(true)
-      expect {|b| consumer.consume(&b) }.not_to yield_control
-    end
-
     context 'with no messages' do
-
-
+      before do
+        allow(consumer).to receive(:handle_messages).and_return([])
+      end

       it 'should sleep' do
         expect(consumer).to receive(:sleep).with(1)
-
+        consume
       end
     end

     context 'with messages' do
-
-
+      before do
+        allow(consumer).to receive(:duplicate_message?).and_return(false)
+        allow(queue).to receive(:receive_messages).and_return(message)
+      end
+
+      it "should check the uniqueness of the message" do
+        expect(consumer).to receive(:duplicate_message?)
+        consume
+      end
+
+      it "should yield the message to the handler block" do
+        expect { |b| consume(&b) }
+          .to yield_with_args(
+            message.message_id,
+            message.receipt_handle,
+            queue_name,
+            queue.attributes['VisibilityTimeout'].to_i,
+            message.body,
+            message.attributes['ApproximateReceiveCount'].to_i - 1
+          )
+      end

       it 'should not sleep' do
         expect(consumer).to_not receive(:sleep)
-
+        consume
       end
+
+      context 'with duplicates' do
+        before do
+          allow(consumer).to receive(:duplicate_message?).and_return(true)
+        end
+
+        it 'should not yield for a dupe message' do
+          expect {|b| consume(&b) }.not_to yield_control
+        end
+      end
+    end
+  end
+
+  describe "completing work" do
+    it 'deletes the message from the queue' do
+      expect(queue).to receive(:delete_messages).with(entries: [{id: message.message_id, receipt_handle: message.receipt_handle}])
+      consumer.complete(message.message_id, message.receipt_handle)
     end
   end

   describe '#delay' do
-    let(:item) { Chore::UnitOfWork.new(message.
-    let(:
+    let(:item) { Chore::UnitOfWork.new(message.message_id, message.receipt_handle, message.queue, 60, message.body, 0, consumer) }
+    let(:entries) do
+      [
+        { id: item.id, receipt_handle: item.receipt_handle, visibility_timeout: backoff_func.call(item) },
+      ]
+    end

     it 'changes the visiblity of the message' do
-      expect(queue).to receive(:
+      expect(queue).to receive(:change_message_visibility_batch).with(entries: entries)
       consumer.delay(item, backoff_func)
     end
   end

   describe '#reset_connection!' do
     it 'should reset the connection after a call to reset_connection!' do
-      expect(
-      expect(pool).to receive(:empty!)
+      expect(Aws).to receive(:empty_connection_pools!)
       Chore::Queues::SQS::Consumer.reset_connection!
       consumer.send(:queue)
     end

     it 'should not reset the connection between calls' do
-
-
+      expect(Aws).to receive(:empty_connection_pools!).once
+      q = consumer.send(:queue)
+      expect(consumer.send(:queue)).to be(q)
     end

     it 'should reconfigure sqs' do
@@ -135,13 +172,15 @@ describe Chore::Queues::SQS::Consumer do
       allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false)

       allow(queue).to receive(:receive_messages).and_return(message)
-
+      allow(sqs).to receive(:receive_message).with({:attribute_names=>["ApproximateReceiveCount"], :max_number_of_messages=>10, :queue_url=>queue_url})
+
+      consume

       Chore::Queues::SQS::Consumer.reset_connection!
-      allow(
+      allow(Aws::SQS::Client).to receive(:new).and_return(sqs)

       expect(consumer).to receive(:running?).and_return(true, false)
-
+      consume
     end
   end
 end
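The rewritten spec reflects the move from the old AWS::SQS client to aws-sdk v2/v3 resources: it stubs Aws::SQS::Client, get_queue_url, receive_messages, delete_messages and change_message_visibility_batch. A standalone sketch of those same SDK calls outside Chore; region and queue name are placeholders.

```ruby
require 'aws-sdk-sqs'   # shipped inside the umbrella 'aws-sdk' gem in v2

client    = Aws::SQS::Client.new(region: 'us-east-1')
queue_url = client.get_queue_url(queue_name: 'test_queue').queue_url
queue     = Aws::SQS::Queue.new(url: queue_url, client: client)

messages = queue.receive_messages(
  max_number_of_messages: 10,
  attribute_names:        ['ApproximateReceiveCount']
)

messages.each do |message|
  previous_attempts = message.attributes['ApproximateReceiveCount'].to_i - 1
  puts "#{message.message_id} attempt #{previous_attempts + 1}: #{message.body}"

  # Completing work in 4.x deletes by id *and* receipt handle, matching consumer.complete above.
  queue.delete_messages(
    entries: [{ id: message.message_id, receipt_handle: message.receipt_handle }]
  )
end
```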