chore-core 1.10.0 → 4.0.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (60) hide show
  1. checksums.yaml +5 -13
  2. data/LICENSE.txt +1 -1
  3. data/README.md +172 -153
  4. data/chore-core.gemspec +3 -3
  5. data/lib/chore.rb +29 -5
  6. data/lib/chore/cli.rb +22 -4
  7. data/lib/chore/configuration.rb +1 -1
  8. data/lib/chore/consumer.rb +54 -12
  9. data/lib/chore/fetcher.rb +12 -7
  10. data/lib/chore/hooks.rb +2 -1
  11. data/lib/chore/job.rb +19 -0
  12. data/lib/chore/manager.rb +17 -2
  13. data/lib/chore/publisher.rb +18 -2
  14. data/lib/chore/queues/filesystem/consumer.rb +126 -64
  15. data/lib/chore/queues/filesystem/filesystem_queue.rb +19 -0
  16. data/lib/chore/queues/filesystem/publisher.rb +10 -16
  17. data/lib/chore/queues/sqs.rb +22 -13
  18. data/lib/chore/queues/sqs/consumer.rb +64 -51
  19. data/lib/chore/queues/sqs/publisher.rb +26 -17
  20. data/lib/chore/strategies/consumer/batcher.rb +6 -6
  21. data/lib/chore/strategies/consumer/single_consumer_strategy.rb +5 -5
  22. data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +7 -6
  23. data/lib/chore/strategies/consumer/throttled_consumer_strategy.rb +120 -0
  24. data/lib/chore/strategies/worker/forked_worker_strategy.rb +5 -6
  25. data/lib/chore/strategies/worker/helpers/ipc.rb +87 -0
  26. data/lib/chore/strategies/worker/helpers/preforked_worker.rb +163 -0
  27. data/lib/chore/strategies/worker/helpers/work_distributor.rb +65 -0
  28. data/lib/chore/strategies/worker/helpers/worker_info.rb +13 -0
  29. data/lib/chore/strategies/worker/helpers/worker_killer.rb +40 -0
  30. data/lib/chore/strategies/worker/helpers/worker_manager.rb +183 -0
  31. data/lib/chore/strategies/worker/preforked_worker_strategy.rb +150 -0
  32. data/lib/chore/unit_of_work.rb +2 -1
  33. data/lib/chore/util.rb +5 -1
  34. data/lib/chore/version.rb +2 -2
  35. data/lib/chore/worker.rb +30 -3
  36. data/spec/chore/cli_spec.rb +2 -2
  37. data/spec/chore/consumer_spec.rb +1 -5
  38. data/spec/chore/duplicate_detector_spec.rb +17 -5
  39. data/spec/chore/fetcher_spec.rb +0 -11
  40. data/spec/chore/manager_spec.rb +7 -0
  41. data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +74 -16
  42. data/spec/chore/queues/sqs/consumer_spec.rb +117 -78
  43. data/spec/chore/queues/sqs/publisher_spec.rb +49 -60
  44. data/spec/chore/queues/sqs_spec.rb +32 -41
  45. data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +3 -3
  46. data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +6 -6
  47. data/spec/chore/strategies/consumer/throttled_consumer_strategy_spec.rb +165 -0
  48. data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +6 -1
  49. data/spec/chore/strategies/worker/helpers/ipc_spec.rb +127 -0
  50. data/spec/chore/strategies/worker/helpers/preforked_worker_spec.rb +236 -0
  51. data/spec/chore/strategies/worker/helpers/work_distributor_spec.rb +131 -0
  52. data/spec/chore/strategies/worker/helpers/worker_info_spec.rb +14 -0
  53. data/spec/chore/strategies/worker/helpers/worker_killer_spec.rb +97 -0
  54. data/spec/chore/strategies/worker/helpers/worker_manager_spec.rb +304 -0
  55. data/spec/chore/strategies/worker/preforked_worker_strategy_spec.rb +183 -0
  56. data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +1 -1
  57. data/spec/chore/worker_spec.rb +70 -15
  58. data/spec/spec_helper.rb +1 -1
  59. data/spec/support/queues/sqs/fake_objects.rb +18 -0
  60. metadata +53 -29
@@ -2,13 +2,14 @@ module Chore
2
2
  # Simple class to hold job processing information.
3
3
  # Has six attributes:
4
4
  # * +:id+ The queue implementation specific identifier for this message.
5
+ # * +:receipt_handle+ The queue implementation specific identifier for the receipt of this message.
5
6
  # * +:queue_name+ The name of the queue the job came from
6
7
  # * +:queue_timeout+ The time (in seconds) before the job will get re-enqueued if not processed
7
8
  # * +:message+ The actual data of the message.
8
9
  # * +:previous_attempts+ The number of times the work has been attempted previously.
9
10
  # * +:consumer+ The consumer instance used to fetch this message. Most queue implementations won't need access to this, but some (RabbitMQ) will. So we
10
11
  # make sure to pass it along with each message. This instance will be used by the Worker for things like <tt>complete</tt> and <tt>reject</tt>.
11
- class UnitOfWork < Struct.new(:id,:queue_name,:queue_timeout,:message,:previous_attempts,:consumer,:decoded_message, :klass)
12
+ class UnitOfWork < Struct.new(:id, :receipt_handle, :queue_name, :queue_timeout, :message, :previous_attempts, :consumer, :decoded_message, :klass)
12
13
  # The time at which this unit of work was created
13
14
  attr_accessor :created_at
14
15
 
@@ -2,7 +2,7 @@ module Chore
2
2
 
3
3
  # Collection of utilities and helpers used by Chore internally
4
4
  module Util
5
-
5
+
6
6
  # To avoid bringing in all of active_support, we implemented constantize here
7
7
  def constantize(camel_cased_word)
8
8
  names = camel_cased_word.split('::')
@@ -14,5 +14,9 @@ module Chore
14
14
  end
15
15
  constant
16
16
  end
17
+
18
+ def procline(str)
19
+ $0 = str
20
+ end
17
21
  end
18
22
  end
@@ -1,7 +1,7 @@
1
1
  module Chore
2
2
  module Version #:nodoc:
3
- MAJOR = 1
4
- MINOR = 10
3
+ MAJOR = 4
4
+ MINOR = 0
5
5
  PATCH = 0
6
6
 
7
7
  STRING = [ MAJOR, MINOR, PATCH ].join('.')
@@ -42,6 +42,28 @@ module Chore
42
42
  @started_at + total_timeout
43
43
  end
44
44
 
45
+ def duplicate_work?(item)
46
+ # if we've got a duplicate, remove the message from the queue by not actually running and also not reporting any errors
47
+ payload = options[:payload_handler].payload(item.decoded_message)
48
+
49
+ # if we're hitting the custom dedupe key, we want to remove this message from the queue
50
+ if item.klass.has_dedupe_lambda?
51
+ dedupe_key = item.klass.dedupe_key(*payload)
52
+ if dedupe_key.nil? || dedupe_key.strip.empty? # if the dedupe key is nil, don't continue with the rest of the dedupe lambda logic
53
+ Chore.logger.info { "#{item.klass} dedupe key nil, skipping memcached lookup." }
54
+ return false
55
+ end
56
+
57
+ if item.consumer.duplicate_message?(dedupe_key, item.klass, item.queue_timeout)
58
+ Chore.logger.info { "Found and deleted duplicate job #{item.klass}"}
59
+ item.consumer.complete(item.id, item.receipt_handle)
60
+ return true
61
+ end
62
+ end
63
+
64
+ return false
65
+ end
66
+
45
67
  # The workhorse. Do the work, all of it. This will block for an entirely unspecified amount
46
68
  # of time based on the work to be performed. This will:
47
69
  # * Decode each message.
@@ -58,12 +80,16 @@ module Chore
58
80
  begin
59
81
  item.decoded_message = options[:payload_handler].decode(item.message)
60
82
  item.klass = options[:payload_handler].payload_class(item.decoded_message)
83
+
84
+ next if duplicate_work?(item)
85
+
86
+ Chore.run_hooks_for(:worker_to_start, item)
61
87
  start_item(item)
62
88
  rescue => e
63
89
  Chore.logger.error { "Failed to run job for #{item.message} with error: #{e.message} #{e.backtrace * "\n"}" }
64
90
  if item.current_attempt >= Chore.config.max_attempts
65
91
  Chore.run_hooks_for(:on_permanent_failure,item.queue_name,item.message,e)
66
- item.consumer.complete(item.id)
92
+ item.consumer.complete(item.id, item.receipt_handle)
67
93
  else
68
94
  Chore.run_hooks_for(:on_failure,item.message,e)
69
95
  item.consumer.reject(item.id)
@@ -86,9 +112,10 @@ module Chore
86
112
  begin
87
113
  Chore.logger.info { "Running job #{klass} with params #{message}"}
88
114
  perform_job(klass,message)
89
- item.consumer.complete(item.id)
115
+ item.consumer.complete(item.id, item.receipt_handle)
90
116
  Chore.logger.info { "Finished job #{klass} with params #{message}"}
91
117
  klass.run_hooks_for(:after_perform, message)
118
+ Chore.run_hooks_for(:worker_ended, item)
92
119
  rescue Job::RejectMessageException
93
120
  item.consumer.reject(item.id)
94
121
  Chore.logger.error { "Failed to run job for #{item.message} with error: Job raised a RejectMessageException" }
@@ -114,7 +141,7 @@ module Chore
114
141
  Chore.logger.error { "Failed to run job #{item.message} with error: #{e.message} at #{e.backtrace * "\n"}" }
115
142
  if item.current_attempt >= klass.options[:max_attempts]
116
143
  klass.run_hooks_for(:on_permanent_failure,item.queue_name,message,e)
117
- item.consumer.complete(item.id)
144
+ item.consumer.complete(item.id, item.receipt_handle)
118
145
  else
119
146
  klass.run_hooks_for(:on_failure, message, e)
120
147
  item.consumer.reject(item.id)
@@ -201,8 +201,8 @@ describe Chore::CLI do
201
201
 
202
202
  context 'given no value' do
203
203
  let(:command) { [] }
204
- it 'is the default value, nil' do
205
- subject.should == nil
204
+ it 'is the default value, 1' do
205
+ subject.should == 1
206
206
  end
207
207
  end
208
208
  end
@@ -22,10 +22,6 @@ describe Chore::Consumer do
22
22
  Chore::Consumer.should respond_to :reset_connection!
23
23
  end
24
24
 
25
- it 'should have a class level cleanup method' do
26
- Chore::Consumer.should respond_to :cleanup
27
- end
28
-
29
25
  it 'should not have an implemented consume method' do
30
26
  expect { consumer.consume }.to raise_error(NotImplementedError)
31
27
  end
@@ -35,6 +31,6 @@ describe Chore::Consumer do
35
31
  end
36
32
 
37
33
  it 'should not have an implemented complete method' do
38
- expect { consumer.complete(message) }.to raise_error(NotImplementedError)
34
+ expect { consumer.complete(message, nil) }.to raise_error(NotImplementedError)
39
35
  end
40
36
  end
@@ -2,7 +2,21 @@ require 'spec_helper'
2
2
  require 'securerandom'
3
3
 
4
4
  describe Chore::DuplicateDetector do
5
- let(:memcache) { double("memcache") }
5
+ class FakeDalli
6
+ def initialize
7
+ @store = {}
8
+ end
9
+ def add(id, val, ttl=0)
10
+ if @store[id] && @store[id][:inserted] + @store[id][:ttl] > Time.now.to_i
11
+ return false
12
+ else
13
+ @store[id] = {:val => val, :ttl => ttl, :inserted => Time.now.to_i}
14
+ return true
15
+ end
16
+ end
17
+ end
18
+
19
+ let(:memcache) { FakeDalli.new }
6
20
  let(:dupe_on_cache_failure) { false }
7
21
  let(:dedupe_params) { { :memcached_client => memcache, :dupe_on_cache_failure => dupe_on_cache_failure } }
8
22
  let(:dedupe) { Chore::DuplicateDetector.new(dedupe_params)}
@@ -15,12 +29,11 @@ describe Chore::DuplicateDetector do
15
29
 
16
30
  describe "#found_duplicate" do
17
31
  it 'should not return true if the message has not already been seen' do
18
- expect(memcache).to receive(:add).and_return(true)
19
32
  expect(dedupe.found_duplicate?(message_data)).to_not be true
20
33
  end
21
34
 
22
35
  it 'should return true if the message has already been seen' do
23
- expect(memcache).to receive(:add).and_return(false)
36
+ memcache.add(message_data[:id], 1, message_data[:visibility_timeout])
24
37
  expect(dedupe.found_duplicate?(message_data)).to be true
25
38
  end
26
39
 
@@ -34,13 +47,12 @@ describe Chore::DuplicateDetector do
34
47
  end
35
48
 
36
49
  it "should set the timeout to be the queue's " do
37
- expect(memcache).to receive(:add).with(id,"1",timeout).and_return(true)
50
+ expect(memcache).to receive(:add).with(id,"1",timeout).and_call_original
38
51
  expect(dedupe.found_duplicate?(message_data)).to be false
39
52
  end
40
53
 
41
54
  it "should call #visibility_timeout once and only once" do
42
55
  expect(queue).to receive(:visibility_timeout).once
43
- expect(memcache).to receive(:add).at_least(3).times.and_return(true)
44
56
  3.times { dedupe.found_duplicate?(message_data) }
45
57
  end
46
58
 
@@ -35,15 +35,4 @@ describe Chore::Fetcher do
35
35
  fetcher.start
36
36
  end
37
37
  end
38
-
39
- describe "cleaning up" do
40
- before(:each) do
41
- manager.stub(:assign)
42
- end
43
-
44
- it "should run cleanup on each queue" do
45
- consumer.should_receive(:cleanup).with('test')
46
- fetcher.start
47
- end
48
- end
49
38
  end
@@ -35,6 +35,13 @@ describe Chore::Manager do
35
35
  manager.assign(work)
36
36
  end
37
37
  end
38
+
39
+ describe 'returning work' do
40
+ it 'should return work to the fetcher' do
41
+ expect(fetcher).to receive(:return_work).with([work])
42
+ manager.return_work([work])
43
+ end
44
+ end
38
45
  end
39
46
 
40
47
  end
@@ -8,10 +8,16 @@ describe Chore::Queues::Filesystem::Consumer do
8
8
  let(:publisher) { Chore::Queues::Filesystem::Publisher.new }
9
9
  let(:test_queues_dir) { "test-queues" }
10
10
  let(:test_queue) { "test-queue" }
11
+ let(:default_timeout) { 60 }
12
+ let(:timeout) { nil }
11
13
 
12
14
  before do
13
15
  Chore.config.fs_queue_root = test_queues_dir
14
- expect(Chore.config).to receive(:default_queue_timeout).and_return(60)
16
+ if timeout
17
+ File.open("#{config_dir}/timeout", "w") {|f| f << timeout.to_s}
18
+ else
19
+ expect(Chore.config).to receive(:default_queue_timeout).and_return(default_timeout)
20
+ end
15
21
  allow(consumer).to receive(:sleep)
16
22
  end
17
23
 
@@ -22,35 +28,79 @@ describe Chore::Queues::Filesystem::Consumer do
22
28
  let(:test_job_hash) {{:class => "TestClass", :args => "test-args"}}
23
29
  let(:new_dir) { described_class.new_dir(test_queue) }
24
30
  let(:in_progress_dir) { described_class.in_progress_dir(test_queue) }
31
+ let(:config_dir) { described_class.config_dir(test_queue) }
25
32
 
26
33
  describe ".cleanup" do
27
- it "should move in_progress jobs to new dir" do
34
+ it "should move expired in_progress jobs to new dir" do
35
+ timestamp = Time.now.to_i - 1
36
+
37
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
38
+ described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
39
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
40
+ end
41
+
42
+ it "should move non-timestamped jobs from in_progress_dir to new dir" do
28
43
  FileUtils.touch("#{in_progress_dir}/foo.1.job")
29
- described_class.cleanup(test_queue)
44
+ described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
30
45
  expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
31
46
  end
47
+
48
+ it "should not affect non-expired jobs" do
49
+ timestamp = Time.now.to_i - 1
50
+
51
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
52
+ described_class.cleanup(Time.now.to_i - 2, new_dir, in_progress_dir)
53
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(false)
54
+ end
32
55
  end
33
56
 
34
57
  describe ".make_in_progress" do
35
- it "should move job to in_progress dir" do
58
+ it "should move non-empty job to in_progress dir" do
59
+ now = Time.now
60
+
61
+ Timecop.freeze(now) do
62
+ File.open("#{new_dir}/foo.1.job", "w") {|f| f << "{}"}
63
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
64
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(true)
65
+ end
66
+ end
67
+
68
+ it "should not move empty jobs to in_progress dir" do
69
+ now = Time.now
70
+
71
+ Timecop.freeze(now) do
72
+ FileUtils.touch("#{new_dir}/foo.1.job")
73
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
74
+ expect(File.exist?("#{new_dir}/foo.1.job")).to eq(true)
75
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
76
+ end
77
+ end
78
+
79
+ it "should delete expired empty jobs" do
36
80
  FileUtils.touch("#{new_dir}/foo.1.job")
37
- described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir)
38
- expect(File.exist?("#{in_progress_dir}/foo.1.job")).to eq(true)
81
+
82
+ now = Time.now + default_timeout
83
+ Timecop.freeze(now) do
84
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
85
+ expect(File.exist?("#{new_dir}/foo.1.job")).to eq(false)
86
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
87
+ end
39
88
  end
40
89
  end
41
90
 
42
91
  describe ".make_new_again" do
43
92
  it "should move job to new dir" do
44
- FileUtils.touch("#{in_progress_dir}/foo.1.job")
45
- described_class.make_new_again("foo.1.job", new_dir, in_progress_dir)
93
+ timestamp = Time.now.to_i
94
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
95
+ described_class.make_new_again("foo.1.#{timestamp}.job", new_dir, in_progress_dir)
46
96
  expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
47
97
  end
48
98
  end
49
99
 
50
- describe ".job_files" do
100
+ describe ".each_file" do
51
101
  it "should list jobs in dir" do
52
102
  FileUtils.touch("#{new_dir}/foo.1.job")
53
- expect(described_class.job_files(new_dir)).to eq(["foo.1.job"])
103
+ expect {|b| described_class.each_file(new_dir, &b) }.to yield_with_args("foo.1.job")
54
104
  end
55
105
  end
56
106
 
@@ -71,7 +121,7 @@ describe Chore::Queues::Filesystem::Consumer do
71
121
  end
72
122
 
73
123
  it "should consume a published job and yield the job to the handler block" do
74
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
124
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 0)
75
125
  end
76
126
 
77
127
  context "rejecting a job" do
@@ -85,7 +135,9 @@ describe Chore::Queues::Filesystem::Consumer do
85
135
  end
86
136
  expect(rejected).to be true
87
137
 
88
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 1)
138
+ Timecop.freeze(Time.now + 61) do
139
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 1)
140
+ end
89
141
  end
90
142
  end
91
143
 
@@ -93,16 +145,23 @@ describe Chore::Queues::Filesystem::Consumer do
93
145
  let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
94
146
 
95
147
  it "should remove job on completion" do
96
- completed = false
148
+
97
149
  consumer.consume do |job_id, queue_name, job_hash|
150
+ expect(File).to receive(:delete).with(kind_of(String))
98
151
  consumer.complete(job_id)
99
- completed = true
100
152
  end
101
- expect(completed).to be true
102
153
 
103
154
  expect { |b| consumer.consume(&b) }.to_not yield_control
104
155
  end
105
156
  end
157
+
158
+ context "with queue-specific timeout config" do
159
+ let(:timeout) { 30 }
160
+
161
+ it "should consume a published job and yield the job to the handler block" do
162
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 30, test_job_hash.to_json, 0)
163
+ end
164
+ end
106
165
  end
107
166
 
108
167
  context "not finding a published job" do
@@ -112,4 +171,3 @@ describe Chore::Queues::Filesystem::Consumer do
112
171
  end
113
172
  end
114
173
  end
115
-
@@ -1,133 +1,170 @@
1
1
  require 'spec_helper'
2
2
 
3
3
  describe Chore::Queues::SQS::Consumer do
4
- let(:queue_name) { "test" }
5
- let(:queue_url) { "test_url" }
6
- let(:queues) { double("queues") }
7
- let(:queue) { double("test_queue", :visibility_timeout=>10, :url=>"test_queue", :name=>"test_queue") }
4
+ include_context 'fake objects'
5
+
8
6
  let(:options) { {} }
9
7
  let(:consumer) { Chore::Queues::SQS::Consumer.new(queue_name) }
10
- let(:message) { TestMessage.new("handle",queue, "message body", 1) }
11
- let(:message_data) {{:id=>message.id, :queue=>message.queue.url, :visibility_timeout=>message.queue.visibility_timeout}}
12
- let(:pool) { double("pool") }
13
- let(:sqs) { double('AWS::SQS') }
14
- let(:backoff_func) { nil }
8
+ let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']} }
9
+ let(:backoff_func) { Proc.new { 2 + 2 } }
10
+
11
+ let(:receive_message_result) { Aws::SQS::Message::Collection.new([message], size: 1) }
12
+
13
+ let(:message) do
14
+ Aws::SQS::Message.new(
15
+ message_id: 'message id',
16
+ receipt_handle: "receipt_handle",
17
+ body: job.to_json,
18
+ data: job,
19
+ queue: queue,
20
+ queue_url: queue_url,
21
+ )
22
+ end
15
23
 
16
- before do
17
- allow(AWS::SQS).to receive(:new).and_return(sqs)
18
- allow(sqs).to receive(:queues) { queues }
24
+ # Since a message handler is required (but not validated), this convenience method lets us
25
+ # effectively stub the block.
26
+ def consume(&block)
27
+ block = Proc.new{} unless block_given?
28
+ consumer.consume(&block)
29
+ end
19
30
 
20
- allow(queues).to receive(:url_for) { queue_url }
21
- allow(queues).to receive(:[]) { queue }
22
- allow(queue).to receive(:receive_message) { message }
23
- allow(pool).to receive(:empty!) { nil }
31
+ before do
32
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
33
+ allow(Aws::SQS::Queue).to receive(:new).and_return(queue)
34
+ allow(queue).to receive(:receive_messages).and_return(receive_message_result)
35
+ allow(message).to receive(:attributes).and_return({ 'ApproximateReceiveCount' => rand(10) })
24
36
  end
25
37
 
26
38
  describe "consuming messages" do
27
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, false) }
28
- let!(:messages_be_unique) { allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false) }
29
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message) }
30
-
31
- it 'should configure sqs' do
32
- allow(Chore.config).to receive(:aws_access_key).and_return('key')
33
- allow(Chore.config).to receive(:aws_secret_key).and_return('secret')
34
-
35
- expect(AWS::SQS).to receive(:new).with(
36
- :access_key_id => 'key',
37
- :secret_access_key => 'secret',
38
- :logger => Chore.logger,
39
- :log_level => :debug
40
- ).and_return(sqs)
41
- consumer.consume
39
+ before do
40
+ allow(consumer).to receive(:running?).and_return(true, false)
42
41
  end
43
42
 
44
- it 'should not configure sqs multiple times' do
45
- allow(consumer).to receive(:running?).and_return(true, true, false)
43
+ context "should create objects for interacting with the SQS API" do
44
+ it 'should create an sqs client' do
45
+ expect(queue).to receive(:receive_messages)
46
+ consume
47
+ end
46
48
 
47
- expect(AWS::SQS).to receive(:new).once.and_return(sqs)
48
- consumer.consume
49
- end
49
+ it "should only create an sqs client when one doesn't exist" do
50
+ allow(consumer).to receive(:running?).and_return(true, true, true, true, false, true, true)
51
+ expect(Aws::SQS::Client).to receive(:new).exactly(:once)
52
+ consume
53
+ end
50
54
 
51
- it 'should look up the queue url based on the queue name' do
52
- expect(queues).to receive(:url_for).with('test').and_return(queue_url)
53
- consumer.consume
54
- end
55
+ it 'should look up the queue url based on the queue name' do
56
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
57
+ consume
58
+ end
55
59
 
56
- it 'should look up the queue based on the queue url' do
57
- expect(queues).to receive(:[]).with(queue_url).and_return(queue)
58
- consumer.consume
60
+ it 'should create a queue object' do
61
+ expect(consumer.send(:queue)).to_not be_nil
62
+ consume
63
+ end
59
64
  end
60
65
 
61
66
  context "should receive a message from the queue" do
62
-
63
67
  it 'should use the default size of 10 when no queue_polling_size is specified' do
64
- expect(queue).to receive(:receive_messages).with(:limit => 10, :attributes => [:receive_count])
65
- consumer.consume
68
+ expect(queue).to receive(:receive_messages).with(
69
+ :max_number_of_messages => 10,
70
+ :attribute_names => ['ApproximateReceiveCount']
71
+ ).and_return(message)
72
+ consume
66
73
  end
67
74
 
68
75
  it 'should respect the queue_polling_size when specified' do
69
76
  allow(Chore.config).to receive(:queue_polling_size).and_return(5)
70
- expect(queue).to receive(:receive_messages).with(:limit => 5, :attributes => [:receive_count])
71
- consumer.consume
77
+ expect(queue).to receive(:receive_messages).with(
78
+ :max_number_of_messages => 5,
79
+ :attribute_names => ['ApproximateReceiveCount']
80
+ )
81
+ consume
72
82
  end
73
83
  end
74
84
 
75
- it "should check the uniqueness of the message" do
76
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(false)
77
- consumer.consume
78
- end
79
-
80
- it "should yield the message to the handler block" do
81
- expect { |b| consumer.consume(&b) }.to yield_with_args('handle', queue_name, 10, 'message body', 0)
82
- end
83
-
84
- it 'should not yield for a dupe message' do
85
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(true)
86
- expect {|b| consumer.consume(&b) }.not_to yield_control
87
- end
88
-
89
85
  context 'with no messages' do
90
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
91
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, nil) }
86
+ before do
87
+ allow(consumer).to receive(:handle_messages).and_return([])
88
+ end
92
89
 
93
90
  it 'should sleep' do
94
91
  expect(consumer).to receive(:sleep).with(1)
95
- consumer.consume
92
+ consume
96
93
  end
97
94
  end
98
95
 
99
96
  context 'with messages' do
100
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
101
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, message) }
97
+ before do
98
+ allow(consumer).to receive(:duplicate_message?).and_return(false)
99
+ allow(queue).to receive(:receive_messages).and_return(message)
100
+ end
101
+
102
+ it "should check the uniqueness of the message" do
103
+ expect(consumer).to receive(:duplicate_message?)
104
+ consume
105
+ end
106
+
107
+ it "should yield the message to the handler block" do
108
+ expect { |b| consume(&b) }
109
+ .to yield_with_args(
110
+ message.message_id,
111
+ message.receipt_handle,
112
+ queue_name,
113
+ queue.attributes['VisibilityTimeout'].to_i,
114
+ message.body,
115
+ message.attributes['ApproximateReceiveCount'].to_i - 1
116
+ )
117
+ end
102
118
 
103
119
  it 'should not sleep' do
104
120
  expect(consumer).to_not receive(:sleep)
105
- consumer.consume
121
+ consume
106
122
  end
123
+
124
+ context 'with duplicates' do
125
+ before do
126
+ allow(consumer).to receive(:duplicate_message?).and_return(true)
127
+ end
128
+
129
+ it 'should not yield for a dupe message' do
130
+ expect {|b| consume(&b) }.not_to yield_control
131
+ end
132
+ end
133
+ end
134
+ end
135
+
136
+ describe "completing work" do
137
+ it 'deletes the message from the queue' do
138
+ expect(queue).to receive(:delete_messages).with(entries: [{id: message.message_id, receipt_handle: message.receipt_handle}])
139
+ consumer.complete(message.message_id, message.receipt_handle)
107
140
  end
108
141
  end
109
142
 
110
143
  describe '#delay' do
111
- let(:item) { Chore::UnitOfWork.new(message.id, message.queue, 60, message.body, 0, consumer) }
112
- let(:backoff_func) { lambda { |item| 2 } }
144
+ let(:item) { Chore::UnitOfWork.new(message.message_id, message.receipt_handle, message.queue, 60, message.body, 0, consumer) }
145
+ let(:entries) do
146
+ [
147
+ { id: item.id, receipt_handle: item.receipt_handle, visibility_timeout: backoff_func.call(item) },
148
+ ]
149
+ end
113
150
 
114
151
  it 'changes the visiblity of the message' do
115
- expect(queue).to receive(:batch_change_visibility).with(2, [item.id])
152
+ expect(queue).to receive(:change_message_visibility_batch).with(entries: entries)
116
153
  consumer.delay(item, backoff_func)
117
154
  end
118
155
  end
119
156
 
120
157
  describe '#reset_connection!' do
121
158
  it 'should reset the connection after a call to reset_connection!' do
122
- expect(AWS::Core::Http::ConnectionPool).to receive(:pools).and_return([pool])
123
- expect(pool).to receive(:empty!)
159
+ expect(Aws).to receive(:empty_connection_pools!)
124
160
  Chore::Queues::SQS::Consumer.reset_connection!
125
161
  consumer.send(:queue)
126
162
  end
127
163
 
128
164
  it 'should not reset the connection between calls' do
129
- sqs = consumer.send(:queue)
130
- expect(sqs).to be consumer.send(:queue)
165
+ expect(Aws).to receive(:empty_connection_pools!).once
166
+ q = consumer.send(:queue)
167
+ expect(consumer.send(:queue)).to be(q)
131
168
  end
132
169
 
133
170
  it 'should reconfigure sqs' do
@@ -135,13 +172,15 @@ describe Chore::Queues::SQS::Consumer do
135
172
  allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false)
136
173
 
137
174
  allow(queue).to receive(:receive_messages).and_return(message)
138
- consumer.consume
175
+ allow(sqs).to receive(:receive_message).with({:attribute_names=>["ApproximateReceiveCount"], :max_number_of_messages=>10, :queue_url=>queue_url})
176
+
177
+ consume
139
178
 
140
179
  Chore::Queues::SQS::Consumer.reset_connection!
141
- allow(AWS::SQS).to receive(:new).and_return(sqs)
180
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
142
181
 
143
182
  expect(consumer).to receive(:running?).and_return(true, false)
144
- consumer.consume
183
+ consume
145
184
  end
146
185
  end
147
186
  end