chore-core 3.2.3 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. checksums.yaml +5 -5
  2. data/LICENSE.txt +1 -1
  3. data/README.md +170 -153
  4. data/chore-core.gemspec +2 -3
  5. data/lib/chore.rb +20 -0
  6. data/lib/chore/cli.rb +1 -2
  7. data/lib/chore/configuration.rb +1 -1
  8. data/lib/chore/consumer.rb +41 -9
  9. data/lib/chore/job.rb +2 -0
  10. data/lib/chore/publisher.rb +18 -2
  11. data/lib/chore/queues/filesystem/consumer.rb +18 -13
  12. data/lib/chore/queues/filesystem/publisher.rb +1 -1
  13. data/lib/chore/queues/sqs.rb +22 -13
  14. data/lib/chore/queues/sqs/consumer.rb +61 -33
  15. data/lib/chore/queues/sqs/publisher.rb +26 -17
  16. data/lib/chore/strategies/consumer/batcher.rb +6 -6
  17. data/lib/chore/strategies/consumer/single_consumer_strategy.rb +5 -5
  18. data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +6 -6
  19. data/lib/chore/strategies/consumer/throttled_consumer_strategy.rb +10 -11
  20. data/lib/chore/strategies/worker/helpers/ipc.rb +0 -1
  21. data/lib/chore/unit_of_work.rb +2 -1
  22. data/lib/chore/version.rb +3 -3
  23. data/lib/chore/worker.rb +4 -4
  24. data/spec/chore/consumer_spec.rb +1 -1
  25. data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +5 -7
  26. data/spec/chore/queues/sqs/consumer_spec.rb +117 -76
  27. data/spec/chore/queues/sqs/publisher_spec.rb +49 -60
  28. data/spec/chore/queues/sqs_spec.rb +32 -41
  29. data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +3 -3
  30. data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +6 -6
  31. data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +1 -1
  32. data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +1 -1
  33. data/spec/chore/worker_spec.rb +21 -21
  34. data/spec/spec_helper.rb +1 -1
  35. data/spec/support/queues/sqs/fake_objects.rb +18 -0
  36. metadata +9 -13
@@ -3,21 +3,26 @@ require 'chore/publisher'
3
3
  module Chore
4
4
  module Queues
5
5
  module SQS
6
-
7
6
  # SQS Publisher, for writing messages to SQS from Chore
8
7
  class Publisher < Chore::Publisher
9
8
  @@reset_next = true
10
9
 
10
+ # @param [Hash] opts Publisher options
11
11
  def initialize(opts={})
12
12
  super
13
13
  @sqs_queues = {}
14
14
  @sqs_queue_urls = {}
15
15
  end
16
16
 
17
- # Takes a given Chore::Job instance +job+, and publishes it by looking up the +queue_name+.
17
+ # Publishes a message to an SQS queue
18
+ #
19
+ # @param [String] queue_name Name of the SQS queue
20
+ # @param [Hash] job Job instance definition, will be encoded to JSON
21
+ #
22
+ # @return [struct Aws::SQS::Types::SendMessageResult]
18
23
  def publish(queue_name,job)
19
- queue = self.queue(queue_name)
20
- queue.send_message(encode_job(job))
24
+ queue = queue(queue_name)
25
+ queue.send_message(message_body: encode_job(job))
21
26
  end
22
27
 
23
28
  # Sets a flag that instructs the publisher to reset the connection the next time it's used
@@ -25,29 +30,33 @@ module Chore
25
30
  @@reset_next = true
26
31
  end
27
32
 
28
- # Access to the configured SQS connection object
33
+ private
34
+
35
+ # SQS API client object
36
+ #
37
+ # @return [Aws::SQS::Client]
29
38
  def sqs
30
- @sqs ||= AWS::SQS.new(
31
- :access_key_id => Chore.config.aws_access_key,
32
- :secret_access_key => Chore.config.aws_secret_key,
33
- :logger => Chore.logger,
34
- :log_level => :debug)
39
+ @sqs ||= Chore::Queues::SQS.sqs_client
35
40
  end
36
41
 
37
- # Retrieves the SQS queue with the given +name+. The method will cache the results to prevent round trips on subsequent calls
42
+ # Retrieves the SQS queue object. The method will cache the results to prevent round trips on subsequent calls
43
+ #
38
44
  # If <tt>reset_connection!</tt> has been called, this will result in the connection being re-initialized,
39
45
  # as well as clear any cached results from prior calls
46
+ #
47
+ # @param [String] name Name of SQS queue
48
+ #
49
+ # @return [Aws::SQS::Queue]
40
50
  def queue(name)
41
- if @@reset_next
42
- AWS::Core::Http::ConnectionPool.pools.each do |p|
43
- p.empty!
44
- end
51
+ if @@reset_next
52
+ Aws.empty_connection_pools!
45
53
  @sqs = nil
46
54
  @@reset_next = false
47
55
  @sqs_queues = {}
48
56
  end
49
- @sqs_queue_urls[name] ||= self.sqs.queues.url_for(name)
50
- @sqs_queues[name] ||= self.sqs.queues[@sqs_queue_urls[name]]
57
+
58
+ @sqs_queue_urls[name] ||= sqs.get_queue_url(queue_name: name).queue_url
59
+ @sqs_queues[name] ||= Aws::SQS::Queue.new(url: @sqs_queue_urls[name], client: sqs)
51
60
  end
52
61
  end
53
62
  end
@@ -15,17 +15,17 @@ module Chore
15
15
  @running = true
16
16
  end
17
17
 
18
- # The main entry point of the Batcher, <tt>schedule</tt> begins a thread with the provided +batch_timeout+
19
- # as the only argument. While the Batcher is running, it will attempt to check if either the batch is full,
18
+ # The main entry point of the Batcher, <tt>schedule</tt> begins a thread with the provided +batch_timeout+
19
+ # as the only argument. While the Batcher is running, it will attempt to check if either the batch is full,
20
20
  # or if the +batch_timeout+ has elapsed since the oldest message was added. If either case is true, the
21
21
  # items in the batch will be executed.
22
- #
22
+ #
23
23
  # Calling <tt>stop</tt> will cause the thread to finish it's current check, and exit
24
24
  def schedule(batch_timeout)
25
25
  @thread = Thread.new(batch_timeout) do |timeout|
26
- Chore.logger.info "Batching timeout thread starting"
26
+ Chore.logger.info "Batching thread starting with #{batch_timeout} second timeout"
27
27
  while @running do
28
- begin
28
+ begin
29
29
  oldest_item = @batch.first
30
30
  timestamp = oldest_item && oldest_item.created_at
31
31
  Chore.logger.debug "Oldest message in batch: #{timestamp}, size: #{@batch.size}"
@@ -33,7 +33,7 @@ module Chore
33
33
  Chore.logger.debug "Batching timeout reached (#{timestamp + timeout}), current size: #{@batch.size}"
34
34
  self.execute(true)
35
35
  end
36
- sleep(1)
36
+ sleep(1)
37
37
  rescue => e
38
38
  Chore.logger.error "Batcher#schedule raised an exception: #{e.inspect}"
39
39
  end
@@ -10,16 +10,16 @@ module Chore
10
10
  end
11
11
 
12
12
  # Begins fetching from the configured queue by way of the configured Consumer. This can only be used if you have a
13
- # single queue which can be kept up with at a relatively low volume. If you have more than a single queue configured,
14
- # it will raise an exception.
13
+ # single queue which can be kept up with at a relatively low volume. If you have more than a single queue
14
+ # configured, it will raise an exception.
15
15
  def fetch
16
16
  Chore.logger.debug "Starting up consumer strategy: #{self.class.name}"
17
17
  queues = Chore.config.queues
18
18
  raise "When using SingleConsumerStrategy only one queue can be defined. Queues: #{queues}" unless queues.size == 1
19
-
19
+
20
20
  @consumer = Chore.config.consumer.new(queues.first)
21
- @consumer.consume do |id,queue_name,queue_timeout,body,previous_attempts|
22
- work = UnitOfWork.new(id, queue_name, queue_timeout, body, previous_attempts, @consumer)
21
+ @consumer.consume do |message_id, message_receipt_handle, queue_name, queue_timeout, body, previous_attempts|
22
+ work = UnitOfWork.new(message_id, message_receipt_handle, queue_name, queue_timeout, body, previous_attempts, @consumer)
23
23
  @fetcher.manager.assign(work)
24
24
  end
25
25
  end
@@ -23,7 +23,7 @@ module Chore
23
23
  Chore.logger.debug "Starting up consumer strategy: #{self.class.name}"
24
24
  threads = []
25
25
  Chore.config.queues.each do |queue|
26
- Chore.config.threads_per_queue.times do
26
+ Chore.config.threads_per_queue.times do
27
27
  if running?
28
28
  threads << start_consumer_thread(queue)
29
29
  end
@@ -32,7 +32,7 @@ module Chore
32
32
 
33
33
  threads.each(&:join)
34
34
  end
35
-
35
+
36
36
  # If the ThreadedConsumerStrategy is currently running <tt>stop!</tt> will begin signalling it to stop
37
37
  # It will stop the batcher from forking more work, as well as set a flag which will disable it's own consuming
38
38
  # threads once they finish with their current work.
@@ -49,21 +49,21 @@ module Chore
49
49
  @running
50
50
  end
51
51
 
52
- private
52
+ private
53
53
  # Starts a consumer thread for polling the given +queue+.
54
54
  # If <tt>stop!<tt> is called, the threads will shut themsevles down.
55
55
  def start_consumer_thread(queue)
56
56
  t = Thread.new(queue) do |tQueue|
57
57
  begin
58
58
  consumer = Chore.config.consumer.new(tQueue)
59
- consumer.consume do |id, queue_name, queue_timeout, body, previous_attempts|
59
+ consumer.consume do |message_id, message_receipt_handle, queue_name, queue_timeout, body, previous_attempts|
60
60
  # Quick hack to force this thread to end it's work
61
61
  # if we're shutting down. Could be delayed due to the
62
62
  # weird sometimes-blocking nature of SQS.
63
63
  consumer.stop if !running?
64
- Chore.logger.debug { "Got message: #{id}"}
64
+ Chore.logger.debug { "Got message: #{message_id}"}
65
65
 
66
- work = UnitOfWork.new(id, queue_name, queue_timeout, body, previous_attempts, consumer)
66
+ work = UnitOfWork.new(message_id, message_receipt_handle, queue_name, queue_timeout, body, previous_attempts, consumer)
67
67
  Chore.run_hooks_for(:consumed_from_source, work)
68
68
  @batcher.add(work)
69
69
  end
@@ -65,7 +65,7 @@ module Chore
65
65
  end
66
66
 
67
67
  # Gives work back to the queue in case it couldn't be assigned
68
- #
68
+ #
69
69
  # This will go into a separate queue so that it will be prioritized
70
70
  # over other work that hasn't been attempted yet. It also avoids
71
71
  # a deadlock where @queue is full and the master is waiting to return
@@ -100,22 +100,21 @@ module Chore
100
100
  end
101
101
 
102
102
  def create_work_units(consumer)
103
- consumer.consume do |id, queue, timeout, body, previous_attempts|
104
- # Note: The unit of work object contains a consumer object that when
105
- # used to consume from SQS, would have a mutex (that comes as a part
106
- # of the AWS sdk); When sending these objects across from one process
107
- # to another, we cannot send this across (becasue of the mutex). To
103
+ consumer.consume do |message_id, message_receipt_handle, queue, timeout, body, previous_attempts|
104
+ # Note: The unit of work object contains a consumer object that when
105
+ # used to consume from SQS, would have a mutex (that comes as a part
106
+ # of the AWS sdk); When sending these objects across from one process
107
+ # to another, we cannot send this across (becasue of the mutex). To
108
108
  # work around this, we simply ignore the consumer object when creating
109
- # the unit of work object, and when the worker recieves the work
110
- # object, it assigns it a consumer object.
109
+ # the unit of work object, and when the worker recieves the work
110
+ # object, it assigns it a consumer object.
111
111
  # (to allow for communication back to the queue it was consumed from)
112
- work = UnitOfWork.new(id, queue, timeout, body,
113
- previous_attempts)
112
+ work = UnitOfWork.new(message_id, message_receipt_handle, queue, timeout, body, previous_attempts)
114
113
  Chore.run_hooks_for(:consumed_from_source, work)
115
114
  @queue.push(work) if running?
116
115
  Chore.run_hooks_for(:added_to_queue, work)
117
116
  end
118
117
  end
119
- end # ThrottledConsumerStrategyyeah
118
+ end # ThrottledConsumerStrategy
120
119
  end
121
120
  end # Chore
@@ -75,7 +75,6 @@ module Chore
75
75
 
76
76
  private
77
77
 
78
- # TODO: do we need this as a optional param
79
78
  def socket_file
80
79
  "./prefork_worker_sock-#{Process.pid}"
81
80
  end
@@ -2,13 +2,14 @@ module Chore
2
2
  # Simple class to hold job processing information.
3
3
  # Has six attributes:
4
4
  # * +:id+ The queue implementation specific identifier for this message.
5
+ # * +:receipt_handle+ The queue implementation specific identifier for the receipt of this message.
5
6
  # * +:queue_name+ The name of the queue the job came from
6
7
  # * +:queue_timeout+ The time (in seconds) before the job will get re-enqueued if not processed
7
8
  # * +:message+ The actual data of the message.
8
9
  # * +:previous_attempts+ The number of times the work has been attempted previously.
9
10
  # * +:consumer+ The consumer instance used to fetch this message. Most queue implementations won't need access to this, but some (RabbitMQ) will. So we
10
11
  # make sure to pass it along with each message. This instance will be used by the Worker for things like <tt>complete</tt> and </tt>reject</tt>.
11
- class UnitOfWork < Struct.new(:id,:queue_name,:queue_timeout,:message,:previous_attempts,:consumer,:decoded_message, :klass)
12
+ class UnitOfWork < Struct.new(:id, :receipt_handle, :queue_name, :queue_timeout, :message, :previous_attempts, :consumer, :decoded_message, :klass)
12
13
  # The time at which this unit of work was created
13
14
  attr_accessor :created_at
14
15
 
data/lib/chore/version.rb CHANGED
@@ -1,8 +1,8 @@
1
1
  module Chore
2
2
  module Version #:nodoc:
3
- MAJOR = 3
4
- MINOR = 2
5
- PATCH = 3
3
+ MAJOR = 4
4
+ MINOR = 0
5
+ PATCH = 0
6
6
 
7
7
  STRING = [ MAJOR, MINOR, PATCH ].join('.')
8
8
  end
data/lib/chore/worker.rb CHANGED
@@ -56,7 +56,7 @@ module Chore
56
56
 
57
57
  if item.consumer.duplicate_message?(dedupe_key, item.klass, item.queue_timeout)
58
58
  Chore.logger.info { "Found and deleted duplicate job #{item.klass}"}
59
- item.consumer.complete(item.id)
59
+ item.consumer.complete(item.id, item.receipt_handle)
60
60
  return true
61
61
  end
62
62
  end
@@ -89,7 +89,7 @@ module Chore
89
89
  Chore.logger.error { "Failed to run job for #{item.message} with error: #{e.message} #{e.backtrace * "\n"}" }
90
90
  if item.current_attempt >= Chore.config.max_attempts
91
91
  Chore.run_hooks_for(:on_permanent_failure,item.queue_name,item.message,e)
92
- item.consumer.complete(item.id)
92
+ item.consumer.complete(item.id, item.receipt_handle)
93
93
  else
94
94
  Chore.run_hooks_for(:on_failure,item.message,e)
95
95
  item.consumer.reject(item.id)
@@ -112,7 +112,7 @@ module Chore
112
112
  begin
113
113
  Chore.logger.info { "Running job #{klass} with params #{message}"}
114
114
  perform_job(klass,message)
115
- item.consumer.complete(item.id)
115
+ item.consumer.complete(item.id, item.receipt_handle)
116
116
  Chore.logger.info { "Finished job #{klass} with params #{message}"}
117
117
  klass.run_hooks_for(:after_perform, message)
118
118
  Chore.run_hooks_for(:worker_ended, item)
@@ -141,7 +141,7 @@ module Chore
141
141
  Chore.logger.error { "Failed to run job #{item.message} with error: #{e.message} at #{e.backtrace * "\n"}" }
142
142
  if item.current_attempt >= klass.options[:max_attempts]
143
143
  klass.run_hooks_for(:on_permanent_failure,item.queue_name,message,e)
144
- item.consumer.complete(item.id)
144
+ item.consumer.complete(item.id, item.receipt_handle)
145
145
  else
146
146
  klass.run_hooks_for(:on_failure, message, e)
147
147
  item.consumer.reject(item.id)
@@ -31,6 +31,6 @@ describe Chore::Consumer do
31
31
  end
32
32
 
33
33
  it 'should not have an implemented complete method' do
34
- expect { consumer.complete(message) }.to raise_error(NotImplementedError)
34
+ expect { consumer.complete(message, nil) }.to raise_error(NotImplementedError)
35
35
  end
36
36
  end
@@ -121,7 +121,7 @@ describe Chore::Queues::Filesystem::Consumer do
121
121
  end
122
122
 
123
123
  it "should consume a published job and yield the job to the handler block" do
124
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
124
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 0)
125
125
  end
126
126
 
127
127
  context "rejecting a job" do
@@ -136,7 +136,7 @@ describe Chore::Queues::Filesystem::Consumer do
136
136
  expect(rejected).to be true
137
137
 
138
138
  Timecop.freeze(Time.now + 61) do
139
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 1)
139
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 1)
140
140
  end
141
141
  end
142
142
  end
@@ -145,12 +145,11 @@ describe Chore::Queues::Filesystem::Consumer do
145
145
  let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
146
146
 
147
147
  it "should remove job on completion" do
148
- completed = false
148
+
149
149
  consumer.consume do |job_id, queue_name, job_hash|
150
+ expect(File).to receive(:delete).with(kind_of(String))
150
151
  consumer.complete(job_id)
151
- completed = true
152
152
  end
153
- expect(completed).to be true
154
153
 
155
154
  expect { |b| consumer.consume(&b) }.to_not yield_control
156
155
  end
@@ -160,7 +159,7 @@ describe Chore::Queues::Filesystem::Consumer do
160
159
  let(:timeout) { 30 }
161
160
 
162
161
  it "should consume a published job and yield the job to the handler block" do
163
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 30, test_job_hash.to_json, 0)
162
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 30, test_job_hash.to_json, 0)
164
163
  end
165
164
  end
166
165
  end
@@ -172,4 +171,3 @@ describe Chore::Queues::Filesystem::Consumer do
172
171
  end
173
172
  end
174
173
  end
175
-
@@ -1,131 +1,170 @@
1
1
  require 'spec_helper'
2
2
 
3
3
  describe Chore::Queues::SQS::Consumer do
4
- let(:queue_name) { "test" }
5
- let(:queue_url) { "test_url" }
6
- let(:queues) { double("queues") }
7
- let(:queue) { double("test_queue", :visibility_timeout=>10, :url=>"test_queue", :name=>"test_queue") }
4
+ include_context 'fake objects'
5
+
8
6
  let(:options) { {} }
9
7
  let(:consumer) { Chore::Queues::SQS::Consumer.new(queue_name) }
10
- let(:message) { TestMessage.new("handle",queue, "message body", 1) }
11
- let(:message_data) {{:id=>message.id, :queue=>message.queue.url, :visibility_timeout=>message.queue.visibility_timeout}}
12
- let(:pool) { double("pool") }
13
- let(:sqs) { double('AWS::SQS') }
14
- let(:backoff_func) { nil }
8
+ let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']} }
9
+ let(:backoff_func) { Proc.new { 2 + 2 } }
10
+
11
+ let(:receive_message_result) { Aws::SQS::Message::Collection.new([message], size: 1) }
12
+
13
+ let(:message) do
14
+ Aws::SQS::Message.new(
15
+ message_id: 'message id',
16
+ receipt_handle: "receipt_handle",
17
+ body: job.to_json,
18
+ data: job,
19
+ queue: queue,
20
+ queue_url: queue_url,
21
+ )
22
+ end
15
23
 
16
- before do
17
- allow(AWS::SQS).to receive(:new).and_return(sqs)
18
- allow(sqs).to receive(:queues) { queues }
24
+ # Since a message handler is required (but not validated), this convenience method lets us
25
+ # effectively stub the block.
26
+ def consume(&block)
27
+ block = Proc.new{} unless block_given?
28
+ consumer.consume(&block)
29
+ end
19
30
 
20
- allow(queues).to receive(:url_for) { queue_url }
21
- allow(queues).to receive(:[]) { queue }
22
- allow(queue).to receive(:receive_message) { message }
23
- allow(pool).to receive(:empty!) { nil }
31
+ before do
32
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
33
+ allow(Aws::SQS::Queue).to receive(:new).and_return(queue)
34
+ allow(queue).to receive(:receive_messages).and_return(receive_message_result)
35
+ allow(message).to receive(:attributes).and_return({ 'ApproximateReceiveCount' => rand(10) })
24
36
  end
25
37
 
26
38
  describe "consuming messages" do
27
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, false) }
28
- let!(:messages_be_unique) { allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false) }
29
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message) }
30
-
31
- it 'should configure sqs' do
32
- allow(Chore.config).to receive(:aws_access_key).and_return('key')
33
- allow(Chore.config).to receive(:aws_secret_key).and_return('secret')
34
-
35
- expect(AWS::SQS).to receive(:new).with(
36
- :access_key_id => 'key',
37
- :secret_access_key => 'secret'
38
- ).and_return(sqs)
39
- consumer.consume
39
+ before do
40
+ allow(consumer).to receive(:running?).and_return(true, false)
40
41
  end
41
42
 
42
- it 'should not configure sqs multiple times' do
43
- allow(consumer).to receive(:running?).and_return(true, true, false)
43
+ context "should create objects for interacting with the SQS API" do
44
+ it 'should create an sqs client' do
45
+ expect(queue).to receive(:receive_messages)
46
+ consume
47
+ end
44
48
 
45
- expect(AWS::SQS).to receive(:new).once.and_return(sqs)
46
- consumer.consume
47
- end
49
+ it "should only create an sqs client when one doesn't exist" do
50
+ allow(consumer).to receive(:running?).and_return(true, true, true, true, false, true, true)
51
+ expect(Aws::SQS::Client).to receive(:new).exactly(:once)
52
+ consume
53
+ end
48
54
 
49
- it 'should look up the queue url based on the queue name' do
50
- expect(queues).to receive(:url_for).with('test').and_return(queue_url)
51
- consumer.consume
52
- end
55
+ it 'should look up the queue url based on the queue name' do
56
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
57
+ consume
58
+ end
53
59
 
54
- it 'should look up the queue based on the queue url' do
55
- expect(queues).to receive(:[]).with(queue_url).and_return(queue)
56
- consumer.consume
60
+ it 'should create a queue object' do
61
+ expect(consumer.send(:queue)).to_not be_nil
62
+ consume
63
+ end
57
64
  end
58
65
 
59
66
  context "should receive a message from the queue" do
60
-
61
67
  it 'should use the default size of 10 when no queue_polling_size is specified' do
62
- expect(queue).to receive(:receive_messages).with(:limit => 10, :attributes => [:receive_count])
63
- consumer.consume
68
+ expect(queue).to receive(:receive_messages).with(
69
+ :max_number_of_messages => 10,
70
+ :attribute_names => ['ApproximateReceiveCount']
71
+ ).and_return(message)
72
+ consume
64
73
  end
65
74
 
66
75
  it 'should respect the queue_polling_size when specified' do
67
76
  allow(Chore.config).to receive(:queue_polling_size).and_return(5)
68
- expect(queue).to receive(:receive_messages).with(:limit => 5, :attributes => [:receive_count])
69
- consumer.consume
77
+ expect(queue).to receive(:receive_messages).with(
78
+ :max_number_of_messages => 5,
79
+ :attribute_names => ['ApproximateReceiveCount']
80
+ )
81
+ consume
70
82
  end
71
83
  end
72
84
 
73
- it "should check the uniqueness of the message" do
74
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(false)
75
- consumer.consume
76
- end
77
-
78
- it "should yield the message to the handler block" do
79
- expect { |b| consumer.consume(&b) }.to yield_with_args('handle', queue_name, 10, 'message body', 0)
80
- end
81
-
82
- it 'should not yield for a dupe message' do
83
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(true)
84
- expect {|b| consumer.consume(&b) }.not_to yield_control
85
- end
86
-
87
85
  context 'with no messages' do
88
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
89
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, nil) }
86
+ before do
87
+ allow(consumer).to receive(:handle_messages).and_return([])
88
+ end
90
89
 
91
90
  it 'should sleep' do
92
91
  expect(consumer).to receive(:sleep).with(1)
93
- consumer.consume
92
+ consume
94
93
  end
95
94
  end
96
95
 
97
96
  context 'with messages' do
98
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
99
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, message) }
97
+ before do
98
+ allow(consumer).to receive(:duplicate_message?).and_return(false)
99
+ allow(queue).to receive(:receive_messages).and_return(message)
100
+ end
101
+
102
+ it "should check the uniqueness of the message" do
103
+ expect(consumer).to receive(:duplicate_message?)
104
+ consume
105
+ end
106
+
107
+ it "should yield the message to the handler block" do
108
+ expect { |b| consume(&b) }
109
+ .to yield_with_args(
110
+ message.message_id,
111
+ message.receipt_handle,
112
+ queue_name,
113
+ queue.attributes['VisibilityTimeout'].to_i,
114
+ message.body,
115
+ message.attributes['ApproximateReceiveCount'].to_i - 1
116
+ )
117
+ end
100
118
 
101
119
  it 'should not sleep' do
102
120
  expect(consumer).to_not receive(:sleep)
103
- consumer.consume
121
+ consume
104
122
  end
123
+
124
+ context 'with duplicates' do
125
+ before do
126
+ allow(consumer).to receive(:duplicate_message?).and_return(true)
127
+ end
128
+
129
+ it 'should not yield for a dupe message' do
130
+ expect {|b| consume(&b) }.not_to yield_control
131
+ end
132
+ end
133
+ end
134
+ end
135
+
136
+ describe "completing work" do
137
+ it 'deletes the message from the queue' do
138
+ expect(queue).to receive(:delete_messages).with(entries: [{id: message.message_id, receipt_handle: message.receipt_handle}])
139
+ consumer.complete(message.message_id, message.receipt_handle)
105
140
  end
106
141
  end
107
142
 
108
143
  describe '#delay' do
109
- let(:item) { Chore::UnitOfWork.new(message.id, message.queue, 60, message.body, 0, consumer) }
110
- let(:backoff_func) { lambda { |item| 2 } }
144
+ let(:item) { Chore::UnitOfWork.new(message.message_id, message.receipt_handle, message.queue, 60, message.body, 0, consumer) }
145
+ let(:entries) do
146
+ [
147
+ { id: item.id, receipt_handle: item.receipt_handle, visibility_timeout: backoff_func.call(item) },
148
+ ]
149
+ end
111
150
 
112
151
  it 'changes the visiblity of the message' do
113
- expect(queue).to receive(:batch_change_visibility).with(2, [item.id])
152
+ expect(queue).to receive(:change_message_visibility_batch).with(entries: entries)
114
153
  consumer.delay(item, backoff_func)
115
154
  end
116
155
  end
117
156
 
118
157
  describe '#reset_connection!' do
119
158
  it 'should reset the connection after a call to reset_connection!' do
120
- expect(AWS::Core::Http::ConnectionPool).to receive(:pools).and_return([pool])
121
- expect(pool).to receive(:empty!)
159
+ expect(Aws).to receive(:empty_connection_pools!)
122
160
  Chore::Queues::SQS::Consumer.reset_connection!
123
161
  consumer.send(:queue)
124
162
  end
125
163
 
126
164
  it 'should not reset the connection between calls' do
127
- sqs = consumer.send(:queue)
128
- expect(sqs).to be consumer.send(:queue)
165
+ expect(Aws).to receive(:empty_connection_pools!).once
166
+ q = consumer.send(:queue)
167
+ expect(consumer.send(:queue)).to be(q)
129
168
  end
130
169
 
131
170
  it 'should reconfigure sqs' do
@@ -133,13 +172,15 @@ describe Chore::Queues::SQS::Consumer do
133
172
  allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false)
134
173
 
135
174
  allow(queue).to receive(:receive_messages).and_return(message)
136
- consumer.consume
175
+ allow(sqs).to receive(:receive_message).with({:attribute_names=>["ApproximateReceiveCount"], :max_number_of_messages=>10, :queue_url=>queue_url})
176
+
177
+ consume
137
178
 
138
179
  Chore::Queues::SQS::Consumer.reset_connection!
139
- allow(AWS::SQS).to receive(:new).and_return(sqs)
180
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
140
181
 
141
182
  expect(consumer).to receive(:running?).and_return(true, false)
142
- consumer.consume
183
+ consume
143
184
  end
144
185
  end
145
186
  end