chore-core 3.2.3 → 4.0.0

Files changed (36)
  1. checksums.yaml +5 -5
  2. data/LICENSE.txt +1 -1
  3. data/README.md +170 -153
  4. data/chore-core.gemspec +2 -3
  5. data/lib/chore.rb +20 -0
  6. data/lib/chore/cli.rb +1 -2
  7. data/lib/chore/configuration.rb +1 -1
  8. data/lib/chore/consumer.rb +41 -9
  9. data/lib/chore/job.rb +2 -0
  10. data/lib/chore/publisher.rb +18 -2
  11. data/lib/chore/queues/filesystem/consumer.rb +18 -13
  12. data/lib/chore/queues/filesystem/publisher.rb +1 -1
  13. data/lib/chore/queues/sqs.rb +22 -13
  14. data/lib/chore/queues/sqs/consumer.rb +61 -33
  15. data/lib/chore/queues/sqs/publisher.rb +26 -17
  16. data/lib/chore/strategies/consumer/batcher.rb +6 -6
  17. data/lib/chore/strategies/consumer/single_consumer_strategy.rb +5 -5
  18. data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +6 -6
  19. data/lib/chore/strategies/consumer/throttled_consumer_strategy.rb +10 -11
  20. data/lib/chore/strategies/worker/helpers/ipc.rb +0 -1
  21. data/lib/chore/unit_of_work.rb +2 -1
  22. data/lib/chore/version.rb +3 -3
  23. data/lib/chore/worker.rb +4 -4
  24. data/spec/chore/consumer_spec.rb +1 -1
  25. data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +5 -7
  26. data/spec/chore/queues/sqs/consumer_spec.rb +117 -76
  27. data/spec/chore/queues/sqs/publisher_spec.rb +49 -60
  28. data/spec/chore/queues/sqs_spec.rb +32 -41
  29. data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +3 -3
  30. data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +6 -6
  31. data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +1 -1
  32. data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +1 -1
  33. data/spec/chore/worker_spec.rb +21 -21
  34. data/spec/spec_helper.rb +1 -1
  35. data/spec/support/queues/sqs/fake_objects.rb +18 -0
  36. metadata +9 -13
data/chore-core.gemspec CHANGED
@@ -37,11 +37,10 @@ Gem::Specification.new do |s|
  s.summary = "Job processing... for the future!"

  s.add_runtime_dependency(%q<json>, [">= 0"])
- s.add_runtime_dependency(%q<aws-sdk-v1>, ["~> 1.56", ">= 1.56.0"])
+ s.add_runtime_dependency(%q<aws-sdk-sqs>, ["~> 1"])
  s.add_runtime_dependency(%q<thread>, ["~> 0.1.3"])
  s.add_runtime_dependency('get_process_mem', ["~> 0.2.0"])
- s.add_development_dependency(%q<rspec>, ["~> 3.3.0"])
+ s.add_development_dependency(%q<rspec>, ["~> 3.3"])
  s.add_development_dependency(%q<rdoc>, ["~> 3.12"])
  s.add_development_dependency(%q<bundler>, [">= 0"])
  end
-
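The headline change in the gemspec is the runtime dependency: the legacy aws-sdk-v1 gem is replaced by the modular aws-sdk-sqs gem. A minimal, purely illustrative Gemfile for an application moving to chore-core 4.0 might look like the following (the version pins are assumptions for the example, not requirements):

# Gemfile (illustrative)
source 'https://rubygems.org'

gem 'chore-core', '~> 4.0'
# aws-sdk-sqs (~> 1) is now pulled in transitively by chore-core;
# pin it explicitly only if you need to control the exact minor version.
# gem 'aws-sdk-sqs', '~> 1'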
data/lib/chore.rb CHANGED
@@ -63,6 +63,24 @@ module Chore #:nodoc:
  end
  end

+ def self.log_level_to_sym
+ return self.config[:log_level] if self.config[:log_level].is_a?(Symbol)
+ case self.config[:log_level]
+ when 0
+ :debug
+ when 1
+ :info
+ when 2
+ :warn
+ when 3
+ :error
+ when 4
+ :fatal
+ else
+ :unknown
+ end
+ end
+
  # Reopens any open files. This will match any logfile that was opened by Chore,
  # Rails, or any other library.
  def self.reopen_logs
@@ -218,6 +236,8 @@ module Chore #:nodoc:
  end

  # List of queue_names as configured via Chore::Job including their prefix, if set.
+ #
+ # @return [Array<String>]
  def self.prefixed_queue_names
  Chore::Job.job_classes.collect {|klass| c = constantize(klass); c.prefixed_queue_name}
  end
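The new Chore.log_level_to_sym helper translates an integer log level (Ruby's Logger severities, DEBUG=0 through FATAL=4) into the symbol form that the aws-sdk-sqs client expects; it is consumed by the new SQS client factory shown further down in data/lib/chore/queues/sqs.rb. A rough illustration, where seeding the config directly is only an assumption made for the example:

require 'logger'

# Illustration only: with a configured integer level of Logger::WARN (2),
# the helper returns :warn; a Symbol such as :info is passed through as-is.
Chore.config[:log_level] = Logger::WARN   # hypothetical way to seed the config for this example
Chore.log_level_to_sym                    # => :warn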
data/lib/chore/cli.rb CHANGED
@@ -92,8 +92,8 @@ module Chore #:nodoc:
  validate_strategy!
  end

-
  private
+
  def setup_options #:nodoc:
  register_option "queues", "-q", "--queues QUEUE1,QUEUE2", "Names of queues to process (default: all known)" do |arg|
  # This will remove duplicates. We ultimately force this to be a Set further below
@@ -289,4 +289,3 @@ module Chore #:nodoc:
  end
  end
  end
-
data/lib/chore/configuration.rb CHANGED
@@ -1,6 +1,6 @@
  module Chore
  # Wrapper around an OpenStruct to define configuration data
- # (TODO): Add required opts, and validate that they're set
+ # TODO: Add required opts, and validate that they're set
  class Configuration < OpenStruct
  # Helper method to make merging Hashes into OpenStructs easier
  def merge_hash(hsh={})
data/lib/chore/consumer.rb CHANGED
@@ -1,16 +1,17 @@
  module Chore
  # Raised when Chore is booting up, but encounters a set of configuration that is impossible to boot from. Typically
- # you'll find additional information around the cause of the exception by examining the logfiles
+ # you'll find additional information around the cause of the exception by examining the logfiles.
+ # You can raise this exception if your queue is in a terrible state and must shut down.
  class TerribleMistake < Exception
- # You can raise this exception if your queue is in a terrible state and must shut down
  end

- # Base class for a Chore Consumer. Provides the basic interface to adhere to for building custom
- # Chore Consumers.
+ # Base class for a Chore Consumer. Provides the interface that a Chore::Consumer implementation should adhere to.
  class Consumer

  attr_accessor :queue_name

+ # @param [String] queue_name Name of queue to be consumed from
+ # @param [Hash] opts
  def initialize(queue_name, opts={})
  @queue_name = queue_name
  @running = true
@@ -24,18 +25,25 @@ module Chore
  # Consume takes a block with an arity of two. The two params are
  # |message_id,message_body| where message_id is any object that the
  # consumer will need to be able to act on a message later (reject, complete, etc)
- def consume(&block)
+ #
+ # @param [Block] &handler Message handler, used by the calling context (worker) to create & assigns a UnitOfWork
+ def consume(&handler)
  raise NotImplementedError
  end

  # Reject should put a message back on a queue to be processed again later. It takes
  # a message_id as returned via consume.
+ #
+ # @param [String] message_id Unique ID of the message
  def reject(message_id)
  raise NotImplementedError
  end

- # Complete should mark a message as finished. It takes a message_id as returned via consume
- def complete(message_id)
+ # Complete should mark a message as finished.
+ #
+ # @param [String] message_id Unique ID of the message
+ # @param [Hash] receipt_handle Unique ID of the consuming transaction in non-filesystem implementations
+ def complete(message_id, receipt_handle)
  raise NotImplementedError
  end

@@ -45,23 +53,47 @@ module Chore
  end

  # Returns true if the Consumer is currently running
+ #
+ # @return [TrueClass, FalseClass]
  def running?
  @running
  end

- # returns up to n work
+ # Returns up to n work
+ #
+ # @param n
  def provide_work(n)
  raise NotImplementedError
  end

- # now, given an arbitrary key and klass, have we seen the key already?
+ # Determine whether or not we have already seen this message
+ #
+ # @param [String] dedupe_key
+ # @param [Class] klass
+ # @param [Integer] queue_timeout
+ #
+ # @return [TrueClass, FalseClass]
  def duplicate_message?(dedupe_key, klass, queue_timeout)
  dupe_detector.found_duplicate?(:id=>dedupe_key, :queue=>klass.to_s, :visibility_timeout=>queue_timeout)
  end

+ # Instance of duplicate detection implementation class
+ #
+ # @return [DuplicateDetector]
  def dupe_detector
  @dupes ||= DuplicateDetector.new({:servers => Chore.config.dedupe_servers,
  :dupe_on_cache_failure => false})
  end
+
+ private
+
+ # Gets messages from queue implementation and invokes the provided block over each one. Afterwards, the :on_fetch
+ # hook will be invoked per message. This block call provides data necessary for the worker (calling context) to
+ # populate a UnitOfWork struct.
+ #
+ # @param [Block] &handler Message handler, passed along by #consume
+ def handle_messages(&handler)
+ raise NotImplementedError
+ end
  end
  end
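The consumer contract changes in two visible ways: complete now receives a receipt_handle alongside the message_id, and the block invoked per message by handle_messages passes that receipt handle as its second argument (the filesystem and SQS consumers below show both sides). A rough sketch of a custom consumer written against the 4.0 interface; the in-process queue and its message format are hypothetical, not part of chore-core:

require 'chore'

# Sketch only: QUEUE and its [id, body] message format are made up for
# illustration; a real consumer would talk to an actual queueing backend.
class MemoryConsumer < Chore::Consumer
  QUEUE = Queue.new

  def consume(&handler)
    handle_messages(&handler) while running?
  end

  # Nothing to acknowledge for an in-process queue, so receipt_handle is unused.
  def complete(message_id, receipt_handle)
  end

  def reject(message_id)
  end

  private

  # The handler now receives (message_id, receipt_handle, queue_name,
  # queue_timeout, body, previous_attempts); receipt_handle is new in 4.0.
  def handle_messages(&handler)
    id, body = QUEUE.pop
    handler.call(id, nil, queue_name, 300, body, 0)
    Chore.run_hooks_for(:on_fetch, id, body)
  end
end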
data/lib/chore/job.rb CHANGED
@@ -105,6 +105,8 @@ module Chore
  end

  # The name of the configured queue, combined with an optional prefix
+ #
+ # @return [String]
  def prefixed_queue_name
  "#{Chore.config.queue_prefix}#{self.options[:name]}"
  end
data/lib/chore/publisher.rb CHANGED
@@ -1,26 +1,42 @@
  module Chore
- # Base class for Chore Publishers. Provides the bare interface one needs to adhere to when writing custom publishers
+ # Base class for a Chore Publisher. Provides the interface that a Chore::Publisher implementation should adhere to.
  class Publisher
  DEFAULT_OPTIONS = { :encoder => Encoder::JsonEncoder }

  attr_accessor :options

+ # @param [Hash] opts
  def initialize(opts={})
  self.options = DEFAULT_OPTIONS.merge(opts)
  end

  # Publishes the provided +job+ to the queue identified by the +queue_name+. Not designed to be used directly, this
  # method ferries to the publish method on an instance of your configured Publisher.
+ #
+ # @param [String] queue_name Name of queue to be consumed from
+ # @param [Hash] job Job instance definition, will be encoded to JSON
  def self.publish(queue_name,job)
  self.new.publish(queue_name,job)
  end

- # Raises a NotImplementedError. This method should be overridden in your descendent, custom publisher class
+ # Publishes a message to queue
+ #
+ # @param [String] queue_name Name of the SQS queue
+ # @param [Hash] job Job instance definition, will be encoded to JSON
  def publish(queue_name,job)
  raise NotImplementedError
  end
+
+ # Sets a flag that instructs the publisher to reset the connection the next time it's used.
+ # Should be overriden in publishers (but is not required)
+ def self.reset_connection!
+ end
+
  protected

+ # Encodes the job class to format provided by endoder implementation
+ #
+ # @param [Any] job
  def encode_job(job)
  options[:encoder].encode(job)
  end
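The publisher base class now also exposes an optional, no-op self.reset_connection! hook. A minimal sketch of a custom publisher against this interface; the STDOUT "queue" is purely illustrative:

require 'chore'

# Sketch only: writes encoded jobs to STDOUT instead of a real queue.
class StdoutPublisher < Chore::Publisher
  def publish(queue_name, job)
    # encode_job uses the configured encoder (JSON by default)
    puts "#{queue_name}: #{encode_job(job)}"
  end

  # Optional: override when the publisher caches connections that may go stale.
  def self.reset_connection!
  end
end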
data/lib/chore/queues/filesystem/consumer.rb CHANGED
@@ -15,12 +15,12 @@ module Chore
  # Once complete job files are deleted.
  # If rejected they are moved back into new and will be processed again. This may not be the
  # desired behavior long term and we may want to add configuration to this class to allow more
- # creating failure handling and retrying.
+ # creating failure handling and retrying.
  class Consumer < Chore::Consumer
  extend FilesystemQueue

  Chore::CLI.register_option 'fs_queue_root', '--fs-queue-root DIRECTORY', 'Root directory for fs based queue'
-
+
  class << self
  # Cleans up expired in-progress files by making them new again.
  def cleanup(expiration_time, new_dir, in_progress_dir)
@@ -51,7 +51,7 @@ module Chore

  # If the file is non-zero, this means it was successfully written to
  # by a publisher and we can attempt to move it to "in progress".
- #
+ #
  # There is a small window of time where the file can be zero, but
  # the publisher hasn't finished writing to the file yet.
  if !File.zero?(from)
@@ -68,7 +68,7 @@ module Chore
  # The file is empty (zero bytes) and enough time has passed since
  # the file was written that we can safely assume it will never
  # get written to be the publisher.
- #
+ #
  # The scenario where this happens is when the publisher created
  # the file, but the process was killed before it had a chance to
  # actually write the data.
@@ -114,7 +114,7 @@ module Chore

  # The minimum number of seconds to allow to pass between checks for expired
  # jobs on the filesystem.
- #
+ #
  # Since queue times are measured on the order of seconds, 1 second is the
  # smallest duration. It also prevents us from burning a lot of CPU looking
  # at expired jobs when the consumer sleep interval is less than 1 second.
@@ -144,7 +144,7 @@ module Chore
  end

  found_files = false
- handle_jobs do |*args|
+ handle_messages do |*args|
  found_files = true
  yield(*args)
  end
@@ -161,9 +161,14 @@ module Chore

  end

- def complete(id)
- Chore.logger.debug "Completing (deleting): #{id}"
- File.delete(File.join(@in_progress_dir, id))
+ # Deletes the given message from filesystem queue. Since the filesystem is not a remote API, there is no
+ # notion of a "receipt handle".
+ #
+ # @param [String] message_id Unique ID of the message
+ # @param [Hash] receipt_handle Receipt handle of the message. Always nil for the filesystem consumer
+ def complete(message_id, receipt_handle = nil)
+ Chore.logger.debug "Completing (deleting): #{message_id}"
+ File.delete(File.join(@in_progress_dir, message_id))
  rescue Errno::ENOENT
  # The job took too long to complete, was deemed expired, and moved
  # back into "new". Ignore.
@@ -173,7 +178,7 @@ module Chore

  # finds all new job files, moves them to in progress and starts the job
  # Returns a list of the job files processed
- def handle_jobs(&block)
+ def handle_messages(&block)
  self.class.each_file(@new_dir, Chore.config.queue_polling_size) do |job_file|
  Chore.logger.debug "Found a new job #{job_file}"

@@ -186,8 +191,9 @@ module Chore
  job_json = File.read(in_progress_path)
  basename, previous_attempts, * = self.class.file_info(job_file)

- # job_file is just the name which is the job id
- block.call(job_file, queue_name, queue_timeout, job_json, previous_attempts)
+ # job_file is just the name which is the job id. 2nd argument (:receipt_handle) is nil because the
+ # filesystem is dealt with directly, as opposed to being an external API
+ block.call(job_file, nil, queue_name, queue_timeout, job_json, previous_attempts)
  Chore.run_hooks_for(:on_fetch, job_file, job_json)
  end
  end
@@ -203,4 +209,3 @@ module Chore
  end
  end
  end
-
data/lib/chore/queues/filesystem/publisher.rb CHANGED
@@ -11,7 +11,7 @@ module Chore
  include FilesystemQueue

  # use of mutex and file locking should make this both threadsafe and safe for multiple
- # processes to use the same queue directory simultaneously.
+ # processes to use the same queue directory simultaneously.
  def publish(queue_name,job)
  # First try encoding the job to avoid writing empty job files if this fails
  encoded_job = encode_job(job)
data/lib/chore/queues/sqs.rb CHANGED
@@ -1,10 +1,18 @@
  module Chore
  module Queues
  module SQS
+ def self.sqs_client
+ Aws::SQS::Client.new(logger: Chore.logger, log_level: Chore.log_level_to_sym)
+ end
+
  # Helper method to create queues based on the currently known list as provided by your configured Chore::Jobs
  # This is meant to be invoked from a rake task, and not directly.
  # These queues will be created with the default settings, which may not be ideal.
  # This is meant only as a convenience helper for testing, and not as a way to create production quality queues in SQS
+ #
+ # @param [TrueClass, FalseClass] halt_on_existing Raise an exception if the queue already exists
+ #
+ # @return [Array<String>]
  def self.create_queues!(halt_on_existing=false)
  raise 'You must have atleast one Chore Job configured and loaded before attempting to create queues' unless Chore.prefixed_queue_names.length > 0

@@ -20,49 +28,50 @@ module Chore
  end
  end

- #This will raise an exception if AWS has not been configured by the project making use of Chore
- sqs_queues = AWS::SQS.new.queues
  Chore.prefixed_queue_names.each do |queue_name|
  Chore.logger.info "Chore Creating Queue: #{queue_name}"
  begin
- sqs_queues.create(queue_name)
- rescue AWS::SQS::Errors::QueueAlreadyExists
+ sqs_client.create_queue(queue_name: queue_name)
+ rescue Aws::SQS::Errors::QueueAlreadyExists
  Chore.logger.info "exists with different config"
  end
  end
+
  Chore.prefixed_queue_names
  end

  # Helper method to delete all known queues based on the list as provided by your configured Chore::Jobs
  # This is meant to be invoked from a rake task, and not directly.
+ #
+ # @return [Array<String>]
+
  def self.delete_queues!
  raise 'You must have atleast one Chore Job configured and loaded before attempting to create queues' unless Chore.prefixed_queue_names.length > 0
- #This will raise an exception if AWS has not been configured by the project making use of Chore
- sqs_queues = AWS::SQS.new.queues
+
  Chore.prefixed_queue_names.each do |queue_name|
  begin
  Chore.logger.info "Chore Deleting Queue: #{queue_name}"
- url = sqs_queues.url_for(queue_name)
- sqs_queues[url].delete
+ url = sqs_client.get_queue_url(queue_name: queue_name).queue_url
+ sqs_client.delete_queue(queue_url: url)
  rescue => e
  # This could fail for a few reasons - log out why it failed, then continue on
  Chore.logger.error "Deleting Queue: #{queue_name} failed because #{e}"
  end
  end
+
  Chore.prefixed_queue_names
  end

  # Collect a list of queues that already exist
+ #
+ # @return [Array<String>]
  def self.existing_queues
- #This will raise an exception if AWS has not been configured by the project making use of Chore
- sqs_queues = AWS::SQS.new.queues
-
  Chore.prefixed_queue_names.select do |queue_name|
  # If the NonExistentQueue exception is raised we do not care about that queue name.
  begin
- sqs_queues.named(queue_name)
+ sqs_client.get_queue_url(queue_name: queue_name)
  true
- rescue AWS::SQS::Errors::NonExistentQueue
+ rescue Aws::SQS::Errors::NonExistentQueue
  false
  end
  end
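Chore now builds its client through Chore::Queues::SQS.sqs_client, i.e. Aws::SQS::Client.new without explicit keys, so credentials are resolved by the AWS SDK's default provider chain (environment variables, shared credentials file, instance profile, and so on). A hedged sketch of configuring the SDK globally before invoking the rake-style queue helpers; the region and ENV-based credentials are assumptions about your deployment:

require 'aws-sdk-sqs'
require 'chore'

# Illustrative only: adjust region/credential sourcing to your environment.
Aws.config.update(
  region: ENV.fetch('AWS_REGION', 'us-east-1'),
  credentials: Aws::Credentials.new(ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY'])
)

Chore::Queues::SQS.create_queues!   # one queue per configured Chore::Job (testing convenience)
Chore::Queues::SQS.existing_queues  # => names of queues that already exist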
data/lib/chore/queues/sqs/consumer.rb CHANGED
@@ -1,9 +1,6 @@
- require 'aws/sqs'
+ require 'aws-sdk-sqs'
  require 'chore/duplicate_detector'

- AWS.eager_autoload! AWS::Core
- AWS.eager_autoload! AWS::SQS
-
  module Chore
  module Queues
  module SQS
@@ -17,24 +14,29 @@ module Chore
  Chore::CLI.register_option 'aws_secret_key', '--aws-secret-key KEY', 'Valid AWS Secret Key'
  Chore::CLI.register_option 'dedupe_servers', '--dedupe-servers SERVERS', 'List of mememcache compatible server(s) to use for storing SQS Message Dedupe cache'

+ # @param [String] queue_name Name of SQS queue
+ # @param [Hash] opts Options
  def initialize(queue_name, opts={})
  super(queue_name, opts)
-
  raise Chore::TerribleMistake, "Cannot specify a queue polling size greater than 10" if sqs_polling_amount > 10
  end

- # Sets a flag that instructs the publisher to reset the connection the next time it's used
+ # Resets the API client connection and provides @@reset_at so we know when the last time that was done
  def self.reset_connection!
  @@reset_at = Time.now
  end

  # Begins requesting messages from SQS, which will invoke the +&handler+ over each message
+ #
+ # @param [Block] &handler Message handler, used by the calling context (worker) to create & assigns a UnitOfWork
+ #
+ # @return [Array<Aws::SQS::Message>]
  def consume(&handler)
  while running?
  begin
  messages = handle_messages(&handler)
  sleep (Chore.config.consumer_sleep_interval) if messages.empty?
- rescue AWS::SQS::Errors::NonExistentQueue => e
+ rescue Aws::SQS::Errors::NonExistentQueue => e
  Chore.logger.error "You specified a queue '#{queue_name}' that does not exist. You must create the queue before starting Chore. Shutting down..."
  raise Chore::TerribleMistake
  rescue => e
@@ -43,21 +45,34 @@ module Chore
  end
  end

- # Rejects the given message from SQS by +id+. Currently a noop
- def reject(id)
-
+ # Unimplemented. Rejects the given message from SQS.
+ #
+ # @param [String] message_id Unique ID of the SQS message
+ #
+ # @return nil
+ def reject(message_id)
  end

- # Deletes the given message from SQS by +id+
- def complete(id)
- Chore.logger.debug "Completing (deleting): #{id}"
- queue.batch_delete([id])
+ # Deletes the given message from the SQS queue
+ #
+ # @param [String] message_id Unique ID of the SQS message
+ # @param [Hash] receipt_handle Receipt handle (unique per consume request) of the SQS message
+ def complete(message_id, receipt_handle)
+ Chore.logger.debug "Completing (deleting): #{message_id}"
+ queue.delete_messages(entries: [{ id: message_id, receipt_handle: receipt_handle }])
  end

+ # Delays retry of a job by +backoff_calc+ seconds.
+ #
+ # @param [UnitOfWork] item Item to be delayed
+ # @param [Block] backoff_calc Code that determines the backoff.
  def delay(item, backoff_calc)
  delay = backoff_calc.call(item)
  Chore.logger.debug "Delaying #{item.id} by #{delay} seconds"
- queue.batch_change_visibility(delay, [item.id])
+
+ queue.change_message_visibility_batch(entries: [
+ { id: item.id, receipt_handle: item.receipt_handle, visibility_timeout: delay },
+ ])

  return delay
  end
@@ -66,46 +81,59 @@ module Chore

  # Requests messages from SQS, and invokes the provided +&block+ over each one. Afterwards, the :on_fetch
  # hook will be invoked, per message
+ #
+ # @param [Block] &handler Message handler, passed along by #consume
+ #
+ # @return [Array<Aws::SQS::Message>]
  def handle_messages(&block)
- msg = queue.receive_messages(:limit => sqs_polling_amount, :attributes => [:receive_count])
+ msg = queue.receive_messages(:max_number_of_messages => sqs_polling_amount, :attribute_names => ['ApproximateReceiveCount'])
  messages = *msg
+
  messages.each do |message|
- unless duplicate_message?(message.id, message.queue.url, queue_timeout)
- block.call(message.handle, queue_name, queue_timeout, message.body, message.receive_count - 1)
+ unless duplicate_message?(message.message_id, message.queue_url, queue_timeout)
+ block.call(message.message_id, message.receipt_handle, queue_name, queue_timeout, message.body, message.attributes['ApproximateReceiveCount'].to_i - 1)
  end
- Chore.run_hooks_for(:on_fetch, message.handle, message.body)
+ Chore.run_hooks_for(:on_fetch, message.receipt_handle, message.body)
  end
+
  messages
  end

- # Retrieves the SQS queue with the given +name+. The method will cache the results to prevent round trips on
- # subsequent calls. If <tt>reset_connection!</tt> has been called, this will result in the connection being
- # re-initialized, as well as clear any cached results from prior calls
+ # Retrieves the SQS queue object. The method will cache the results to prevent round trips on subsequent calls
+ #
+ # If <tt>reset_connection!</tt> has been called, this will result in the connection being re-initialized,
+ # as well as clear any cached results from prior calls
+ #
+ # @return [Aws::SQS::Queue]
  def queue
  if !@sqs_last_connected || (@@reset_at && @@reset_at >= @sqs_last_connected)
- AWS::Core::Http::ConnectionPool.pools.each do |p|
- p.empty!
- end
+ Aws.empty_connection_pools!
  @sqs = nil
  @sqs_last_connected = Time.now
  @queue = nil
  end
- @queue_url ||= sqs.queues.url_for(@queue_name)
- @queue ||= sqs.queues[@queue_url]
+
+ @queue_url ||= sqs.get_queue_url(queue_name: @queue_name).queue_url
+ @queue ||= Aws::SQS::Queue.new(url: @queue_url, client: sqs)
  end

- # The visibility timeout of the queue for this consumer
+ # The visibility timeout (in seconds) of the queue
+ #
+ # @return [Integer]
  def queue_timeout
- @queue_timeout ||= queue.visibility_timeout
+ @queue_timeout ||= queue.attributes['VisibilityTimeout'].to_i
  end

- # Access to the configured SQS connection object
+ # SQS API client object
+ #
+ # @return [Aws::SQS::Client]
  def sqs
- @sqs ||= AWS::SQS.new(
- :access_key_id => Chore.config.aws_access_key,
- :secret_access_key => Chore.config.aws_secret_key)
+ @sqs ||= Chore::Queues::SQS.sqs_client
  end

+ # Maximum number of messages to retrieve on each request
+ #
+ # @return [Integer]
  def sqs_polling_amount
  Chore.config.queue_polling_size
  end
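Taken together, the SQS consumer now speaks the aws-sdk-sqs resource API end to end. A rough standalone sketch of the same round trip, mirroring the calls above (the queue name is a placeholder), which can be handy for verifying credentials and queue setup outside of Chore:

require 'aws-sdk-sqs'
require 'chore'

client = Chore::Queues::SQS.sqs_client
url    = client.get_queue_url(queue_name: 'my-queue').queue_url   # 'my-queue' is a placeholder
queue  = Aws::SQS::Queue.new(url: url, client: client)

messages = queue.receive_messages(
  max_number_of_messages: 10,
  attribute_names: ['ApproximateReceiveCount']
)

messages.each do |m|
  # ... process m.body ...
  queue.delete_messages(entries: [{ id: m.message_id, receipt_handle: m.receipt_handle }])
end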