tom_queue 0.0.1.dev

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,136 @@
1
+ require 'active_support/concern'
2
+
3
+ module TomQueue
4
+
5
+ # Public: This module can be mixed into a class to make that class
6
+ # a consumer of messages to an AMQP exchange. Mixing in this class
7
+ # provides the necessary methods, but you need to configure which
8
+ # exchange the messages get pulled from.
9
+ #
10
+ # Example:
11
+ #
12
+ # class MyAwesomeConsumer
13
+ #
14
+ # include TomQueue::ExternalConsumer
15
+ # bind_exchange(:fanout, 'exchange_name') do |work|
16
+ # .. do something with a work (a TomQueue::Work object) ..
17
+ # end
18
+ #
19
+ # end
20
+ #
21
+ # Then you just need to register the consumer with TomQueue for it to receive
22
+ # messages:
23
+ #
24
+ # TomQueue.consumers << MyAwesomeConsumer
25
+ #
26
+ # In addition to receiving messages, this mixin also adds a producer accessor
27
+ # which returns an object that can be used to publish a message to the appropriate
28
+ # exchange. Example:
29
+ #
30
+ #   MyAwesomeConsumer.producer.publish("my message here")
31
+ #
32
+ # which will pass the message to the consumer block
33
+ #
34
+ # Block behaviour
35
+ # ---------------
36
+ #
37
+ # You should do the minimum work necessary in the bind_exchange block ideally just
38
+ # creating a delayed job object to do the actual work of reacting to the message.
39
+ #
40
+ # If you return a Delayed::Job record to the block caller, then the worker will immediately
41
+ # perform that job. Also, if you omit the block entirely, there is a default block provided
42
+ # that carries out the following:
43
+ #
44
+ # class MyAwesomeConsumer
45
+ # bind_exchange(...) do |work|
46
+ # new(work.payload, work.headers).delay.perform
47
+ # end
48
+ #
49
+ # def initialize(payload, headers)
50
+ # ...
51
+ # end
52
+ #
53
+ # def perform
54
+ # ... do something! ...
55
+ # end
56
+ # end
57
+ #
58
+ # This returns a Delayed::Job instance (as per the behaviour of the .delay method) which is then
59
+ # immediately called. If your block looks like the above, then you can omit it entirely!
60
+ #
61
+ module ExternalConsumer
62
+
63
+     # This class is the producer used to publish messages to this consumer's exchange.
64
+ class Producer
65
+ def initialize(type, name, opts={}, *args)
66
+ @type, @name, @opts = type, name, opts
67
+ @encoder = opts.fetch(:encoder, nil)
68
+ end
69
+
70
+ # Public: Push a message to the AMQP exchange associated with this consumer
71
+ def publish(message)
72
+ message = @encoder.encode(message) if @encoder
73
+ routing_key = @opts.fetch(:routing_key, nil)
74
+ auto_delete = @opts.fetch(:auto_delete, false)
75
+ durable = @opts.fetch(:durable, true)
76
+
77
+ Delayed::Job.tomqueue_manager.channel.exchange(@name, :type => @type, :auto_delete => auto_delete, :durable => durable).publish(message, :routing_key => routing_key)
78
+ end
79
+ end
80
+
81
+ extend ActiveSupport::Concern
82
+
83
+ module ClassMethods
84
+ # Public: Binds this consumer to an AMQP exchange.
85
+ #
86
+ # type - the type of exchange, from :direct, :fanout, :topic or :headers
87
+ # name - the name of the exchange
88
+ # opts - some options:
89
+ # :priority = a TomQueue priority constant (defaults to TomQueue::NORMAL_PRIORITY)
90
+ # :durable = should this exchange be durable (defaults to true)
91
+ # &block - called when a message is received
92
+ #
93
+ def bind_exchange(type, name, opts={}, &block)
94
+ encoder = opts.fetch(:encoder, nil)
95
+ block ||= lambda do |work|
96
+ payload = if encoder
97
+ encoder.decode(work.payload)
98
+ else
99
+ work.payload
100
+ end
101
+ new(payload, work.headers).delay.perform
102
+ end
103
+ @bind_exchange = [type, name, opts, block]
104
+ end
105
+
106
+ # Public: Create and return a producer for the consumer
107
+ #
108
+ # Returns TomQueue::ExternalConsumer::Producer object
109
+ def producer
110
+ TomQueue::ExternalConsumer::Producer.new(*@bind_exchange)
111
+ end
112
+
113
+ def claim_work?(work)
114
+ type, name, opts, block = @bind_exchange
115
+
116
+ (work.response.exchange == @bind_exchange[1]) ? @bind_exchange.last : false
117
+ end
118
+
119
+ def setup_binding(manager)
120
+ type, name, opts, block = @bind_exchange
121
+ encoder = opts.fetch(:encoder, nil)
122
+ priority = opts.fetch(:priority, TomQueue::NORMAL_PRIORITY)
123
+ routing_key = opts.fetch(:routing_key, nil)
124
+ auto_delete = opts.fetch(:auto_delete, false)
125
+ durable = opts.fetch(:durable, true)
126
+
127
+ # make sure the exchange is declared
128
+ manager.channel.exchange(name, :type => type, :auto_delete => auto_delete, :durable => durable)
129
+ manager.queues[priority].bind(name, :routing_key => routing_key)
130
+ end
131
+ end
132
+
133
+ end
134
+ end
135
+
136
+
@@ -0,0 +1,19 @@
1
+ module TomQueue
2
+
3
+ module LoggingHelper
4
+ def self.included(base)
5
+ base.extend(TomQueue::LoggingHelper)
6
+ end
7
+
8
+ [:debug, :info, :warn, :error].each do |level|
9
+ eval <<-RUBY
10
+ def #{level}(message=nil, &block)
11
+ if TomQueue.logger && TomQueue.logger.#{level}?
12
+ message ||= yield if block_given?
13
+ TomQueue.logger.#{level}(message)
14
+ end
15
+ end
16
+ RUBY
17
+ end
18
+ end
19
+ end
@@ -0,0 +1,264 @@
1
+ require 'bunny'
2
+ module TomQueue
3
+
4
+
5
+ # Public: Priority values for QueueManager#publish
6
+ #
7
+ # Rather than an arbitrary numeric scale, we use distinct
8
+ # priority values, one should be selected depending on the
9
+ # type and use-case of the work.
10
+ #
11
+ # The scheduler simply trumps lower-priority jobs with higher
12
+ # priority jobs. So ensure you don't saturate the worker with many
13
+ # or lengthy high priority jobs as you'll negatively impact normal
14
+ # and bulk jobs.
15
+ #
16
+ # HIGH_PRIORITY - use where the job is relatively short and the
17
+ # user is waiting on completion. For example sending a password
18
+ # reset email.
19
+ #
20
+   #  NORMAL_PRIORITY - use for longer interactive tasks (rebuilding ledgers?)
21
+ #
22
+ # BULK_PRIORITY - typically when you want to schedule lots of work to be done
23
+ # at some point in the future - background emailing, cron-triggered
24
+ # syncs, etc.
25
+ #
26
+ HIGH_PRIORITY = "high"
27
+ NORMAL_PRIORITY = "normal"
28
+ LOW_PRIORITY = "low"
29
+ BULK_PRIORITY = "bulk"
30
+
31
+ # Internal: A list of all the known priority values
32
+ #
33
+ # This array is where the priority ordering comes from, so get the
34
+ # order right!
35
+ PRIORITIES = [HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY, BULK_PRIORITY].freeze
36
+ DEFAULT_PRIORITY = LOW_PRIORITY
37
+
38
+ # Public: This is your interface to pushing work onto and
39
+ # pulling work off the work queue. Instantiate one of these
40
+ # and, if you're planning on operating as a consumer, then call
41
+ # become_consumer!
42
+ #
43
+ class QueueManager
44
+
45
+ include LoggingHelper
46
+
47
+ # Public: Return the string used as a prefix for all queues and exchanges
48
+ attr_reader :prefix
49
+
50
+ # Public: Returns the instance of Bunny that this object uses
51
+ attr_reader :bunny
52
+
53
+ # Internal: The work queues used by consumers
54
+ #
55
+ # Internal, this is an implementation detail. Accessor is mainly for
56
+ # convenient testing
57
+ #
58
+ # Returns a hash of { "priority" => <Bunny::Queue>, ... }
59
+ attr_reader :queues
60
+
61
+ # Internal: The exchange to which work is published
62
+ #
63
+ # Internal, this is an implementation detail. Accessor is mainly for
64
+ # convenient testing
65
+ #
66
+ # Returns Bunny::Exchange instance.
67
+ attr_reader :exchange
68
+
69
+ class PersistentWorkPool < ::Bunny::ConsumerWorkPool
70
+ def kill
71
+ end
72
+ end
73
+
74
+ attr_reader :channel
75
+
76
+ # Public: Create the manager.
77
+ #
78
+ # name - used as a prefix for AMQP exchanges and queues.
79
+ # (this will default to TomQueue.default_prefix if set)
80
+ #
81
+ # NOTE: All consumers and producers sharing work must have the same
82
+ # prefix value.
83
+ #
84
+ # Returns an instance, duh!
85
+ def initialize(prefix = nil, ident=nil)
86
+ @ident = ident
87
+ @bunny = TomQueue.bunny
88
+ @prefix = prefix || TomQueue.default_prefix || raise(ArgumentError, 'prefix is required')
89
+
90
+       # We create our own work pool so we don't continually create and
91
+ # destroy threads. This pool ignores the kill commands issued by
92
+ # the channels, so stays running, and is shared by all channels.
93
+ @work_pool = PersistentWorkPool.new(4)
94
+
95
+ # These are used when we block waiting for new messages, we declare here
96
+ # so we're not constantly blowing them away and re-creating.
97
+ @mutex = Mutex.new
98
+ @condvar = ConditionVariable.new
99
+
100
+ # Call the initial setup_amqp! to create the channels, exchanges and queues
101
+ setup_amqp!
102
+ end
103
+
104
+ # Internal: Opens channels and declares the necessary queues, exchanges and bindings
105
+ #
106
+ # As a convenience to tests, this will tear-down any existing connections, so it is
107
+ # possible to simulate a failed connection by calling this a second time.
108
+ #
109
+     # Returns nil
110
+ def setup_amqp!
111
+ debug "[setup_amqp!] (re) openining channels"
112
+ # Test convenience
113
+ @publisher_channel && @publisher_channel.close
114
+ @channel && @channel.close
115
+
116
+ # Publishing is going to come in from the host app, as well as
117
+ # the Deferred thread, so create a dedicated channel and mutex
118
+ @publisher_channel = Bunny::Channel.new(@bunny, nil, @work_pool)
119
+ @publisher_channel.open
120
+ @publisher_mutex = Mutex.new
121
+
122
+ @channel = Bunny::Channel.new(@bunny, nil, @work_pool)
123
+ @channel.open
124
+ @channel.prefetch(1)
125
+
126
+ @queues = {}
127
+
128
+ @exchange = @channel.topic("#{@prefix}.work", :durable => true, :auto_delete => false)
129
+
130
+ PRIORITIES.each do |priority|
131
+ @queues[priority] = @channel.queue("#{@prefix}.balance.#{priority}", :durable => true)
132
+ @queues[priority].bind(@exchange, :routing_key => priority)
133
+ end
134
+
135
+ nil
136
+ end
137
+
138
+ # Public: Publish some work to the queue
139
+ #
140
+ # work - a serialized string representing the work
141
+ # options - a hash of options, with keys:
142
+ # :priority = (default: NORMAL_PRIORITY) a priority constant from above
143
+ # :run_at = (default: immediate) defer execution of this work for a given time
144
+ #
145
+ # Raises an ArgumentError unless the work is a string
146
+ # Returns nil
147
+ def publish(work, opts={})
148
+ priority = opts.fetch('priority', opts.fetch(:priority, NORMAL_PRIORITY))
149
+ run_at = opts.fetch('run_at', opts.fetch(:run_at, Time.now))
150
+
151
+ raise ArgumentError, 'work must be a string' unless work.is_a?(String)
152
+ raise ArgumentError, 'unknown priority level' unless PRIORITIES.include?(priority)
153
+ raise ArgumentError, ':run_at must be a Time object if specified' unless run_at.nil? or run_at.is_a?(Time)
154
+
155
+ if run_at > Time.now
156
+
157
+ debug "[publish] Handing work to deferred work manager to be run in #{run_at - Time.now}"
158
+
159
+ #  Make sure we explicitly pass all options in, even if they're the defaulted values
160
+ DeferredWorkManager.instance(self.prefix).handle_deferred(work, {
161
+ :priority => priority,
162
+ :run_at => run_at
163
+ })
164
+ else
165
+
166
+ debug "[publish] Pushing work onto exchange '#{@exchange.name}' with routing key '#{priority}'"
167
+ @publisher_mutex.synchronize do
168
+ @publisher_channel.direct(@exchange.name, :passive=>true).publish(work, {
169
+ :routing_key => priority,
170
+ :headers => {
171
+ :job_priority => priority,
172
+ :run_at => run_at.iso8601(4)
173
+ }
174
+ })
175
+ end
176
+ end
177
+ nil
178
+ end
179
+
180
+ # Public: Acknowledge some work
181
+ #
182
+ # work - the TomQueue::Work object to acknowledge
183
+ #
184
+ # Returns the work object passed.
185
+ def ack(work)
186
+ @channel.ack(work.response.delivery_tag)
187
+ work
188
+ end
189
+
190
+ # Public: Pop some work off the queue
191
+ #
192
+ # This call will block, if necessary, until work becomes available.
193
+ #
194
+ # Returns QueueManager::Work instance
195
+ def pop(opts={})
196
+ DeferredWorkManager.instance(self.prefix).ensure_running
197
+
198
+ work = sync_poll_queues
199
+ work ||= wait_for_message
200
+ work
201
+ end
202
+
203
+ # Internal: Synchronously poll priority queues in order
204
+ #
205
+ # Returns: highest priority TomQueue::Work instance; or
206
+ # nil if no work is queued.
207
+ def sync_poll_queues
208
+ debug "[pop] Synchronously popping message"
209
+
210
+ response, headers, payload = nil
211
+
212
+ # Synchronously poll the head of all the queues in priority order
213
+ PRIORITIES.find do |priority|
214
+ debug "[pop] Popping '#{@queues[priority].name}'..."
215
+ # Perform a basic get. Calling Queue#get gets into a mess wrt the subscribe
216
+ # below. Don't do it.
217
+ response, headers, payload = @channel.basic_get(@queues[priority].name, :ack => true)
218
+
219
+ # Array#find will break out of the loop if we return a non-nil value.
220
+ payload
221
+ end
222
+
223
+ payload && Work.new(self, response, headers, payload)
224
+ end
225
+
226
+ # Internal: Setup a consumer and block, waiting for the first message to arrive
227
+ # on any of the priority queues.
228
+ #
229
+ # Returns: TomQueue::Work instance
230
+ def wait_for_message
231
+
232
+ debug "[wait_for_message] setting up consumer, waiting for next message"
233
+
234
+ consumer_thread_value = nil
235
+
236
+ # Setup a subscription to all the queues. The channel pre-fetch
237
+ # will ensure we get exactly one message delivered
238
+ consumers = PRIORITIES.map do |priority|
239
+ @queues[priority].subscribe(:ack => true) do |*args|
240
+ @mutex.synchronize do
241
+ consumer_thread_value = args
242
+ @condvar.signal
243
+ end
244
+ end
245
+ end
246
+
247
+ # Back on the calling thread, block on the callback above and, when
248
+ # it's signalled, pull the arguments over to this thread inside the mutex
249
+ response, header, payload = @mutex.synchronize do
250
+ @condvar.wait(@mutex, 10.0) until consumer_thread_value
251
+ consumer_thread_value
252
+ end
253
+
254
+ debug "[wait_for_message] Shutting down consumers"
255
+
256
+ # Now, cancel the consumers - the prefetch level on the channel will
257
+ # ensure we only got the message we're about to return.
258
+ consumers.each { |c| c.cancel }
259
+
260
+ # Return the message we got passed.
261
+ TomQueue::Work.new(self, response, header, payload)
262
+ end
263
+ end
264
+ end
@@ -0,0 +1,69 @@
1
+ # Add a binary search method to Range.
2
+ #
3
+ # The search method has the same interface as Ruby 2.0's Range#bsearch method
4
+ # but a different name, so it should be easy to adopt 2.0's version.
5
+ #
6
+ class Range
7
+
8
+ def tomqueue_binary_search
9
+ return nil if min == nil
10
+
11
+ low = min
12
+ high = max
13
+
14
+ while (low <= high)
15
+ mid = (high + low) >> 1
16
+
17
+ output = yield mid
18
+ if output == 0
19
+ return mid
20
+
21
+ elsif output < 0
22
+ if low == high || low+1 == high
23
+ return low
24
+ else
25
+ high = mid - 1
26
+ end
27
+ elsif output > 0
28
+ if mid == high
29
+ return high + 1
30
+ else
31
+ low = mid + 1
32
+ end
33
+ end
34
+ end
35
+ end
36
+ end
37
+
38
+
39
+ module TomQueue
40
+
41
+ # Internal A sorted array is one in which all the elements remain sorted
42
+ #
43
+ # On insertion, a binary search of the existing elements is carried out in order to find the
44
+ # correct location for the new element.
45
+ #
46
+ # NOTE: This thread is /NOT/ thread safe, so it is up to the caller to ensure that concurrent
47
+ # access is correctly synchronized.
48
+ #
49
+ # NOTE: You must also use the << method to add elements, otherwise this array isn't guaranteed to be
50
+ # sorted!
51
+ #
52
+ class SortedArray < ::Array
53
+
54
+
55
+ # Public: Add an element to the array.
56
+ #
57
+ # This will insert the element into the array in the correct place
58
+ #
59
+ # Returns self so this method can be chained
60
+ def <<(element)
61
+ pos = (0...self.length).tomqueue_binary_search do |index|
62
+ element <=> self[index]
63
+ end
64
+ pos ||= 0 # this is for the empty array
65
+
66
+ self.insert(pos, element)
67
+ end
68
+ end
69
+ end