beetle 0.4.2 → 0.4.3
- checksums.yaml +4 -4
- data/RELEASE_NOTES.rdoc +8 -0
- data/Rakefile +1 -0
- data/examples/consume_many_messages_and_shutdown_randomly.rb +56 -0
- data/examples/publish_many_messages.rb +23 -0
- data/lib/beetle/client.rb +9 -8
- data/lib/beetle/deduplication_store.rb +12 -0
- data/lib/beetle/message.rb +30 -24
- data/lib/beetle/subscriber.rb +48 -23
- data/lib/beetle/version.rb +1 -1
- data/test/beetle/subscriber_test.rb +45 -24
- metadata +5 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0e984630440947f81643287a32934138fcdaee58
+  data.tar.gz: 421b42f6d56a837ba9e2a05a0d0f0087f8d5a737
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c91106551e7e66cf01cdf614130568abe45dd490bcf51c8e9669214c20af29d91a883c545fea13ae3555a2e8a5c5609784d7b7bbb7e01db733a4c06eefdee91f
+  data.tar.gz: 588b1921ed5a473953572fcf542c62a6a5344700eef6c50d821ee0430a3e38576ee740e0c3330295ff011a887aa63eebd7c56cc056f01a011a502cd4cd2adf46
data/RELEASE_NOTES.rdoc
CHANGED
@@ -1,5 +1,13 @@
 = Release Notes
 
+== Version 0.4.3
+* fixed a race condition which could lead to duplicate message processing
+* fixed eventmachine shutdown sequence problem, which led to ACKs
+  occasionally being lost due to writing to a closed socket, which in
+  turn caused messages to be processed twice
+* stop_listening now always triggers the subscriber shutdown sequence
+  via an eventmachine timer callback, if the eventmachine reactor is running
+
 == Version 0.4.2
 * Fail hard on missing master file
 * Set message timestamp header
data/examples/consume_many_messages_and_shutdown_randomly.rb
ADDED
@@ -0,0 +1,56 @@
+# consume_many_messages_and_shutdown_randomly.rb
+# this example exercises the shutdown sequence and tests whether
+# messages are handled more than once due to the shutdown
+#
+# ! check the examples/README.rdoc for information on starting your redis/rabbit !
+#
+# use it like so:
+#
+# while ruby consume_many_messages_and_shutdown_randomly.rb; do echo "no duplicate found yet"; done
+#
+# if the loop stops, a duplicate has been found.
+# you can stop this process by sending an interrupt signal
+
+trap("INT"){ puts "ignoring interrupt, please wait" }
+
+require "rubygems"
+require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+# set Beetle log level to info, less noisy than debug
+Beetle.config.logger.level = Logger::INFO
+
+# setup client
+client = Beetle::Client.new
+client.register_queue(:test)
+client.register_message(:test)
+
+# create a redis instance with a different database
+redis = Redis.new(:db => 7)
+
+exit_code = 0
+
+# register our handler to the message, check out the message.rb for more stuff you can get from the message object
+client.register_handler(:test) do |message|
+  uuid = message.uuid
+  if redis.incr(uuid) > 1
+    exit_code = 1
+    puts "\n\nRECEIVED A MESSAGE twice: #{uuid}\n\n"
+    client.stop_listening
+  end
+end
+
+# start listening
+# this starts the event machine event loop using EM.run
+# the block passed to listen will be yielded as the last step of the setup process
+client.listen do
+  trap("TERM"){ client.stop_listening }
+  trap("INT"){ exit_code = 1; client.stop_listening }
+  # start a thread which randomly kills us
+  Thread.new do
+    sleep(2 + rand)
+    Process.kill("TERM", $$)
+  end
+  puts "trying to detect duplicates"
+end
+
+exit exit_code
data/examples/publish_many_messages.rb
ADDED
@@ -0,0 +1,23 @@
+# publish_many_messages.rb
+# this script publishes ARGV[0] small test messages (or 100000 if no argument is provided)
+#
+# ! check the examples/README.rdoc for information on starting your redis/rabbit !
+#
+# start it with ruby publish_many_messages.rb 1000000
+
+require "rubygems"
+require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+# set Beetle log level to info, less noisy than debug
+Beetle.config.logger.level = Logger::INFO
+
+# setup client
+client = Beetle::Client.new
+client.register_queue(:test)
+client.register_message(:test)
+
+# publish a lot of identical messages
+n = (ARGV[0] || 100000).to_i
+n.times{ client.publish(:test, 'x') }
+
+puts "published #{n} messages"
data/lib/beetle/client.rb
CHANGED
@@ -11,9 +11,9 @@ module Beetle
   # On the publisher side, publishing a message will ensure that the exchange it will be
   # sent to, and each of the queues bound to the exchange, will be created on demand. On
   # the subscriber side, exchanges, queues, bindings and queue subscriptions will be
-  # created when the application calls the
-  # subscribe to only a subset of the configured queues by passing a list of queue
-  # to the listen method.
+  # created when the application calls the listen_queues method. An application can decide
+  # to subscribe to only a subset of the configured queues by passing a list of queue
+  # names to the listen method.
   #
   # The net effect of this strategy is that producers and consumers can be started in any
   # order, so that no message is lost if message producers are accidentally started before
@@ -216,14 +216,15 @@ module Beetle
       subscriber.listen_queues(queues, &block)
     end
 
-    # stops the
+    # stops the subscriber by closing all channels and connections. note this is an
+    # asynchronous operation due to the underlying eventmachine mechanism.
     def stop_listening
-      subscriber.stop!
+      @subscriber.stop! if @subscriber
     end
 
     # disconnects the publisher from all servers it's currently connected to
     def stop_publishing
-      publisher.stop
+      @publisher.stop if @publisher
    end
 
     # pause listening on a list of queues
@@ -267,8 +268,8 @@ module Beetle
     end
 
     def reset
-      stop_publishing
-      stop_listening
+      stop_publishing
+      stop_listening
       config.reload
       load_brokers_from_config
     rescue Exception => e
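A note on the stop_listening/stop_publishing change above: both methods now guard on the instance variables instead of going through the lazily-initializing accessors, so stopping a client that never listened or published becomes a no-op. A minimal usage sketch, assuming the accessors stay lazy as in the rest of the client:

    require "beetle"

    client = Beetle::Client.new

    # Neither publish nor listen has been called, so @publisher and @subscriber
    # are still nil. With 0.4.3 these calls simply do nothing instead of
    # instantiating a publisher/subscriber only to shut it down again.
    client.stop_publishing
    client.stop_listening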
data/lib/beetle/deduplication_store.rb
CHANGED
@@ -124,6 +124,12 @@ module Beetle
       with_failover { redis.setnx(key(msg_id, suffix), value) }
     end
 
+    # store some key/value pairs
+    def mset(msg_id, values)
+      values = values.inject([]){|a,(k,v)| a.concat([key(msg_id, k), v])}
+      with_failover { redis.mset(*values) }
+    end
+
     # store some key/value pairs if none of the given keys exist.
     def msetnx(msg_id, values)
       values = values.inject([]){|a,(k,v)| a.concat([key(msg_id, k), v])}
@@ -140,6 +146,12 @@ module Beetle
       with_failover { redis.get(key(msg_id, suffix)) }
     end
 
+    # retrieve the values with given <tt>suffixes</tt> for given <tt>msg_id</tt>. returns a list of strings.
+    def mget(msg_id, keys)
+      keys = keys.map{|suffix| key(msg_id, suffix)}
+      with_failover { redis.mget(*keys) }
+    end
+
     # delete key with given <tt>suffix</tt> for given <tt>msg_id</tt>.
     def del(msg_id, suffix)
       with_failover { redis.del(key(msg_id, suffix)) }
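The new mset and mget helpers above collapse what previously took one Redis round trip per key into a single command. A rough sketch of the underlying access pattern, assuming a local Redis server and a key layout of "#{msg_id}:#{suffix}" (an assumption about the store's key helper, which is not shown in this diff):

    require "redis"

    redis  = Redis.new                 # assumes a Redis server on localhost:6379
    msg_id = "example-message-id"      # hypothetical message id

    # one MSET instead of two separate SET calls
    redis.mset("#{msg_id}:status", "completed", "#{msg_id}:timeout", 0)

    # one MGET instead of several separate GET calls
    status, timeout = redis.mget("#{msg_id}:status", "#{msg_id}:timeout")
    puts "status=#{status} timeout=#{timeout}"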
data/lib/beetle/message.rb
CHANGED
@@ -172,8 +172,7 @@ module Beetle
 
     # mark message handling complete in the deduplication store
     def completed!
-      @store.
-      timed_out!
+      @store.mset(msg_id, :status => "completed", :timeout => 0)
     end
 
     # whether we should wait before running the handler
@@ -237,6 +236,10 @@ module Beetle
       logger.debug "Beetle: deleted mutex: #{msg_id}"
     end
 
+    def fetch_status_delay_timeout_attempts_exceptions
+      @store.mget(msg_id, [:status, :delay, :timeout, :attempts, :exceptions])
+    end
+
     # process this message and do not allow any exception to escape to the caller
     def process(handler)
       logger.debug "Beetle: processing message #{msg_id}"
@@ -269,30 +272,33 @@ module Beetle
         run_handler(handler) == RC::HandlerCrash ? RC::AttemptsLimitReached : RC::OK
       elsif !key_exists?
         run_handler!(handler)
-      elsif completed?
-        ack!
-        RC::OK
-      elsif delayed?
-        logger.warn "Beetle: ignored delayed message (#{msg_id})!"
-        RC::Delayed
-      elsif !timed_out?
-        RC::HandlerNotYetTimedOut
-      elsif attempts_limit_reached?
-        completed!
-        ack!
-        logger.warn "Beetle: reached the handler execution attempts limit: #{attempts_limit} on #{msg_id}"
-        RC::AttemptsLimitReached
-      elsif exceptions_limit_reached?
-        completed!
-        ack!
-        logger.warn "Beetle: reached the handler exceptions limit: #{exceptions_limit} on #{msg_id}"
-        RC::ExceptionsLimitReached
       else
-
-        if
-
+        status, delay, timeout, attempts, exceptions = fetch_status_delay_timeout_attempts_exceptions
+        if status == "completed"
+          ack!
+          RC::OK
+        elsif delay && delay.to_i > now
+          logger.warn "Beetle: ignored delayed message (#{msg_id})!"
+          RC::Delayed
+        elsif !(timeout && timeout.to_i < now)
+          RC::HandlerNotYetTimedOut
+        elsif attempts.to_i >= attempts_limit
+          completed!
+          ack!
+          logger.warn "Beetle: reached the handler execution attempts limit: #{attempts_limit} on #{msg_id}"
+          RC::AttemptsLimitReached
+        elsif exceptions.to_i > exceptions_limit
+          completed!
+          ack!
+          logger.warn "Beetle: reached the handler exceptions limit: #{exceptions_limit} on #{msg_id}"
+          RC::ExceptionsLimitReached
         else
-
+          set_timeout!
+          if aquire_mutex!
+            run_handler!(handler)
+          else
+            RC::MutexLocked
+          end
         end
       end
     end
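The rewritten branch above is the race condition fix mentioned in the release notes: instead of querying the deduplication store once per predicate (completed?, delayed?, timed_out?, ...), the handler state is read in a single mget and every decision is made against that one snapshot. A simplified, self-contained sketch of the idea, with an in-memory stand-in for the deduplication store (the 0.4.2 per-predicate reads are paraphrased, not quoted):

    # "FakeStore" stands in for the deduplication store; a trivial in-memory
    # fake is enough to illustrate the control flow.
    FakeStore = Struct.new(:data) do
      def mget(msg_id, suffixes)
        suffixes.map { |s| data["#{msg_id}:#{s}"] }
      end
    end

    store  = FakeStore.new("msg-1:status" => "completed", "msg-1:timeout" => "0")
    msg_id = "msg-1"
    now    = Time.now.to_i

    # one read, one consistent snapshot (0.4.2 issued a separate read per
    # predicate, which is where the race between two consumers came from)
    status, delay, timeout, attempts, exceptions =
      store.mget(msg_id, [:status, :delay, :timeout, :attempts, :exceptions])

    if status == "completed"
      puts "already completed, just ack"
    elsif delay && delay.to_i > now
      puts "delayed, ignore for now"
    else
      puts "attempts so far: #{attempts.to_i}, exceptions so far: #{exceptions.to_i}"
    end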
data/lib/beetle/subscriber.rb
CHANGED
@@ -7,14 +7,13 @@ module Beetle
     # create a new subscriber instance
     def initialize(client, options = {}) #:nodoc:
       super
-      @status = :idle
-      @request_stop = false
       @servers.concat @client.additional_subscription_servers
       @handlers = {}
       @connections = {}
       @channels = {}
       @subscriptions = {}
       @listened_queues = []
+      @channels_closed = false
     end
 
     # the client calls this method to subscribe to a list of queues.
@@ -48,24 +47,19 @@ module Beetle
       end
     end
 
-    # closes all AMQP connections and
+    # closes all AMQP connections and stops the eventmachine loop. note that the shutdown
+    # process is asynchronous. must not be called while a message handler is
+    # running. typically one would use <tt>EM.add_timer(0) { stop! }</tt> to ensure this.
     def stop! #:nodoc:
-      if
-      EM.
-
-
-      # otherwise messages can get ACKed after the connection is closed
-      # resulting in the ACK not being received and hence the
-      # message being re-delivered
-      if @status == :idle
-        server, connection = @connections.shift
-        logger.debug "Beetle: closing connection to #{server}"
-        connection.close { stop! }
-      else
-        # else ask for stop. After processing the current message the
-        # stop will be re-attempted
-        @request_stop = true
+      if EM.reactor_running?
+        EM.add_timer(0) do
+          close_all_channels
+          close_all_connections
         end
+      else
+        # try to clean up as much as possible under the circumstances, by closing all connections
+        # this should at least close the sockets
+        close_connections_with_reactor_not_running
       end
     end
 
@@ -78,6 +72,38 @@ module Beetle
 
     private
 
+    # close all sockets.
+    def close_connections_with_reactor_not_running
+      @connections.each { |_, connection| connection.close }
+    ensure
+      @connections = {}
+      @channels = {}
+    end
+
+    # close all connections. this assumes the reactor is running
+    def close_all_connections
+      if @connections.empty?
+        EM.stop_event_loop
+      else
+        server, connection = @connections.shift
+        logger.debug "Beetle: closing connection to #{server}"
+        connection.close { close_all_connections }
+      end
+    end
+
+    # closes all channels. this needs to be the first action during a
+    # subscriber shutdown, so that subscription callbacks can detect
+    # they should stop processing messages received from the prefetch
+    # queue.
+    def close_all_channels
+      return if @channels_closed
+      @channels.each do |server, channel|
+        logger.debug "Beetle: closing channel to server #{server}"
+        channel.close
+      end
+      @channels_closed = true
+    end
+
     def exchanges_for_queues(queues)
       @client.bindings.slice(*queues).map{|_, opts| opts.map{|opt| opt[:exchange]}}.flatten.uniq
     end
@@ -136,8 +162,11 @@ module Beetle
     def create_subscription_callback(queue_name, amqp_queue_name, handler, opts)
       server = @server
       lambda do |header, data|
+        if channel(server).closing?
+          logger.info "Beetle: ignoring message since channel to server #{server} already closed"
+          return
+        end
         begin
-          @status = :busy
           # logger.debug "Beetle: received message"
           processor = Handler.create(handler, opts)
           message_options = opts.merge(:server => server, :store => @client.deduplication_store)
@@ -167,10 +196,6 @@ module Beetle
         ensure
           # processing_completed swallows all exceptions, so we don't need to protect this call
           processor.processing_completed
-          @status = :idle
-          if @request_stop
-            stop!
-          end
         end
       end
     end
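Taken together, the subscriber changes above establish a fixed shutdown order inside the reactor: close every channel first (so the subscription callbacks start dropping prefetched messages), then close the connections one after another, and stop the event loop only when the last one is gone. A condensed, stand-alone sketch of that control flow, with trivial stand-ins for the AMQP connection and channel objects beetle uses and assuming the eventmachine gem is installed:

    require "eventmachine"

    # Stand-ins that only mimic the close / close-with-callback interface.
    FakeChannel    = Class.new { def close; puts "channel closed"; end }
    FakeConnection = Class.new { def close; puts "connection closed"; yield if block_given?; end }

    channels    = { "server1" => FakeChannel.new, "server2" => FakeChannel.new }
    connections = { "server1" => FakeConnection.new, "server2" => FakeConnection.new }

    EM.run do
      EM.add_timer(0) do                   # defer past anything currently running
        channels.each_value(&:close)       # 1. stop accepting prefetched messages
        close_next = lambda do             # 2. close connections one by one
          if connections.empty?
            EM.stop_event_loop             # 3. only now stop the reactor
          else
            _server, connection = connections.shift
            connection.close { close_next.call }
          end
        end
        close_next.call
      end
    end
    puts "reactor stopped, process can exit cleanly"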
data/test/beetle/subscriber_test.rb
CHANGED
@@ -22,29 +22,32 @@ module Beetle
       assert_equal channel, @sub.send(:channel, "donald:1")
     end
 
-    test "stop! should close all amqp connections and then stop the event loop if
-      connection1 = mock('
+    test "stop! should close all amqp channels and connections and then stop the event loop if the reactor is running" do
+      connection1 = mock('conection1')
       connection1.expects(:close).yields
-      connection2 = mock('
+      connection2 = mock('connection2')
       connection2.expects(:close).yields
-
+      channel1 = mock('channel1')
+      channel1.expects(:close)
+      channel2 = mock('channel2')
+      channel2.expects(:close)
+      @sub.instance_variable_set "@connections", [["server1", connection1], ["server2", connection2]]
+      @sub.instance_variable_set "@channels", {"server1" => channel1, "server2" => channel2}
+      EM.expects(:reactor_running?).returns(true)
       EM.expects(:stop_event_loop)
+      EM.expects(:add_timer).with(0).yields
       @sub.send(:stop!)
-      assert !@sub.instance_variable_get("@request_stop")
     end
 
-    test "stop! should
-
-      connection1
-
-      connection2
-      connection2
-
-      EM.expects(:stop_event_loop).never
+    test "stop! should close all connections if the reactor is not running" do
+      connection1 = mock('conection1')
+      connection1.expects(:close).yields
+      connection2 = mock('connection2')
+      connection2.expects(:close).yields
+      @sub.instance_variable_set "@connections", [["server1", connection1], ["server2", connection2]]
+      EM.expects(:reactor_running?).returns(false)
       @sub.send(:stop!)
-      assert @sub.instance_variable_get("@request_stop")
     end
-
   end
 
   class SubscriberPauseAndResumeTest < MiniTest::Unit::TestCase
@@ -219,13 +222,16 @@ module Beetle
   end
 
 
-  class
+  class DeadLetteringCallBackExecutionTest < MiniTest::Unit::TestCase
     def setup
       @client = Client.new
       @client.config.dead_lettering_enabled = true
       @queue = "somequeue"
       @client.register_queue(@queue)
       @sub = @client.send(:subscriber)
+      mq = mock("MQ")
+      mq.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(@sub.server).returns(mq)
       @exception = Exception.new "murks"
       @handler = Handler.create(lambda{|*args| raise @exception})
       # handler method 'processing_completed' should be called under all circumstances
@@ -257,42 +263,52 @@ module Beetle
       @sub = client.send(:subscriber)
       @exception = Exception.new "murks"
       @handler = Handler.create(lambda{|*args| raise @exception})
-      # handler method 'processing_completed' should be called under all circumstances
-      @handler.expects(:processing_completed).once
       @callback = @sub.send(:create_subscription_callback, "my myessage", @queue, @handler, :exceptions => 1)
     end
 
     test "exceptions raised from message processing should be ignored" do
+      @handler.expects(:processing_completed).once
       header = header_with_params({})
       Message.any_instance.expects(:process).raises(Exception.new("don't worry"))
+      channel = mock("MQ")
+      channel.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(@sub.server).returns(channel)
       assert_nothing_raised { @callback.call(header, 'foo') }
     end
 
-    test "should
+    test "callback should not process messages if the underlying channel has already been closed" do
+      @handler.expects(:processing_completed).never
       header = header_with_params({})
-      Message.any_instance.expects(:process).
-
-
+      Message.any_instance.expects(:process).never
+      channel = mock("channel")
+      channel.expects(:closing?).returns(true)
+      @sub.expects(:channel).with(@sub.server).returns(channel)
       assert_nothing_raised { @callback.call(header, 'foo') }
     end
 
     test "should call reject on the message header when processing the handler returns true on reject?" do
+      @handler.expects(:processing_completed).once
       header = header_with_params({})
       result = mock("result")
      result.expects(:reject?).returns(true)
       Message.any_instance.expects(:process).returns(result)
       @sub.expects(:sleep).with(1)
+      mq = mock("MQ")
+      mq.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(@sub.server).returns(mq)
       header.expects(:reject).with(:requeue => true)
       @callback.call(header, 'foo')
     end
 
     test "should sent a reply with status OK if the message reply_to header is set and processing the handler succeeds" do
+      @handler.expects(:processing_completed).once
       header = header_with_params(:reply_to => "tmp-queue")
       result = RC::OK
       Message.any_instance.expects(:process).returns(result)
       Message.any_instance.expects(:handler_result).returns("response-data")
       mq = mock("MQ")
-
+      mq.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(@sub.server).returns(mq).twice
       exchange = mock("exchange")
       exchange.expects(:publish).with("response-data", :routing_key => "tmp-queue", :headers => {:status => "OK"}, :persistent => false)
       AMQP::Exchange.expects(:new).with(mq, :direct, "").returns(exchange)
@@ -300,12 +316,14 @@ module Beetle
     end
 
     test "should sent a reply with status FAILED if the message reply_to header is set and processing the handler fails" do
+      @handler.expects(:processing_completed).once
       header = header_with_params(:reply_to => "tmp-queue")
       result = RC::AttemptsLimitReached
       Message.any_instance.expects(:process).returns(result)
       Message.any_instance.expects(:handler_result).returns(nil)
       mq = mock("MQ")
-
+      mq.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(@sub.server).returns(mq).twice
       exchange = mock("exchange")
       exchange.expects(:publish).with("", :routing_key => "tmp-queue", :headers => {:status => "FAILED"}, :persistent => false)
       AMQP::Exchange.expects(:new).with(mq, :direct, "").returns(exchange)
@@ -323,6 +341,9 @@ module Beetle
     test "subscribe should subscribe with a subscription callback created from the registered block and remember the subscription" do
       @client.register_queue(:some_queue, :exchange => "some_exchange", :key => "some_key")
       server = @sub.server
+      channel = mock("channel")
+      channel.expects(:closing?).returns(false)
+      @sub.expects(:channel).with(server).returns(channel)
       header = header_with_params({})
       header.expects(:ack)
       block_called = false
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: beetle
 version: !ruby/object:Gem::Version
-  version: 0.4.
+  version: 0.4.3
 platform: ruby
 authors:
 - Stefan Kaes
@@ -12,7 +12,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2016-02-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: uuid4r
@@ -202,12 +202,14 @@ files:
 - bin/beetle
 - examples/README.rdoc
 - examples/attempts.rb
+- examples/consume_many_messages_and_shutdown_randomly.rb
 - examples/handler_class.rb
 - examples/handling_exceptions.rb
 - examples/multiple_exchanges.rb
 - examples/multiple_queues.rb
 - examples/nonexistent_server.rb
 - examples/pause_and_resume.rb
+- examples/publish_many_messages.rb
 - examples/redundant.rb
 - examples/rpc.rb
 - examples/simple.rb
@@ -284,7 +286,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: 1.3.7
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.8
 signing_key:
 specification_version: 3
 summary: High Availability AMQP Messaging with Redundant Queues