beetle 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +5 -0
- data/MIT-LICENSE +20 -0
- data/README.rdoc +82 -0
- data/Rakefile +114 -0
- data/TODO +7 -0
- data/beetle.gemspec +127 -0
- data/etc/redis-master.conf +189 -0
- data/etc/redis-slave.conf +189 -0
- data/examples/README.rdoc +14 -0
- data/examples/attempts.rb +66 -0
- data/examples/handler_class.rb +64 -0
- data/examples/handling_exceptions.rb +73 -0
- data/examples/multiple_exchanges.rb +48 -0
- data/examples/multiple_queues.rb +43 -0
- data/examples/redis_failover.rb +65 -0
- data/examples/redundant.rb +65 -0
- data/examples/rpc.rb +45 -0
- data/examples/simple.rb +39 -0
- data/lib/beetle.rb +57 -0
- data/lib/beetle/base.rb +78 -0
- data/lib/beetle/client.rb +252 -0
- data/lib/beetle/configuration.rb +31 -0
- data/lib/beetle/deduplication_store.rb +152 -0
- data/lib/beetle/handler.rb +95 -0
- data/lib/beetle/message.rb +336 -0
- data/lib/beetle/publisher.rb +187 -0
- data/lib/beetle/r_c.rb +40 -0
- data/lib/beetle/subscriber.rb +144 -0
- data/script/start_rabbit +29 -0
- data/snafu.rb +55 -0
- data/test/beetle.yml +81 -0
- data/test/beetle/base_test.rb +52 -0
- data/test/beetle/bla.rb +0 -0
- data/test/beetle/client_test.rb +305 -0
- data/test/beetle/configuration_test.rb +5 -0
- data/test/beetle/deduplication_store_test.rb +90 -0
- data/test/beetle/handler_test.rb +105 -0
- data/test/beetle/message_test.rb +744 -0
- data/test/beetle/publisher_test.rb +407 -0
- data/test/beetle/r_c_test.rb +9 -0
- data/test/beetle/subscriber_test.rb +263 -0
- data/test/beetle_test.rb +5 -0
- data/test/test_helper.rb +20 -0
- data/tmp/master/.gitignore +2 -0
- data/tmp/slave/.gitignore +3 -0
- metadata +192 -0
data/lib/beetle/configuration.rb
@@ -0,0 +1,31 @@
module Beetle
  class Configuration
    # default logger (defaults to <tt>Logger.new(STDOUT)</tt>)
    attr_accessor :logger
    # number of seconds after which keys are removed from the message deduplication store (defaults to <tt>3.days</tt>)
    attr_accessor :gc_threshold
    # the machines where the deduplication store lives (defaults to <tt>"localhost:6379"</tt>)
    attr_accessor :redis_hosts
    # redis database number to use for the message deduplication store (defaults to <tt>4</tt>)
    attr_accessor :redis_db
    # list of amqp servers to use (defaults to <tt>"localhost:5672"</tt>)
    attr_accessor :servers
    # the virtual host to use on the AMQP servers
    attr_accessor :vhost
    # the AMQP user to use when connecting to the AMQP servers
    attr_accessor :user
    # the password to use when connecting to the AMQP servers
    attr_accessor :password

    def initialize #:nodoc:
      self.logger = Logger.new(STDOUT)
      self.gc_threshold = 3.days
      self.redis_hosts = "localhost:6379"
      self.redis_db = 4
      self.servers = "localhost:5672"
      self.vhost = "/"
      self.user = "guest"
      self.password = "guest"
    end
  end
end
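For context (this note and sketch are editorial, not part of the published diff): the settings above are normally adjusted through the global configuration object that the rest of the code base reads via Beetle.config (see the Beetle.config.logger and Beetle.config.gc_threshold lookups in deduplication_store.rb below). The host names and values in this sketch are invented; the comma-separated format for redis_hosts is inferred from DeduplicationStore#redis_instances.

require "beetle"

# Illustrative values only.
config = Beetle.config
config.servers      = "localhost:5672"              # AMQP broker to publish to / subscribe on
config.redis_hosts  = "redis-a:6379, redis-b:6379"  # split on commas by DeduplicationStore
config.redis_db     = 4
config.gc_threshold = 1.day                         # keys of expired messages are GC'd after this
config.logger       = Logger.new(STDOUT)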
data/lib/beetle/deduplication_store.rb
@@ -0,0 +1,152 @@
module Beetle
  # The deduplication store is used internally by Beetle::Client to store information on
  # the status of message processing. This includes:
  # * how often a message has already been seen by some consumer
  # * whether a message has been processed successfully
  # * how many attempts have been made to execute a message handler for a given message
  # * how long we should wait before trying to execute the message handler after a failure
  # * how many exceptions have been raised during previous execution attempts
  # * how long we should wait before trying to perform the next execution attempt
  # * whether some other process is already trying to execute the message handler
  #
  # It also provides a method to garbage collect keys for expired messages.
  class DeduplicationStore
    # creates a new deduplication store
    def initialize(hosts = "localhost:6379", db = 4)
      @hosts = hosts
      @db = db
    end

    # get the Redis instance
    def redis
      @redis ||= find_redis_master
    end

    # list of key suffixes to use for storing values in Redis.
    KEY_SUFFIXES = [:status, :ack_count, :timeout, :delay, :attempts, :exceptions, :mutex, :expires]

    # build a Redis key out of a message id and a given suffix
    def key(msg_id, suffix)
      "#{msg_id}:#{suffix}"
    end

    # list of keys which potentially exist in Redis for the given message id
    def keys(msg_id)
      KEY_SUFFIXES.map{|suffix| key(msg_id, suffix)}
    end

    # extract message id from a given Redis key
    def msg_id(key)
      key =~ /^(msgid:[^:]*:[-0-9a-f]*):.*$/ && $1
    end

    # garbage collect keys in Redis (always assume the worst!)
    def garbage_collect_keys(now = Time.now.to_i)
      keys = redis.keys("msgid:*:expires")
      threshold = now + Beetle.config.gc_threshold
      keys.each do |key|
        expires_at = redis.get key
        if expires_at && expires_at.to_i < threshold
          msg_id = msg_id(key)
          redis.del(keys(msg_id))
        end
      end
    end

    # unconditionally store a key <tt>value</tt> with given <tt>suffix</tt> for given <tt>msg_id</tt>.
    def set(msg_id, suffix, value)
      with_failover { redis.set(key(msg_id, suffix), value) }
    end

    # store a key <tt>value</tt> with given <tt>suffix</tt> for given <tt>msg_id</tt> if it doesn't exist yet.
    def setnx(msg_id, suffix, value)
      with_failover { redis.setnx(key(msg_id, suffix), value) }
    end

    # store some key/value pairs if none of the given keys exist.
    def msetnx(msg_id, values)
      values = values.inject({}){|h,(k,v)| h[key(msg_id, k)] = v; h}
      with_failover { redis.msetnx(values) }
    end

    # increment counter for key with given <tt>suffix</tt> for given <tt>msg_id</tt>. returns an integer.
    def incr(msg_id, suffix)
      with_failover { redis.incr(key(msg_id, suffix)) }
    end

    # retrieve the value with given <tt>suffix</tt> for given <tt>msg_id</tt>. returns a string.
    def get(msg_id, suffix)
      with_failover { redis.get(key(msg_id, suffix)) }
    end

    # delete key with given <tt>suffix</tt> for given <tt>msg_id</tt>.
    def del(msg_id, suffix)
      with_failover { redis.del(key(msg_id, suffix)) }
    end

    # delete all keys associated with the given <tt>msg_id</tt>.
    def del_keys(msg_id)
      with_failover { redis.del(keys(msg_id)) }
    end

    # check whether key with given suffix exists for a given <tt>msg_id</tt>.
    def exists(msg_id, suffix)
      with_failover { redis.exists(key(msg_id, suffix)) }
    end

    # flush the configured redis database. useful for testing.
    def flushdb
      with_failover { redis.flushdb }
    end

    # performs redis operations by yielding a passed in block, waiting for a new master to
    # show up on the network if the operation throws an exception. if a new master doesn't
    # appear after 120 seconds, we raise an exception.
    def with_failover #:nodoc:
      tries = 0
      begin
        yield
      rescue Exception => e
        Beetle::reraise_expectation_errors!
        logger.error "Beetle: redis connection error '#{e}'"
        if (tries+=1) < 120
          @redis = nil
          sleep 1
          logger.info "Beetle: retrying redis operation"
          retry
        else
          raise NoRedisMaster.new(e.to_s)
        end
      end
    end

    # find the master redis instance
    def find_redis_master
      masters = []
      redis_instances.each do |redis|
        begin
          masters << redis if redis.info[:role] == "master"
        rescue Exception => e
          logger.error "Beetle: could not determine status of redis instance #{redis.server}"
        end
      end
      raise NoRedisMaster.new("unable to determine a new master redis instance") if masters.empty?
      raise TwoRedisMasters.new("more than one redis master instances") if masters.size > 1
      logger.info "Beetle: configured new redis master #{masters.first.server}"
      masters.first
    end

    # returns the list of redis instances
    def redis_instances
      @redis_instances ||= @hosts.split(/ *, */).map{|s| s.split(':')}.map do |host, port|
        Redis.new(:host => host, :port => port, :db => @db)
      end
    end

    # returns the configured logger
    def logger
      Beetle.config.logger
    end

  end
end
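An editorial sketch (not shipped with the gem) of how the store derives its Redis keys; the queue name and uuid below are made up. Only the pure string helpers are exercised, so no Redis server is required to run it.

require "beetle"

store  = Beetle::DeduplicationStore.new("localhost:6379", 4)
msg_id = "msgid:orders:550e8400-e29b-41d4-a716-446655440000"   # hypothetical message id

store.key(msg_id, :status)   # => "msgid:orders:550e8400-e29b-41d4-a716-446655440000:status"
store.keys(msg_id).size      # => 8, one key per entry in KEY_SUFFIXES
store.msg_id(store.key(msg_id, :attempts)) == msg_id   # => true, the regexp strips the suffix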
data/lib/beetle/handler.rb
@@ -0,0 +1,95 @@
module Beetle
  # Instances of class Handler are created by the message processing logic in class
  # Message. There should be no need to ever create them in client code, except for
  # testing purposes.
  #
  # Most applications will define Handler subclasses and override the process, error and
  # failure methods.
  class Handler
    # the Message instance which caused the handler to be created
    attr_reader :message

    def self.create(block_or_handler, opts={}) #:nodoc:
      if block_or_handler.is_a? Handler
        block_or_handler
      elsif block_or_handler.is_a?(Class) && block_or_handler.ancestors.include?(Handler)
        block_or_handler.new
      else
        new(block_or_handler, opts)
      end
    end

    # optionally capture processor, error and failure callbacks
    def initialize(processor=nil, opts={}) #:notnew:
      @processor = processor
      @error_callback = opts[:errback]
      @failure_callback = opts[:failback]
    end

    # called when a message should be processed. if the message was caused by an RPC, the
    # return value will be sent back to the caller. calls the initialized processor proc
    # if a processor proc was specified when creating the Handler instance. calls method
    # process if no proc was given. make sure to call super if you override this method in
    # a subclass.
    def call(message)
      @message = message
      if @processor
        @processor.call(message)
      else
        process
      end
    end

    # called for message processing if no processor was specified when the handler instance
    # was created
    def process
      logger.info "Beetle: received message #{message.inspect}"
    end

    # should not be overridden in subclasses
    def process_exception(exception) #:nodoc:
      if @error_callback
        @error_callback.call(message, exception)
      else
        error(exception)
      end
    rescue Exception
      Beetle::reraise_expectation_errors!
    end

    # should not be overridden in subclasses
    def process_failure(result) #:nodoc:
      if @failure_callback
        @failure_callback.call(message, result)
      else
        failure(result)
      end
    rescue Exception
      Beetle::reraise_expectation_errors!
    end

    # called when handler execution raised an exception and no error callback was
    # specified when the handler instance was created
    def error(exception)
      logger.error "Beetle: handler execution raised an exception: #{exception}"
    end

    # called when message processing has finally failed (i.e., the number of allowed
    # handler execution attempts or the number of allowed exceptions has been reached) and
    # no failure callback was specified when this handler instance was created.
    def failure(result)
      logger.error "Beetle: handler has finally failed"
    end

    # returns the configured Beetle logger
    def logger
      Beetle.config.logger
    end

    # returns the configured Beetle logger
    def self.logger
      Beetle.config.logger
    end

  end
end
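As the class comment notes, applications typically subclass Handler and override process, error and failure. A hypothetical subclass might look like the following sketch (class name, payload and log messages are invented, not taken from the gem):

require "beetle"

class InvoiceHandler < Beetle::Handler
  # called once per delivered message; #message is the Beetle::Message instance
  def process
    logger.info "processing invoice payload: #{message.data}"
  end

  # called when process raised an exception and no :errback proc was registered
  def error(exception)
    logger.warn "invoice handler error: #{exception}"
  end

  # called once the attempts or exceptions limit has been reached
  def failure(result)
    logger.error "invoice handler failed for good: #{result}"
  end
end

# Handler.create instantiates the class, exactly as the message processing code does.
handler = Beetle::Handler.create(InvoiceHandler)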
data/lib/beetle/message.rb
@@ -0,0 +1,336 @@
require "timeout"

module Beetle
  # Instances of class Message are created when a subscription callback fires. Class
  # Message contains the code responsible for message deduplication and determining if it
  # should retry executing the message handler after a handler has crashed (or was forcefully
  # aborted).
  class Message
    # current message format version
    FORMAT_VERSION = 1
    # flag for encoding redundant messages
    FLAG_REDUNDANT = 1
    # default lifetime of messages
    DEFAULT_TTL = 1.day
    # forcefully abort a running handler after this many seconds.
    # can be overridden when registering a handler.
    DEFAULT_HANDLER_TIMEOUT = 300.seconds
    # how many times we should try to run a handler before giving up
    DEFAULT_HANDLER_EXECUTION_ATTEMPTS = 1
    # how many seconds we should wait before retrying handler execution
    DEFAULT_HANDLER_EXECUTION_ATTEMPTS_DELAY = 10.seconds
    # how many exceptions should be tolerated before giving up
    DEFAULT_EXCEPTION_LIMIT = 0

    # server from which the message was received
    attr_reader :server
    # name of the queue on which the message was received
    attr_reader :queue
    # the AMQP header received with the message
    attr_reader :header
    # the uuid of the message
    attr_reader :uuid
    # message payload
    attr_reader :data
    # the message format version of the message
    attr_reader :format_version
    # flags sent with the message
    attr_reader :flags
    # unix timestamp after which the message should be considered stale
    attr_reader :expires_at
    # how many seconds the handler is allowed to execute
    attr_reader :timeout
    # how long to wait before retrying the message handler
    attr_reader :delay
    # how many times we should try to run the handler
    attr_reader :attempts_limit
    # how many exceptions we should tolerate before giving up
    attr_reader :exceptions_limit
    # exception raised by handler execution
    attr_reader :exception
    # value returned by handler execution
    attr_reader :handler_result

    def initialize(queue, header, body, opts = {})
      @queue  = queue
      @header = header
      @data   = body
      setup(opts)
      decode
    end

    def setup(opts) #:nodoc:
      @server           = opts[:server]
      @timeout          = opts[:timeout]    || DEFAULT_HANDLER_TIMEOUT
      @delay            = opts[:delay]      || DEFAULT_HANDLER_EXECUTION_ATTEMPTS_DELAY
      @attempts_limit   = opts[:attempts]   || DEFAULT_HANDLER_EXECUTION_ATTEMPTS
      @exceptions_limit = opts[:exceptions] || DEFAULT_EXCEPTION_LIMIT
      @attempts_limit   = @exceptions_limit + 1 if @attempts_limit <= @exceptions_limit
      @store            = opts[:store]
    end

    # extracts various values from the AMQP header properties
    def decode #:nodoc:
      amqp_headers = header.properties
      @uuid = amqp_headers[:message_id]
      headers = amqp_headers[:headers]
      @format_version = headers[:format_version].to_i
      @flags = headers[:flags].to_i
      @expires_at = headers[:expires_at].to_i
    end

    # build hash with options for the publisher
    def self.publishing_options(opts = {}) #:nodoc:
      flags = 0
      flags |= FLAG_REDUNDANT if opts[:redundant]
      expires_at = now + (opts[:ttl] || DEFAULT_TTL)
      opts = opts.slice(*PUBLISHING_KEYS)
      opts[:message_id] = generate_uuid.to_s
      opts[:headers] = {
        :format_version => FORMAT_VERSION.to_s,
        :flags => flags.to_s,
        :expires_at => expires_at.to_s
      }
      opts
    end

    # unique message id. used to form various keys in the deduplication store.
    def msg_id
      @msg_id ||= "msgid:#{queue}:#{uuid}"
    end

    # current time (UNIX timestamp)
    def now #:nodoc:
      Time.now.to_i
    end

    # current time (UNIX timestamp)
    def self.now #:nodoc:
      Time.now.to_i
    end

    # a message has expired if the header expiration timestamp is smaller than the current time
    def expired?
      @expires_at < now
    end

    # generate uuid for publishing
    def self.generate_uuid
      UUID4R::uuid(1)
    end

    # whether the publisher has tried sending this message to two servers
    def redundant?
      @flags & FLAG_REDUNDANT == FLAG_REDUNDANT
    end

    # whether this is a message we can process without accessing the deduplication store
    def simple?
      !redundant? && attempts_limit == 1
    end

    # store handler timeout timestamp in the deduplication store
    def set_timeout!
      @store.set(msg_id, :timeout, now + timeout)
    end

    # handler timed out?
    def timed_out?
      (t = @store.get(msg_id, :timeout)) && t.to_i < now
    end

    # reset handler timeout in the deduplication store
    def timed_out!
      @store.set(msg_id, :timeout, 0)
    end

    # message handling completed?
    def completed?
      @store.get(msg_id, :status) == "completed"
    end

    # mark message handling complete in the deduplication store
    def completed!
      @store.set(msg_id, :status, "completed")
      timed_out!
    end

    # whether we should wait before running the handler
    def delayed?
      (t = @store.get(msg_id, :delay)) && t.to_i > now
    end

    # store delay value in the deduplication store
    def set_delay!
      @store.set(msg_id, :delay, now + delay)
    end

    # how many times we already tried running the handler
    def attempts
      @store.get(msg_id, :attempts).to_i
    end

    # record the fact that we are trying to run the handler
    def increment_execution_attempts!
      @store.incr(msg_id, :attempts)
    end

    # whether we have already tried running the handler as often as specified when the handler was registered
    def attempts_limit_reached?
      (limit = @store.get(msg_id, :attempts)) && limit.to_i >= attempts_limit
    end

    # increment number of exception occurrences in the deduplication store
    def increment_exception_count!
      @store.incr(msg_id, :exceptions)
    end

    # whether the number of exceptions has exceeded the limit set when the handler was registered
    def exceptions_limit_reached?
      @store.get(msg_id, :exceptions).to_i > exceptions_limit
    end

    # have we already seen this message? if not, set the status to "incomplete" and store
    # the message expiration timestamp in the deduplication store.
    def key_exists?
      old_message = 0 == @store.msetnx(msg_id, :status => "incomplete", :expires => @expires_at)
      if old_message
        logger.debug "Beetle: received duplicate message: #{msg_id} on queue: #{@queue}"
      end
      old_message
    end

    # acquire execution mutex before we run the handler (and delete it if we can't acquire it).
    def aquire_mutex!
      if mutex = @store.setnx(msg_id, :mutex, now)
        logger.debug "Beetle: aquired mutex: #{msg_id}"
      else
        delete_mutex!
      end
      mutex
    end

    # delete execution mutex
    def delete_mutex!
      @store.del(msg_id, :mutex)
      logger.debug "Beetle: deleted mutex: #{msg_id}"
    end

    # process this message and do not allow any exception to escape to the caller
    def process(handler)
      logger.debug "Beetle: processing message #{msg_id}"
      result = nil
      begin
        result = process_internal(handler)
        handler.process_exception(@exception) if @exception
        handler.process_failure(result) if result.failure?
      rescue Exception => e
        Beetle::reraise_expectation_errors!
        logger.warn "Beetle: exception '#{e}' during processing of message #{msg_id}"
        logger.warn "Beetle: backtrace: #{e.backtrace.join("\n")}"
        result = RC::InternalError
      end
      result
    end

    private

    def process_internal(handler)
      if expired?
        logger.warn "Beetle: ignored expired message (#{msg_id})!"
        ack!
        RC::Ancient
      elsif simple?
        ack!
        run_handler(handler) == RC::HandlerCrash ? RC::AttemptsLimitReached : RC::OK
      elsif !key_exists?
        set_timeout!
        run_handler!(handler)
      elsif completed?
        ack!
        RC::OK
      elsif delayed?
        logger.warn "Beetle: ignored delayed message (#{msg_id})!"
        RC::Delayed
      elsif !timed_out?
        RC::HandlerNotYetTimedOut
      elsif attempts_limit_reached?
        ack!
        logger.warn "Beetle: reached the handler execution attempts limit: #{attempts_limit} on #{msg_id}"
        RC::AttemptsLimitReached
      elsif exceptions_limit_reached?
        ack!
        logger.warn "Beetle: reached the handler exceptions limit: #{exceptions_limit} on #{msg_id}"
        RC::ExceptionsLimitReached
      else
        set_timeout!
        if aquire_mutex!
          run_handler!(handler)
        else
          RC::MutexLocked
        end
      end
    end

    def run_handler(handler)
      Timeout::timeout(@timeout) { @handler_result = handler.call(self) }
      RC::OK
    rescue Exception => @exception
      Beetle::reraise_expectation_errors!
      logger.debug "Beetle: message handler crashed on #{msg_id}"
      RC::HandlerCrash
    ensure
      ActiveRecord::Base.clear_active_connections! if defined?(ActiveRecord)
    end

    def run_handler!(handler)
      increment_execution_attempts!
      case result = run_handler(handler)
      when RC::OK
        completed!
        ack!
        result
      else
        handler_failed!(result)
      end
    end

    def handler_failed!(result)
      increment_exception_count!
      if attempts_limit_reached?
        ack!
        logger.debug "Beetle: reached the handler execution attempts limit: #{attempts_limit} on #{msg_id}"
        RC::AttemptsLimitReached
      elsif exceptions_limit_reached?
        ack!
        logger.debug "Beetle: reached the handler exceptions limit: #{exceptions_limit} on #{msg_id}"
        RC::ExceptionsLimitReached
      else
        delete_mutex!
        timed_out!
        set_delay!
        result
      end
    end

    def logger
      @logger ||= self.class.logger
    end

    def self.logger
      Beetle.config.logger
    end

    # ack the message for rabbit. deletes all keys associated with this message in the
    # deduplication store if we are sure this is the last message with the given msg_id.
    def ack!
      #:doc:
      logger.debug "Beetle: ack! for message #{msg_id}"
      header.ack
      return if simple? # simple messages don't use the deduplication store
      if !redundant? || @store.incr(msg_id, :ack_count) == 2
        @store.del_keys(msg_id)
      end
    end
  end
end
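A hedged illustration (editorial, not part of the diff) of the header encoding above: publishing_options ORs FLAG_REDUNDANT into the flags header and stamps expires_at = now + ttl, and the consumer-side predicates redundant? and expired? simply reverse those checks. The literal numbers are arbitrary.

require "beetle"

flags = 0
flags |= Beetle::Message::FLAG_REDUNDANT   # what publishing_options does for :redundant => true
redundant = (flags & Beetle::Message::FLAG_REDUNDANT == Beetle::Message::FLAG_REDUNDANT)  # => true

expires_at = Time.now.to_i + 3600          # publishing_options: now + (opts[:ttl] || DEFAULT_TTL)
expired    = expires_at < Time.now.to_i    # mirrors Message#expired?, false for the next hour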