beetle 0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. data/.gitignore +5 -0
  2. data/MIT-LICENSE +20 -0
  3. data/README.rdoc +82 -0
  4. data/Rakefile +114 -0
  5. data/TODO +7 -0
  6. data/beetle.gemspec +127 -0
  7. data/etc/redis-master.conf +189 -0
  8. data/etc/redis-slave.conf +189 -0
  9. data/examples/README.rdoc +14 -0
  10. data/examples/attempts.rb +66 -0
  11. data/examples/handler_class.rb +64 -0
  12. data/examples/handling_exceptions.rb +73 -0
  13. data/examples/multiple_exchanges.rb +48 -0
  14. data/examples/multiple_queues.rb +43 -0
  15. data/examples/redis_failover.rb +65 -0
  16. data/examples/redundant.rb +65 -0
  17. data/examples/rpc.rb +45 -0
  18. data/examples/simple.rb +39 -0
  19. data/lib/beetle.rb +57 -0
  20. data/lib/beetle/base.rb +78 -0
  21. data/lib/beetle/client.rb +252 -0
  22. data/lib/beetle/configuration.rb +31 -0
  23. data/lib/beetle/deduplication_store.rb +152 -0
  24. data/lib/beetle/handler.rb +95 -0
  25. data/lib/beetle/message.rb +336 -0
  26. data/lib/beetle/publisher.rb +187 -0
  27. data/lib/beetle/r_c.rb +40 -0
  28. data/lib/beetle/subscriber.rb +144 -0
  29. data/script/start_rabbit +29 -0
  30. data/snafu.rb +55 -0
  31. data/test/beetle.yml +81 -0
  32. data/test/beetle/base_test.rb +52 -0
  33. data/test/beetle/bla.rb +0 -0
  34. data/test/beetle/client_test.rb +305 -0
  35. data/test/beetle/configuration_test.rb +5 -0
  36. data/test/beetle/deduplication_store_test.rb +90 -0
  37. data/test/beetle/handler_test.rb +105 -0
  38. data/test/beetle/message_test.rb +744 -0
  39. data/test/beetle/publisher_test.rb +407 -0
  40. data/test/beetle/r_c_test.rb +9 -0
  41. data/test/beetle/subscriber_test.rb +263 -0
  42. data/test/beetle_test.rb +5 -0
  43. data/test/test_helper.rb +20 -0
  44. data/tmp/master/.gitignore +2 -0
  45. data/tmp/slave/.gitignore +3 -0
  46. metadata +192 -0
data/lib/beetle/publisher.rb
@@ -0,0 +1,187 @@
+ module Beetle
+   # Provides the publishing logic implementation.
+   class Publisher < Base
+
+     def initialize(client, options = {}) #:nodoc:
+       super
+       @exchanges_with_bound_queues = {}
+       @dead_servers = {}
+       @bunnies = {}
+     end
+
+     def publish(message_name, data, opts={}) #:nodoc:
+       opts = @client.messages[message_name].merge(opts.symbolize_keys)
+       exchange_name = opts.delete(:exchange)
+       opts.delete(:queue)
+       recycle_dead_servers unless @dead_servers.empty?
+       if opts[:redundant]
+         publish_with_redundancy(exchange_name, message_name, data, opts)
+       else
+         publish_with_failover(exchange_name, message_name, data, opts)
+       end
+     end
+
+     def publish_with_failover(exchange_name, message_name, data, opts) #:nodoc:
+       tries = @servers.size
+       logger.debug "Beetle: sending #{message_name}"
+       published = 0
+       opts = Message.publishing_options(opts)
+       begin
+         select_next_server
+         bind_queues_for_exchange(exchange_name)
+         logger.debug "Beetle: trying to send message #{message_name}:#{opts[:message_id]} to #{@server}"
+         exchange(exchange_name).publish(data, opts)
+         logger.debug "Beetle: message sent!"
+         published = 1
+       rescue Bunny::ServerDownError, Bunny::ConnectionError
+         stop!
+         mark_server_dead
+         tries -= 1
+         retry if tries > 0
+         logger.error "Beetle: message could not be delivered: #{message_name}"
+       end
+       published
+     end
+
+     def publish_with_redundancy(exchange_name, message_name, data, opts) #:nodoc:
+       if @servers.size < 2
+         logger.error "Beetle: at least two active servers are required for redundant publishing"
+         return publish_with_failover(exchange_name, message_name, data, opts)
+       end
+       published = []
+       opts = Message.publishing_options(opts)
+       loop do
+         break if published.size == 2 || @servers.empty? || published == @servers
+         begin
+           select_next_server
+           next if published.include? @server
+           bind_queues_for_exchange(exchange_name)
+           logger.debug "Beetle: trying to send #{message_name}:#{opts[:message_id]} to #{@server}"
+           exchange(exchange_name).publish(data, opts)
+           published << @server
+           logger.debug "Beetle: message sent (#{published})!"
+         rescue Bunny::ServerDownError, Bunny::ConnectionError
+           stop!
+           mark_server_dead
+         end
+       end
+       case published.size
+       when 0
+         logger.error "Beetle: message could not be delivered: #{message_name}"
+       when 1
+         logger.warn "Beetle: failed to send message redundantly"
+       end
+       published.size
+     end
+
+     RPC_DEFAULT_TIMEOUT = 10 #:nodoc:
+
+     def rpc(message_name, data, opts={}) #:nodoc:
+       opts = @client.messages[message_name].merge(opts.symbolize_keys)
+       exchange_name = opts.delete(:exchange)
+       opts.delete(:queue)
+       recycle_dead_servers unless @dead_servers.empty?
+       tries = @servers.size
+       logger.debug "Beetle: performing rpc with message #{message_name}"
+       result = nil
+       status = "TIMEOUT"
+       begin
+         select_next_server
+         bind_queues_for_exchange(exchange_name)
+         # create non durable, autodeleted temporary queue with a server assigned name
+         queue = bunny.queue
+         opts = Message.publishing_options(opts.merge :reply_to => queue.name)
+         logger.debug "Beetle: trying to send #{message_name}:#{opts[:message_id]} to #{@server}"
+         exchange(exchange_name).publish(data, opts)
+         logger.debug "Beetle: message sent!"
+         logger.debug "Beetle: listening on reply queue #{queue.name}"
+         queue.subscribe(:message_max => 1, :timeout => opts[:timeout] || RPC_DEFAULT_TIMEOUT) do |msg|
+           logger.debug "Beetle: received reply!"
+           result = msg[:payload]
+           status = msg[:header].properties[:headers][:status]
+         end
+         logger.debug "Beetle: rpc complete!"
+       rescue Bunny::ServerDownError, Bunny::ConnectionError
+         stop!
+         mark_server_dead
+         tries -= 1
+         retry if tries > 0
+         logger.error "Beetle: message could not be delivered: #{message_name}"
+       end
+       [status, result]
+     end
+
+     def purge(queue_name) #:nodoc:
+       each_server { queue(queue_name).purge rescue nil }
+     end
+
+     def stop #:nodoc:
+       each_server { stop! }
+     end
+
+     private
+
+     def bunny
+       @bunnies[@server] ||= new_bunny
+     end
+
+     def new_bunny
+       b = Bunny.new(:host => current_host, :port => current_port, :logging => !!@options[:logging],
+                     :user => Beetle.config.user, :pass => Beetle.config.password, :vhost => Beetle.config.vhost)
+       b.start
+       b
+     end
+
+     def recycle_dead_servers
+       recycle = []
+       @dead_servers.each do |s, dead_since|
+         recycle << s if dead_since < 10.seconds.ago
+       end
+       @servers.concat recycle
+       recycle.each {|s| @dead_servers.delete(s)}
+     end
+
+     def mark_server_dead
+       logger.info "Beetle: server #{@server} down: #{$!}"
+       @dead_servers[@server] = Time.now
+       @servers.delete @server
+       @server = @servers[rand @servers.size]
+     end
+
+     def select_next_server
+       return logger.error("Beetle: message could not be delivered - no server available") && 0 if @servers.empty?
+       set_current_server(@servers[((@servers.index(@server) || 0)+1) % @servers.size])
+     end
+
+     def create_exchange!(name, opts)
+       bunny.exchange(name, opts)
+     end
+
+     def bind_queues_for_exchange(exchange_name)
+       return if @exchanges_with_bound_queues.include?(exchange_name)
+       @client.exchanges[exchange_name][:queues].each {|q| queue(q) }
+       @exchanges_with_bound_queues[exchange_name] = true
+     end
+
+     # TODO: Refactor, fetch the keys and stuff itself
+     def bind_queue!(queue_name, creation_keys, exchange_name, binding_keys)
+       logger.debug("Creating queue with opts: #{creation_keys.inspect}")
+       queue = bunny.queue(queue_name, creation_keys)
+       logger.debug("Binding queue #{queue_name} to #{exchange_name} with opts: #{binding_keys.inspect}")
+       queue.bind(exchange(exchange_name), binding_keys)
+       queue
+     end
+
+     def stop!
+       begin
+         bunny.stop
+       rescue Exception
+         Beetle::reraise_expectation_errors!
+       ensure
+         @bunnies[@server] = nil
+         @exchanges[@server] = {}
+         @queues[@server] = {}
+       end
+     end
+   end
+ end
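For orientation, the publisher above is normally driven through Beetle::Client rather than used directly. A minimal sketch follows; the message name, payload and :redundant flag are illustrative assumptions, not values taken from this diff, and a running RabbitMQ broker (two, for actual redundancy) is required:

    require "beetle"

    client = Beetle::Client.new

    # :redundant => true makes Publisher#publish pick publish_with_redundancy;
    # without it, publish_with_failover is used
    client.register_message(:something_happened, :redundant => true)

    # assuming Client#publish delegates to Publisher#publish, the return value
    # is the number of servers the message was written to (0, 1 or 2)
    client.publish(:something_happened, "some payload")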
data/lib/beetle/r_c.rb
@@ -0,0 +1,40 @@
+ module Beetle
+   module RC #:nodoc:all
+
+     # message processing result return codes
+     class ReturnCode
+       def initialize(*args)
+         @recover = args.delete :recover
+         @failure = args.delete :failure
+         @name = args.first
+       end
+
+       def inspect
+         @name.blank? ? super : "Beetle::RC::#{@name}"
+       end
+
+       def recover?
+         @recover
+       end
+
+       def failure?
+         @failure
+       end
+     end
+
+     def self.rc(name, *args)
+       const_set name, ReturnCode.new(name, *args)
+     end
+
+     rc :OK
+     rc :Ancient, :failure
+     rc :AttemptsLimitReached, :failure
+     rc :ExceptionsLimitReached, :failure
+     rc :Delayed, :recover
+     rc :HandlerCrash, :recover
+     rc :HandlerNotYetTimedOut, :recover
+     rc :MutexLocked, :recover
+     rc :InternalError, :recover
+
+   end
+ end
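The constants above are the values Message#process hands back to the subscriber, which branches on recover? and failure? (as the subscriber code further down shows). A short, self-contained sketch using the constants directly; in real use the value comes from message processing, not a literal assignment:

    require "beetle"

    result = Beetle::RC::HandlerCrash        # stand-in for a Message#process result

    if result.recover?
      puts "transient failure: leave unacked and redeliver later"
    elsif result.failure?
      puts "permanent failure: drop the message"
    end

    puts Beetle::RC::HandlerCrash.inspect    # => "Beetle::RC::HandlerCrash"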
data/lib/beetle/subscriber.rb
@@ -0,0 +1,144 @@
+ module Beetle
+   # Manages subscriptions and message processing on the receiver side of things.
+   class Subscriber < Base
+
+     # create a new subscriber instance
+     def initialize(client, options = {}) #:nodoc:
+       super
+       @handlers = {}
+       @amqp_connections = {}
+       @mqs = {}
+     end
+
+     # the client calls this method to subscribe to all queues on all servers which have
+     # handlers registered for the given list of messages. this method does the following
+     # things:
+     #
+     # * creates all exchanges which have been registered for the given messages
+     # * creates and binds queues which have been registered for the exchanges
+     # * subscribes the handlers for all these queues
+     #
+     # yields before entering the eventmachine loop (if a block was given)
+     def listen(messages) #:nodoc:
+       EM.run do
+         exchanges = exchanges_for_messages(messages)
+         create_exchanges(exchanges)
+         queues = queues_for_exchanges(exchanges)
+         bind_queues(queues)
+         subscribe_queues(queues)
+         yield if block_given?
+       end
+     end
+
+     # stops the eventmachine loop
+     def stop! #:nodoc:
+       EM.stop_event_loop
+     end
+
+     # register handler for the given queues (see Client#register_handler)
+     def register_handler(queues, opts={}, handler=nil, &block) #:nodoc:
+       Array(queues).each do |queue|
+         @handlers[queue] = [opts.symbolize_keys, handler || block]
+       end
+     end
+
+     private
+
+     def exchanges_for_messages(messages)
+       @client.messages.slice(*messages).map{|_, opts| opts[:exchange]}.uniq
+     end
+
+     def queues_for_exchanges(exchanges)
+       @client.exchanges.slice(*exchanges).map{|_, opts| opts[:queues]}.flatten.uniq
+     end
+
+     def create_exchanges(exchanges)
+       each_server do
+         exchanges.each { |name| exchange(name) }
+       end
+     end
+
+     def bind_queues(queues)
+       each_server do
+         queues.each { |name| queue(name) }
+       end
+     end
+
+     def subscribe_queues(queues)
+       each_server do
+         queues.each { |name| subscribe(name) if @handlers.include?(name) }
+       end
+     end
+
+     # returns the mq object for the given server or returns a new one created with the
+     # prefetch(1) option. this tells it to just send one message to the receiving buffer
+     # (instead of filling it). this is necessary to ensure that one subscriber always just
+     # handles one single message. we cannot ensure reliability if the buffer is filled with
+     # messages and crashes.
+     def mq(server=@server)
+       @mqs[server] ||= MQ.new(amqp_connection).prefetch(1)
+     end
+
+     def subscribe(queue_name)
+       error("no handler for queue #{queue_name}") unless @handlers.include?(queue_name)
+       opts, handler = @handlers[queue_name]
+       queue_opts = @client.queues[queue_name][:amqp_name]
+       amqp_queue_name = queue_opts
+       callback = create_subscription_callback(queue_name, amqp_queue_name, handler, opts)
+       logger.debug "Beetle: subscribing to queue #{amqp_queue_name} with key # on server #{@server}"
+       begin
+         queues[queue_name].subscribe(opts.slice(*SUBSCRIPTION_KEYS).merge(:key => "#", :ack => true), &callback)
+       rescue MQ::Error
+         error("Beetle: binding multiple handlers for the same queue isn't possible.")
+       end
+     end
+
+     def create_subscription_callback(queue_name, amqp_queue_name, handler, opts)
+       server = @server
+       lambda do |header, data|
+         begin
+           processor = Handler.create(handler, opts)
+           message_options = opts.merge(:server => server, :store => @client.deduplication_store)
+           m = Message.new(amqp_queue_name, header, data, message_options)
+           result = m.process(processor)
+           if result.recover?
+             sleep 1
+             mq(server).recover
+           elsif reply_to = header.properties[:reply_to]
+             status = result == Beetle::RC::OK ? "OK" : "FAILED"
+             exchange = MQ::Exchange.new(mq(server), :direct, "", :key => reply_to)
+             exchange.publish(m.handler_result.to_s, :headers => {:status => status})
+           end
+         rescue Exception
+           Beetle::reraise_expectation_errors!
+           # swallow all exceptions
+           logger.error "Beetle: internal error during message processing: #{$!}: #{$!.backtrace.join("\n")}"
+         end
+       end
+     end
+
+     def create_exchange!(name, opts)
+       mq.__send__(opts[:type], name, opts.slice(*EXCHANGE_CREATION_KEYS))
+     end
+
+     def bind_queue!(queue_name, creation_keys, exchange_name, binding_keys)
+       queue = mq.queue(queue_name, creation_keys)
+       exchange = exchange(exchange_name)
+       queue.bind(exchange, binding_keys)
+       queue
+     end
+
+     def amqp_connection(server=@server)
+       @amqp_connections[server] ||= new_amqp_connection
+     end
+
+     def new_amqp_connection
+       # FIXME: wtf, how to test that reconnection feature....
+       con = AMQP.connect(:host => current_host, :port => current_port,
+                          :user => Beetle.config.user, :pass => Beetle.config.password, :vhost => Beetle.config.vhost)
+       con.instance_variable_set("@on_disconnect", proc{ con.__send__(:reconnect) })
+       con
+     end
+
+   end
+ end
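Seen from the client side, the subscriber above is exercised roughly as sketched below. This is a sketch only: it assumes Client#register_handler accepts a queue name plus a block, mirroring Subscriber#register_handler, and the message name and payload access are illustrative rather than taken from this diff.

    require "beetle"

    client = Beetle::Client.new
    client.register_message(:something_happened)

    # assumption: a block handler, which Handler.create wraps inside the
    # subscription callback shown above
    client.register_handler(:something_happened) do |message|
      puts "received: #{message.data}"
    end

    # subscribe enters the EventMachine loop via Subscriber#listen and blocks
    # until Subscriber#stop! is invoked
    client.subscribe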
data/script/start_rabbit
@@ -0,0 +1,29 @@
+ #!/bin/bash
+
+ # export RABBITMQ_MNESIA_BASE=/var/lib/rabbitmq/mnesia2
+ # Defaults to /var/lib/rabbitmq/mnesia. Set this to the directory where Mnesia
+ # database files should be placed.
+
+ # export RABBITMQ_LOG_BASE
+ # Defaults to /var/log/rabbitmq. Log files generated by the server will be placed
+ # in this directory.
+
+ export RABBITMQ_NODENAME=$1
+ # Defaults to rabbit. This can be useful if you want to run more than one node
+ # per machine - RABBITMQ_NODENAME should be unique per erlang-node-and-machine
+ # combination. See the clustering on a single machine guide at
+ # <http://www.rabbitmq.com/clustering.html#single-machine> for details.
+
+ # RABBITMQ_NODE_IP_ADDRESS
+ # Defaults to 0.0.0.0. This can be changed if you only want to bind to one
+ # network interface.
+
+ export RABBITMQ_NODE_PORT=$2
+ # Defaults to 5672.
+
+ # RABBITMQ_CLUSTER_CONFIG_FILE
+ # Defaults to /etc/rabbitmq/rabbitmq_cluster.config. If this file is present it
+ # is used by the server to auto-configure a RabbitMQ cluster. See the clustering
+ # guide at <http://www.rabbitmq.com/clustering.html> for details.
+
+ rabbitmq-server
data/snafu.rb
@@ -0,0 +1,55 @@
+ # The simplest case
+ client.register_message(:something_happened) # => key: something_happened
+
+ # with options
+ client.register_message(:
+
+ ####################
+ # Message Grouping #
+ ####################
+
+ client.register_message(:delete_something, :group => :jobs) # => key: jobs.delete_something
+ client.register_message(:create_something, :group => :jobs) # => key: jobs.create_something
+
+ # You can register a handler for a message group
+ client.register_handler(JobsHandler, :group => :jobs) # bind queue with: jobs.*
+
+ # And still register on single messages
+ client.register_handler(DeletedJobHandler, :delete_something) # bind queue with: *.delete_something
+
+ ######################
+ # Handler Definition #
+ ######################
+
+ # With a Handler class that implements .process(message)
+ client.register_handler(MyProcessor, :something_happened) # => queue: my_processor
+
+ # With a String / Symbol and a block
+ client.register_handler("Other Processor", :delete_something, :something_happened) { |message| foobar(message) } # => queue: other_processor, bound with: *.delete_something and *.something_happened
+
+ # With extra parameters
+ client.register_handler(VeryImportant, :delete_something, :immediate => true) # queue: very_important, :immediate => true
+
+ ###################################
+ # Wiring, Subscribing, Publishing #
+ ###################################
+ client.wire! # => all the binding magic happens
+
+ client.subscribe
+
+ client.publish(:delete_something, 'payload')
+
+ __END__
+
+ What's happening when wire! is called? (pseudocode)
+ 1. all the messages are registered
+    messages = [{:name => :delete_something, :group => :jobs, :bound => false}, {:name => :something_happened, :bound => false}]
+ 2. all the queues for the handlers are created and bound...
+    my_processor_queue = queue(:my_processor).bind(exchange, :key => '*.something_happened')
+    jobs_handler_queue = queue(:jobs_handler).bind(exchange, :key => 'jobs.*')
+    handlers_with_queues = [[jobs_handler_queue, JobsHandler], [my_processor_queue, block_or_class]]
+ 3. every handler definition binds a queue for the handler to a list of messages and marks the message as bound.
+ 4. If in the end a message isn't bound to a queue at least once, an exception is raised
+
+ Exceptions will be thrown if:
+ * after all m