beetle 0.1

Files changed (46)
  1. data/.gitignore +5 -0
  2. data/MIT-LICENSE +20 -0
  3. data/README.rdoc +82 -0
  4. data/Rakefile +114 -0
  5. data/TODO +7 -0
  6. data/beetle.gemspec +127 -0
  7. data/etc/redis-master.conf +189 -0
  8. data/etc/redis-slave.conf +189 -0
  9. data/examples/README.rdoc +14 -0
  10. data/examples/attempts.rb +66 -0
  11. data/examples/handler_class.rb +64 -0
  12. data/examples/handling_exceptions.rb +73 -0
  13. data/examples/multiple_exchanges.rb +48 -0
  14. data/examples/multiple_queues.rb +43 -0
  15. data/examples/redis_failover.rb +65 -0
  16. data/examples/redundant.rb +65 -0
  17. data/examples/rpc.rb +45 -0
  18. data/examples/simple.rb +39 -0
  19. data/lib/beetle.rb +57 -0
  20. data/lib/beetle/base.rb +78 -0
  21. data/lib/beetle/client.rb +252 -0
  22. data/lib/beetle/configuration.rb +31 -0
  23. data/lib/beetle/deduplication_store.rb +152 -0
  24. data/lib/beetle/handler.rb +95 -0
  25. data/lib/beetle/message.rb +336 -0
  26. data/lib/beetle/publisher.rb +187 -0
  27. data/lib/beetle/r_c.rb +40 -0
  28. data/lib/beetle/subscriber.rb +144 -0
  29. data/script/start_rabbit +29 -0
  30. data/snafu.rb +55 -0
  31. data/test/beetle.yml +81 -0
  32. data/test/beetle/base_test.rb +52 -0
  33. data/test/beetle/bla.rb +0 -0
  34. data/test/beetle/client_test.rb +305 -0
  35. data/test/beetle/configuration_test.rb +5 -0
  36. data/test/beetle/deduplication_store_test.rb +90 -0
  37. data/test/beetle/handler_test.rb +105 -0
  38. data/test/beetle/message_test.rb +744 -0
  39. data/test/beetle/publisher_test.rb +407 -0
  40. data/test/beetle/r_c_test.rb +9 -0
  41. data/test/beetle/subscriber_test.rb +263 -0
  42. data/test/beetle_test.rb +5 -0
  43. data/test/test_helper.rb +20 -0
  44. data/tmp/master/.gitignore +2 -0
  45. data/tmp/slave/.gitignore +3 -0
  46. metadata +192 -0
@@ -0,0 +1,189 @@
+ # Redis configuration file example
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ daemonize no
+
+ # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
+ # You can specify a custom pid file location here.
+ pidfile ./tmp/redis-slave.pid
+
+ # Accept connections on the specified port; the default is 6379
+ port 6380
+
+ # If you want you can bind a single interface; if the bind option is not
+ # specified, all interfaces will listen for connections.
+ #
+ # bind 127.0.0.1
+
+ # Close the connection after a client is idle for N seconds (0 to disable)
+ timeout 300
+
+ # Set the server verbosity level.
+ # It can be one of:
+ # debug (a lot of information, useful for development/testing)
+ # notice (moderately verbose, probably what you want in production)
+ # warning (only very important / critical messages are logged)
+ loglevel debug
+
+ # Specify the log file name. 'stdout' can also be used to force
+ # the daemon to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null
+ logfile stdout
+
+ # Set the number of databases. The default database is DB 0; you can select
+ # a different one on a per-connection basis using SELECT <dbid> where
+ # dbid is a number between 0 and 'databases'-1
+ databases 16
+
+ ################################ SNAPSHOTTING #################################
+ #
+ # Save the DB on disk:
+ #
+ #   save <seconds> <changes>
+ #
+ # This will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ #   after 900 sec (15 min) if at least 1 key changed
+ #   after 300 sec (5 min) if at least 10 keys changed
+ #   after 60 sec if at least 10000 keys changed
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # Compress string objects using LZF when dumping .rdb databases?
+ # By default this is set to 'yes' as it's almost always a win.
+ # If you want to save some CPU in the saving child set it to 'no', but
+ # the dataset will likely be bigger if you have compressible values or keys.
+ rdbcompression yes
+
+ # The filename where to dump the DB
+ dbfilename dump.rdb
+
+ # By default save/load the DB in/from the working directory.
+ # Note that you must specify a directory, not a file name.
+ dir ./tmp/slave/
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave,
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen on another port, and so on.
+ #
+ slaveof 127.0.0.1 6379
+
+ # If the master is password protected (using the "requirepass" configuration
+ # directive below) it is possible to tell the slave to authenticate before
+ # starting the replication synchronization process; otherwise the master will
+ # refuse the slave request.
+ #
+ # masterauth <master-password>
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+ #
+ # requirepass foobared
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of clients connected at the same time. By default there
+ # is no limit, and it's up to the number of file descriptors the Redis process
+ # is able to open. The special value '0' means no limit.
+ # Once the limit is reached, Redis will close all new connections, sending
+ # the error 'max number of clients reached'.
+ #
+ # maxclients 128
+
+ # Don't use more memory than the specified amount of bytes.
+ # When the memory limit is reached Redis will try to remove keys with an
+ # EXPIRE set. It will try to start freeing keys that are going to expire
+ # soon and preserve keys with a longer time to live.
+ # Redis will also try to remove objects from free lists if possible.
+ #
+ # If all this fails, Redis will start to reply with errors to commands
+ # that use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to most read-only commands like GET.
+ #
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
+ # database the memory usage will grow over the weeks; it will be obvious if
+ # it is going to use too much memory in the long run, and you'll have time
+ # to upgrade. With maxmemory, after the limit is reached you'll start to get
+ # errors for write operations, and this may even lead to DB inconsistency.
+ #
+ # maxmemory <bytes>
+
+ ############################## APPEND ONLY MODE ###############################
+
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
+ # with the idea that the latest records will be lost if something like a crash
+ # happens, this is the preferred way to run Redis. If instead you care a lot
+ # about your data and don't want a single record to get lost, you should
+ # enable the append only mode: when this mode is enabled Redis will append
+ # every write operation received to the file appendonly.log. This file will
+ # be read on startup in order to rebuild the full dataset in memory.
+ #
+ # Note that you can have both the async dumps and the append only file if you
+ # like (you have to comment out the "save" statements above to disable the dumps).
+ # Still, if append only mode is enabled, Redis will load the data from the
+ # log file at startup, ignoring the dump.rdb file.
+ #
+ # The name of the append only file is "appendonly.log"
+ #
+ # IMPORTANT: Check BGREWRITEAOF to see how to rewrite the append
+ # log file in the background when it gets too big.
+
+ appendonly yes
+
+ # The fsync() call tells the Operating System to actually write data on disk
+ # instead of waiting for more data in the output buffer. Some OSes will really flush
+ # data on disk, some other OSes will just try to do it ASAP.
+ #
+ # Redis supports three different modes:
+ #
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
+ # always: fsync after every write to the append only log. Slow, safest.
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
+ #
+ # The default is "always", which is the safest of the options. It's up to you to
+ # decide whether you can relax this to "everysec", which will fsync every second,
+ # or to "no", which will let the operating system flush the output buffer when
+ # it wants, for better performance (but if you can live with the idea of
+ # some data loss, consider the default persistence mode, snapshotting).
+
+ # appendfsync always
+ appendfsync everysec
+ # appendfsync no
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. Uses a bit more CPU, but most of the time it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ glueoutputbuf yes
+
+ # Use object sharing. Can save a lot of memory if you have many common
+ # strings in your dataset, but performs lookups against the shared objects
+ # pool, so it uses more CPU and can be a bit slower. Usually it's a good
+ # idea.
+ #
+ # When object sharing is enabled (shareobjects yes) you can use
+ # shareobjectspoolsize to control the size of the pool used for
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
+ # In general you want this value to be at least double the number of
+ # very common strings you have in your dataset.
+ #
+ # WARNING: object sharing is experimental; don't enable this feature
+ # in production before Redis 1.0-stable. Still, please try this feature in
+ # your development environment so that we can test it better.
+ shareobjects no
+ shareobjectspoolsize 1024
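This slave configuration pairs with data/etc/redis-master.conf, which is presumably identical apart from the port, pid file, working directory, and the slaveof line: the slave listens on 6380 and replicates from the master on 6379. The rake redis:start1/start2 targets mentioned in the examples README most likely boil down to pointing redis-server at the respective config file, along the lines of:

  redis-server etc/redis-master.conf
  redis-server etc/redis-slave.conf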
@@ -0,0 +1,14 @@
+ === Examples
+
+ Beetle ships with a number of {example scripts}[http://github.com/xing/beetle/tree/master/examples/].
+
+ The top-level Rakefile comes with targets to start several RabbitMQ and Redis instances
+ locally. Make sure the corresponding binaries are in your search path. Open four new shell
+ windows and execute the following commands:
+
+   rake rabbit:start1
+   rake rabbit:start2
+   rake redis:start1
+   rake redis:start2
+
+ After running the redis_failover.rb script you will need to restart both Redis servers.
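All of the example scripts below follow the same skeleton. Here is a minimal sketch assembled only from client calls that appear in the shipped examples (the :test names are placeholders; with the gem installed, require "beetle" stands in for the relative require the scripts use):

  require "rubygems"
  require "beetle"

  client = Beetle::Client.new
  client.register_queue(:test)      # declare the queue
  client.register_message(:test)    # declare a message (exchange and key default to the message name)
  client.register_handler(:test) do |message|   # handlers can be blocks or Beetle::Handler subclasses
    puts "received: #{message.data}"
  end
  client.publish(:test, "hello")    # returns the number of servers the message was sent to
  client.listen do                  # starts the EventMachine loop
    EM.add_timer(0.1) { client.stop_listening }
  end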
@@ -0,0 +1,66 @@
+ # attempts.rb
+ # this example shows you how to use the exception limiting feature of beetle
+ # it allows you to control the number of retries your handler will go through
+ # with one message before giving up on it
+ #
+ # ! check the examples/README.rdoc for information on starting your redis/rabbit !
+ #
+ # start it with ruby attempts.rb
+
+ require "rubygems"
+ require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+ # set Beetle log level to info, less noisy than debug
+ Beetle.config.logger.level = Logger::INFO
+
+ # set up the client
+ client = Beetle::Client.new
+ client.register_queue(:test)
+ client.register_message(:test)
+
+ # purge the test queue
+ client.purge(:test)
+
+ # empty the dedup store
+ client.deduplication_store.flushdb
+
+ # we're starting with 0 exceptions and expect our handler to process the message until the exception count has reached 10
+ $exceptions = 0
+ $max_exceptions = 10
+
+ # declare a handler class for message processing
+ # in this example we've overridden not only the process method but also the
+ # error and failure methods of the handler base class
+ class Handler < Beetle::Handler
+
+   # called when the handler receives the message - fails every time
+   def process
+     raise "failed #{$exceptions += 1} times"
+   end
+
+   # called whenever process raises an exception
+   def error(exception)
+     logger.info "execution failed: #{exception}"
+   end
+
+   # called when the handler has finally failed
+   # we're stopping the event loop so this script stops after that
+   def failure(result)
+     super
+     EM.stop_event_loop
+   end
+ end
+
+ # register our handler for the message, limit retries with our max_exceptions setting, and set a delay of 0 so retries happen immediately
+ client.register_handler(:test, Handler, :exceptions => $max_exceptions, :delay => 0)
+
+ # publish our test message
+ client.publish(:test, "snafu")
+
+ # and start our listening loop...
+ client.listen
+
+ # sanity check: the handler should have raised exactly $max_exceptions + 1 times (the initial attempt plus the retries)
+ if $exceptions != $max_exceptions + 1
+   raise "something is fishy. Failed #{$exceptions} times"
+ end
@@ -0,0 +1,64 @@
+ # handler_class.rb
+ # this example shows you how to create a simple Beetle::Handler to process your messages
+ #
+ #
+ #
+ # ! check the examples/README.rdoc for information on starting your redis/rabbit !
+ #
+ # start it with ruby handler_class.rb
+
+ require "rubygems"
+ require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+ # set Beetle log level to info, less noisy than debug
+ Beetle.config.logger.level = Logger::INFO
+
+ # set up the client
+ client = Beetle::Client.new
+ client.register_queue(:test)
+ client.register_message(:test)
+
+ # purge the test queue
+ client.purge(:test)
+
+ # empty the dedup store
+ client.deduplication_store.flushdb
+
+ # set up our counter
+ $counter = 0
+
+ # declare a handler class for message processing
+ # this is a very basic example; subclass Beetle::Handler for your own implementation
+ # process is the only method required, and the message accessor is
+ # already implemented - see message.rb for more documentation on what you
+ # can do with it
+ class Handler < Beetle::Handler
+
+   # called when the handler receives the message
+   def process
+     i = message.data.to_i
+     logger.info "adding #{i}"
+     $counter += i
+   end
+
+ end
+
+ # register our handler for the message
+ client.register_handler(:test, Handler)
+
+ # publish 10 test messages
+ message_count = 10
+ published = 0
+ message_count.times {|i| published += client.publish(:test, i) } # publish returns the number of servers the message has been sent to
+ puts "published #{published} test messages"
+
+ # start our listening loop and stop it with a timer -
+ # the 0.1 seconds should be well above the time necessary to handle
+ # all 10 messages
+ client.listen do
+   EM.add_timer(0.1) { client.stop_listening }
+ end
+
+ # the counter should now be message_count*(message_count-1)/2 (0+1+...+9 = 45); if it's not, something went wrong
+ puts "Result: #{$counter}"
+ raise "something is fishy" unless $counter == message_count*(message_count-1)/2
@@ -0,0 +1,73 @@
+ # handling_exceptions.rb
+ # this example shows how beetle retries and exception handling work in general
+ # as you will see in the Beetle::Handler example, every message will raise an exception
+ # once and succeed on the next attempt; the error callback is called on every process
+ # exception
+ # ! check the examples/README.rdoc for information on starting your redis/rabbit !
+ #
+ # start it with ruby handling_exceptions.rb
+
+ require "rubygems"
+ require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+ # set Beetle log level to info, less noisy than debug
+ Beetle.config.logger.level = Logger::INFO
+
+ # set up the client
+ client = Beetle::Client.new
+ client.register_queue(:test)
+ client.register_message(:test)
+
+ # purge the test queue
+ client.purge(:test)
+
+ # empty the dedup store
+ client.deduplication_store.flushdb
+
+ # set up our counter
+ $completed = 0
+
+ # declare a handler class for message processing
+ # the handler fails on the first execution attempt, then succeeds
+ class Handler < Beetle::Handler
+
+   # called when the handler receives the message; fails on the first attempt,
+   # succeeds on the next and counts up our counter
+   def process
+     raise "first attempt for message #{message.data}" if message.attempts == 1
+     logger.info "processing of message #{message.data} succeeded on second attempt. completed: #{$completed+=1}"
+   end
+
+   # called whenever process raises an exception
+   def error(exception)
+     logger.info "execution failed: #{exception}"
+   end
+
+ end
+
+ # register our handler for the message, allow one exception per message, and set a delay of 0 so the retry happens immediately
+ client.register_handler(:test, Handler, :exceptions => 1, :delay => 0)
+
+ # publish 10 test messages
+ n = 0
+ 10.times { |i| n += client.publish(:test, i+1) } # publish returns the number of servers the message has been sent to
+ puts "published #{n} test messages"
+
+ # start the listening loop
+ client.listen do
+   # catch the INT signal and stop listening
+   trap("INT") { client.stop_listening }
+   # add a periodic timer to check whether all 10 messages have been processed without further exceptions
+   timer = EM.add_periodic_timer(1) do
+     if $completed == n
+       timer.cancel
+       client.stop_listening
+     end
+   end
+ end
+
+ puts "Handled #{$completed} messages"
+ if $completed != n
+   raise "Did not handle the correct number of messages"
+ end
+
@@ -0,0 +1,48 @@
+ # multiple_exchanges.rb
+ # this example shows you how to create a queue that is bound to two different exchanges
+ # we'll create the queue foobar and bind it to the exchange foo and the exchange bar with different
+ # routing keys (info tidbit: different exchanges allow better load balancing on multi-core machines)
+ # your handler will then receive messages from both exchanges
+ # ! check the examples/README.rdoc for information on starting your redis/rabbit !
+ #
+ # start it with ruby multiple_exchanges.rb
+
+ require "rubygems"
+ require File.expand_path("../lib/beetle", File.dirname(__FILE__))
+
+ # set Beetle log level to info, less noisy than debug
+ Beetle.config.logger.level = Logger::INFO
+
+ # set up the client
+ client = Beetle::Client.new
+
+ # create two exchanges
+ client.register_exchange(:foo)
+ client.register_exchange(:bar)
+
+ # create a queue foobar bound to the exchange foo with the key foo
+ client.register_queue(:foobar, :exchange => :foo, :key => "foo")
+ # create an additional binding for the foobar queue, this time to the bar exchange and with the bar key
+ client.register_binding(:foobar, :exchange => :bar, :key => "bar")
+
+ # register two messages foo and bar; remember that the default for both exchange and key is the message name
+ client.register_message(:foo)
+ client.register_message(:bar)
+
+ # declare a handler for message processing
+ # this one just gives us some debug output
+ client.register_handler(:foobar) do |message|
+   puts "handler received: #{message.data}"
+ end
+
+ # and publish our two messages
+ client.publish(:foo, "message from foo exchange")
+ client.publish(:bar, "message from bar exchange")
+
+ # this starts the event machine event loop using EM.run
+ # stop listening after 0.1 seconds; this should be more than enough time
+ # to finish processing our messages
+ client.listen do
+   EM.add_timer(0.1) { client.stop_listening }
+ end
+
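Because foobar is bound to both exchanges, the block handler fires once per published message; modulo delivery order, a run should print:

  handler received: message from foo exchange
  handler received: message from bar exchange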