beetle 0.1 → 0.2.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (48) hide show
  1. data/README.rdoc +18 -8
  2. data/beetle.gemspec +37 -121
  3. data/bin/beetle +9 -0
  4. data/examples/README.rdoc +0 -2
  5. data/examples/rpc.rb +3 -2
  6. data/ext/mkrf_conf.rb +19 -0
  7. data/lib/beetle.rb +2 -2
  8. data/lib/beetle/base.rb +1 -8
  9. data/lib/beetle/client.rb +16 -14
  10. data/lib/beetle/commands.rb +30 -0
  11. data/lib/beetle/commands/configuration_client.rb +73 -0
  12. data/lib/beetle/commands/configuration_server.rb +85 -0
  13. data/lib/beetle/configuration.rb +70 -7
  14. data/lib/beetle/deduplication_store.rb +50 -38
  15. data/lib/beetle/handler.rb +2 -5
  16. data/lib/beetle/logging.rb +7 -0
  17. data/lib/beetle/message.rb +11 -13
  18. data/lib/beetle/publisher.rb +12 -4
  19. data/lib/beetle/r_c.rb +2 -1
  20. data/lib/beetle/redis_configuration_client.rb +136 -0
  21. data/lib/beetle/redis_configuration_server.rb +301 -0
  22. data/lib/beetle/redis_ext.rb +79 -0
  23. data/lib/beetle/redis_master_file.rb +35 -0
  24. data/lib/beetle/redis_server_info.rb +65 -0
  25. data/lib/beetle/subscriber.rb +4 -1
  26. data/test/beetle/configuration_test.rb +14 -2
  27. data/test/beetle/deduplication_store_test.rb +61 -43
  28. data/test/beetle/message_test.rb +28 -4
  29. data/test/beetle/publisher_test.rb +17 -3
  30. data/test/beetle/redis_configuration_client_test.rb +97 -0
  31. data/test/beetle/redis_configuration_server_test.rb +278 -0
  32. data/test/beetle/redis_ext_test.rb +71 -0
  33. data/test/beetle/redis_master_file_test.rb +39 -0
  34. data/test/test_helper.rb +13 -1
  35. metadata +162 -69
  36. data/.gitignore +0 -5
  37. data/MIT-LICENSE +0 -20
  38. data/Rakefile +0 -114
  39. data/TODO +0 -7
  40. data/etc/redis-master.conf +0 -189
  41. data/etc/redis-slave.conf +0 -189
  42. data/examples/redis_failover.rb +0 -65
  43. data/script/start_rabbit +0 -29
  44. data/snafu.rb +0 -55
  45. data/test/beetle.yml +0 -81
  46. data/test/beetle/bla.rb +0 -0
  47. data/tmp/master/.gitignore +0 -2
  48. data/tmp/slave/.gitignore +0 -3
data/TODO DELETED
@@ -1,7 +0,0 @@
1
- - docs
2
- - example scripts
3
- - let handlers decide dynamically how long to wait for a retry
4
- - review log levels in all logger statements
5
- - optimize redis accesses:
6
- - combine multiple redis accesses into one
7
- - ack redundant messages with attempts limit 1 immediately?
@@ -1,189 +0,0 @@
1
- # Redis configuration file example
2
-
3
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
- daemonize no
6
-
7
- # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
8
- # You can specify a custom pid file location here.
9
- pidfile ./tmp/redis-master.pid
10
-
11
- # Accept connections on the specified port, default is 6379
12
- port 6379
13
-
14
- # If you want you can bind a single interface, if the bind option is not
15
- # specified all the interfaces will listen for connections.
16
- #
17
- # bind 127.0.0.1
18
-
19
- # Close the connection after a client is idle for N seconds (0 to disable)
20
- timeout 300
21
-
22
- # Set server verbosity to 'debug'
23
- # it can be one of:
24
- # debug (a lot of information, useful for development/testing)
25
- # notice (moderately verbose, what you want in production probably)
26
- # warning (only very important / critical messages are logged)
27
- loglevel debug
28
-
29
- # Specify the log file name. Also 'stdout' can be used to force
30
- # the daemon to log on the standard output. Note that if you use standard
31
- # output for logging but daemonize, logs will be sent to /dev/null
32
- logfile stdout
33
-
34
- # Set the number of databases. The default database is DB 0, you can select
35
- # a different one on a per-connection basis using SELECT <dbid> where
36
- # dbid is a number between 0 and 'databases'-1
37
- databases 16
38
-
39
- ################################ SNAPSHOTTING #################################
40
- #
41
- # Save the DB on disk:
42
- #
43
- # save <seconds> <changes>
44
- #
45
- # Will save the DB if both the given number of seconds and the given
46
- # number of write operations against the DB occurred.
47
- #
48
- # In the example below the behaviour will be to save:
49
- # after 900 sec (15 min) if at least 1 key changed
50
- # after 300 sec (5 min) if at least 10 keys changed
51
- # after 60 sec if at least 10000 keys changed
52
- save 900 1
53
- save 300 10
54
- save 60 10000
55
-
56
- # Compress string objects using LZF when dump .rdb databases?
57
- # For default that's set to 'yes' as it's almost always a win.
58
- # If you want to save some CPU in the saving child set it to 'no' but
59
- # the dataset will likely be bigger if you have compressible values or keys.
60
- rdbcompression yes
61
-
62
- # The filename where to dump the DB
63
- dbfilename dump.rdb
64
-
65
- # For default save/load DB in/from the working directory
66
- # Note that you must specify a directory not a file name.
67
- dir ./tmp/master/
68
-
69
- ################################# REPLICATION #################################
70
-
71
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
72
- # another Redis server. Note that the configuration is local to the slave
73
- # so for example it is possible to configure the slave to save the DB with a
74
- # different interval, or to listen to another port, and so on.
75
- #
76
- # slaveof <masterip> <masterport>
77
-
78
- # If the master is password protected (using the "requirepass" configuration
79
- # directive below) it is possible to tell the slave to authenticate before
80
- # starting the replication synchronization process, otherwise the master will
81
- # refuse the slave request.
82
- #
83
- # masterauth <master-password>
84
-
85
- ################################## SECURITY ###################################
86
-
87
- # Require clients to issue AUTH <PASSWORD> before processing any other
88
- # commands. This might be useful in environments in which you do not trust
89
- # others with access to the host running redis-server.
90
- #
91
- # This should stay commented out for backward compatibility and because most
92
- # people do not need auth (e.g. they run their own servers).
93
- #
94
- # requirepass foobared
95
-
96
- ################################### LIMITS ####################################
97
-
98
- # Set the max number of connected clients at the same time. By default there
99
- # is no limit, and it's up to the number of file descriptors the Redis process
100
- # is able to open. The special value '0' means no limits.
101
- # Once the limit is reached Redis will close all the new connections sending
102
- # an error 'max number of clients reached'.
103
- #
104
- # maxclients 128
105
-
106
- # Don't use more memory than the specified amount of bytes.
107
- # When the memory limit is reached Redis will try to remove keys with an
108
- # EXPIRE set. It will try to start freeing keys that are going to expire
109
- # in little time and preserve keys with a longer time to live.
110
- # Redis will also try to remove objects from free lists if possible.
111
- #
112
- # If all this fails, Redis will start to reply with errors to commands
113
- # that will use more memory, like SET, LPUSH, and so on, and will continue
114
- # to reply to most read-only commands like GET.
115
- #
116
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
117
- # 'state' server or cache, not as a real DB. When Redis is used as a real
118
- # database the memory usage will grow over the weeks, it will be obvious if
119
- # it is going to use too much memory in the long run, and you'll have the time
120
- # to upgrade. With maxmemory after the limit is reached you'll start to get
121
- # errors for write operations, and this may even lead to DB inconsistency.
122
- #
123
- # maxmemory <bytes>
124
-
125
- ############################## APPEND ONLY MODE ###############################
126
-
127
- # By default Redis asynchronously dumps the dataset on disk. If you can live
128
- # with the idea that the latest records will be lost if something like a crash
129
- # happens this is the preferred way to run Redis. If instead you care a lot
130
- # about your data and don't want that a single record can get lost you should
131
- # enable the append only mode: when this mode is enabled Redis will append
132
- # every write operation received in the file appendonly.log. This file will
133
- # be read on startup in order to rebuild the full dataset in memory.
134
- #
135
- # Note that you can have both the async dumps and the append only file if you
136
- # like (you have to comment the "save" statements above to disable the dumps).
137
- # Still if append only mode is enabled Redis will load the data from the
138
- # log file at startup ignoring the dump.rdb file.
139
- #
140
- # The name of the append only file is "appendonly.log"
141
- #
142
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
143
- # log file in background when it gets too big.
144
-
145
- appendonly yes
146
-
147
- # The fsync() call tells the Operating System to actually write data on disk
148
- # instead to wait for more data in the output buffer. Some OS will really flush
149
- # data on disk, some other OS will just try to do it ASAP.
150
- #
151
- # Redis supports three different modes:
152
- #
153
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
154
- # always: fsync after every write to the append only log . Slow, Safest.
155
- # everysec: fsync only if one second passed since the last fsync. Compromise.
156
- #
157
- # The default is "always" that's the safer of the options. It's up to you to
158
- # understand if you can relax this to "everysec" that will fsync every second
159
- # or to "no" that will let the operating system flush the output buffer when
160
- # it wants, for better performance (but if you can live with the idea of
161
- # some data loss consider the default persistence mode that's snapshotting).
162
-
163
- # appendfsync always
164
- appendfsync everysec
165
- # appendfsync no
166
-
167
- ############################### ADVANCED CONFIG ###############################
168
-
169
- # Glue small output buffers together in order to send small replies in a
170
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
171
- # in terms of number of queries per second. Use 'yes' if unsure.
172
- glueoutputbuf yes
173
-
174
- # Use object sharing. Can save a lot of memory if you have many common
175
- # string in your dataset, but performs lookups against the shared objects
176
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
177
- # idea.
178
- #
179
- # When object sharing is enabled (shareobjects yes) you can use
180
- # shareobjectspoolsize to control the size of the pool used in order to try
181
- # object sharing. A bigger pool size will lead to better sharing capabilities.
182
- # In general you want this value to be at least the double of the number of
183
- # very common strings you have in your dataset.
184
- #
185
- # WARNING: object sharing is experimental, don't enable this feature
186
- # in production before Redis 1.0-stable. Still please try this feature in
187
- # your development environment so that we can test it better.
188
- shareobjects no
189
- shareobjectspoolsize 1024
data/etc/redis-slave.conf DELETED
@@ -1,189 +0,0 @@
1
- # Redis configuration file example
2
-
3
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
- daemonize no
6
-
7
- # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
8
- # You can specify a custom pid file location here.
9
- pidfile ./tmp/redis-slave.pid
10
-
11
- # Accept connections on the specified port, default is 6379
12
- port 6380
13
-
14
- # If you want you can bind a single interface, if the bind option is not
15
- # specified all the interfaces will listen for connections.
16
- #
17
- # bind 127.0.0.1
18
-
19
- # Close the connection after a client is idle for N seconds (0 to disable)
20
- timeout 300
21
-
22
- # Set server verbosity to 'debug'
23
- # it can be one of:
24
- # debug (a lot of information, useful for development/testing)
25
- # notice (moderately verbose, what you want in production probably)
26
- # warning (only very important / critical messages are logged)
27
- loglevel debug
28
-
29
- # Specify the log file name. Also 'stdout' can be used to force
30
- # the daemon to log on the standard output. Note that if you use standard
31
- # output for logging but daemonize, logs will be sent to /dev/null
32
- logfile stdout
33
-
34
- # Set the number of databases. The default database is DB 0, you can select
35
- # a different one on a per-connection basis using SELECT <dbid> where
36
- # dbid is a number between 0 and 'databases'-1
37
- databases 16
38
-
39
- ################################ SNAPSHOTTING #################################
40
- #
41
- # Save the DB on disk:
42
- #
43
- # save <seconds> <changes>
44
- #
45
- # Will save the DB if both the given number of seconds and the given
46
- # number of write operations against the DB occurred.
47
- #
48
- # In the example below the behaviour will be to save:
49
- # after 900 sec (15 min) if at least 1 key changed
50
- # after 300 sec (5 min) if at least 10 keys changed
51
- # after 60 sec if at least 10000 keys changed
52
- save 900 1
53
- save 300 10
54
- save 60 10000
55
-
56
- # Compress string objects using LZF when dump .rdb databases?
57
- # For default that's set to 'yes' as it's almost always a win.
58
- # If you want to save some CPU in the saving child set it to 'no' but
59
- # the dataset will likely be bigger if you have compressible values or keys.
60
- rdbcompression yes
61
-
62
- # The filename where to dump the DB
63
- dbfilename dump.rdb
64
-
65
- # For default save/load DB in/from the working directory
66
- # Note that you must specify a directory not a file name.
67
- dir ./tmp/slave/
68
-
69
- ################################# REPLICATION #################################
70
-
71
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
72
- # another Redis server. Note that the configuration is local to the slave
73
- # so for example it is possible to configure the slave to save the DB with a
74
- # different interval, or to listen to another port, and so on.
75
- #
76
- slaveof 127.0.0.1 6379
77
-
78
- # If the master is password protected (using the "requirepass" configuration
79
- # directive below) it is possible to tell the slave to authenticate before
80
- # starting the replication synchronization process, otherwise the master will
81
- # refuse the slave request.
82
- #
83
- # masterauth <master-password>
84
-
85
- ################################## SECURITY ###################################
86
-
87
- # Require clients to issue AUTH <PASSWORD> before processing any other
88
- # commands. This might be useful in environments in which you do not trust
89
- # others with access to the host running redis-server.
90
- #
91
- # This should stay commented out for backward compatibility and because most
92
- # people do not need auth (e.g. they run their own servers).
93
- #
94
- # requirepass foobared
95
-
96
- ################################### LIMITS ####################################
97
-
98
- # Set the max number of connected clients at the same time. By default there
99
- # is no limit, and it's up to the number of file descriptors the Redis process
100
- # is able to open. The special value '0' means no limits.
101
- # Once the limit is reached Redis will close all the new connections sending
102
- # an error 'max number of clients reached'.
103
- #
104
- # maxclients 128
105
-
106
- # Don't use more memory than the specified amount of bytes.
107
- # When the memory limit is reached Redis will try to remove keys with an
108
- # EXPIRE set. It will try to start freeing keys that are going to expire
109
- # in little time and preserve keys with a longer time to live.
110
- # Redis will also try to remove objects from free lists if possible.
111
- #
112
- # If all this fails, Redis will start to reply with errors to commands
113
- # that will use more memory, like SET, LPUSH, and so on, and will continue
114
- # to reply to most read-only commands like GET.
115
- #
116
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
117
- # 'state' server or cache, not as a real DB. When Redis is used as a real
118
- # database the memory usage will grow over the weeks, it will be obvious if
119
- # it is going to use too much memory in the long run, and you'll have the time
120
- # to upgrade. With maxmemory after the limit is reached you'll start to get
121
- # errors for write operations, and this may even lead to DB inconsistency.
122
- #
123
- # maxmemory <bytes>
124
-
125
- ############################## APPEND ONLY MODE ###############################
126
-
127
- # By default Redis asynchronously dumps the dataset on disk. If you can live
128
- # with the idea that the latest records will be lost if something like a crash
129
- # happens this is the preferred way to run Redis. If instead you care a lot
130
- # about your data and don't want that a single record can get lost you should
131
- # enable the append only mode: when this mode is enabled Redis will append
132
- # every write operation received in the file appendonly.log. This file will
133
- # be read on startup in order to rebuild the full dataset in memory.
134
- #
135
- # Note that you can have both the async dumps and the append only file if you
136
- # like (you have to comment the "save" statements above to disable the dumps).
137
- # Still if append only mode is enabled Redis will load the data from the
138
- # log file at startup ignoring the dump.rdb file.
139
- #
140
- # The name of the append only file is "appendonly.log"
141
- #
142
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
143
- # log file in background when it gets too big.
144
-
145
- appendonly yes
146
-
147
- # The fsync() call tells the Operating System to actually write data on disk
148
- # instead to wait for more data in the output buffer. Some OS will really flush
149
- # data on disk, some other OS will just try to do it ASAP.
150
- #
151
- # Redis supports three different modes:
152
- #
153
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
154
- # always: fsync after every write to the append only log . Slow, Safest.
155
- # everysec: fsync only if one second passed since the last fsync. Compromise.
156
- #
157
- # The default is "always" that's the safer of the options. It's up to you to
158
- # understand if you can relax this to "everysec" that will fsync every second
159
- # or to "no" that will let the operating system flush the output buffer when
160
- # it wants, for better performance (but if you can live with the idea of
161
- # some data loss consider the default persistence mode that's snapshotting).
162
-
163
- # appendfsync always
164
- appendfsync everysec
165
- # appendfsync no
166
-
167
- ############################### ADVANCED CONFIG ###############################
168
-
169
- # Glue small output buffers together in order to send small replies in a
170
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
171
- # in terms of number of queries per second. Use 'yes' if unsure.
172
- glueoutputbuf yes
173
-
174
- # Use object sharing. Can save a lot of memory if you have many common
175
- # string in your dataset, but performs lookups against the shared objects
176
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
177
- # idea.
178
- #
179
- # When object sharing is enabled (shareobjects yes) you can use
180
- # shareobjectspoolsize to control the size of the pool used in order to try
181
- # object sharing. A bigger pool size will lead to better sharing capabilities.
182
- # In general you want this value to be at least the double of the number of
183
- # very common strings you have in your dataset.
184
- #
185
- # WARNING: object sharing is experimental, don't enable this feature
186
- # in production before Redis 1.0-stable. Still please try this feature in
187
- # your development environment so that we can test it better.
188
- shareobjects no
189
- shareobjectspoolsize 1024
@@ -1,65 +0,0 @@
1
- # Testing redis failover functionality
2
- require "rubygems"
3
- require File.expand_path(File.dirname(__FILE__)+"/../lib/beetle")
4
-
5
- Beetle.config.logger.level = Logger::INFO
6
- Beetle.config.redis_hosts = "localhost:6379, localhost:6380"
7
- Beetle.config.servers = "localhost:5672, localhost:5673"
8
-
9
- # instantiate a client
10
- client = Beetle::Client.new
11
-
12
- # register a durable queue named 'test'
13
- # this implicitly registers a durable topic exchange called 'test'
14
- client.register_queue(:test)
15
- client.purge(:test)
16
- client.register_message(:test, :redundant => true)
17
-
18
- # publish some test messages
19
- # at this point, the exchange will be created on the server and the queue will be bound to the exchange
20
- N = 10
21
- n = 0
22
- N.times do |i|
23
- n += client.publish(:test, "Hello#{i+1}")
24
- end
25
- puts "published #{n} test messages"
26
- puts
27
-
28
- # check whether we were able to publish all messages
29
- if n != 2*N
30
- puts "could not publish all messages"
31
- exit 1
32
- end
33
-
34
- # register a handler for the test message, listening on queue "test"
35
- k = 0
36
- client.register_handler(:test) do |m|
37
- k += 1
38
- puts "Received test message from server #{m.server}"
39
- puts "Message content: #{m.data}"
40
- puts
41
- sleep 1
42
- end
43
-
44
- # hack to switch redis programmatically
45
- class Beetle::DeduplicationStore
46
- def switch_redis
47
- slave = redis_instances.find{|r| r.server != redis.server}
48
- redis.shutdown rescue nil
49
- logger.info "Beetle: shut down master #{redis.server}"
50
- slave.slaveof("no one")
51
- logger.info "Beetle: enabled master mode on #{slave.server}"
52
- end
53
- end
54
-
55
- # start listening
56
- # this starts the event machine loop using EM.run
57
- # the block passed to listen will be yielded as the last step of the setup process
58
- client.listen do
59
- trap("INT") { client.stop_listening }
60
- EM.add_timer(5) { client.deduplication_store.switch_redis }
61
- EM.add_timer(11) { client.stop_listening }
62
- end
63
-
64
- puts "Received #{k} test messages"
65
- raise "Your setup is borked" if N != k