beetle 0.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. data/README.rdoc +18 -8
  2. data/beetle.gemspec +37 -121
  3. data/bin/beetle +9 -0
  4. data/examples/README.rdoc +0 -2
  5. data/examples/rpc.rb +3 -2
  6. data/ext/mkrf_conf.rb +19 -0
  7. data/lib/beetle/base.rb +1 -8
  8. data/lib/beetle/client.rb +16 -14
  9. data/lib/beetle/commands/configuration_client.rb +73 -0
  10. data/lib/beetle/commands/configuration_server.rb +85 -0
  11. data/lib/beetle/commands.rb +30 -0
  12. data/lib/beetle/configuration.rb +70 -7
  13. data/lib/beetle/deduplication_store.rb +50 -38
  14. data/lib/beetle/handler.rb +2 -5
  15. data/lib/beetle/logging.rb +7 -0
  16. data/lib/beetle/message.rb +11 -13
  17. data/lib/beetle/publisher.rb +2 -2
  18. data/lib/beetle/r_c.rb +2 -1
  19. data/lib/beetle/redis_configuration_client.rb +136 -0
  20. data/lib/beetle/redis_configuration_server.rb +301 -0
  21. data/lib/beetle/redis_ext.rb +79 -0
  22. data/lib/beetle/redis_master_file.rb +35 -0
  23. data/lib/beetle/redis_server_info.rb +65 -0
  24. data/lib/beetle/subscriber.rb +4 -1
  25. data/lib/beetle.rb +2 -2
  26. data/test/beetle/configuration_test.rb +14 -2
  27. data/test/beetle/deduplication_store_test.rb +61 -43
  28. data/test/beetle/message_test.rb +28 -4
  29. data/test/beetle/redis_configuration_client_test.rb +97 -0
  30. data/test/beetle/redis_configuration_server_test.rb +278 -0
  31. data/test/beetle/redis_ext_test.rb +71 -0
  32. data/test/beetle/redis_master_file_test.rb +39 -0
  33. data/test/test_helper.rb +13 -1
  34. metadata +59 -50
  35. data/.gitignore +0 -5
  36. data/MIT-LICENSE +0 -20
  37. data/Rakefile +0 -114
  38. data/TODO +0 -7
  39. data/doc/redundant_queues.graffle +0 -7744
  40. data/etc/redis-master.conf +0 -189
  41. data/etc/redis-slave.conf +0 -189
  42. data/examples/redis_failover.rb +0 -65
  43. data/script/start_rabbit +0 -29
  44. data/snafu.rb +0 -55
  45. data/test/beetle/bla.rb +0 -0
  46. data/test/beetle.yml +0 -81
  47. data/tmp/master/.gitignore +0 -2
  48. data/tmp/slave/.gitignore +0 -3
data/etc/redis-master.conf DELETED
@@ -1,189 +0,0 @@
- # Redis configuration file example
-
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
- daemonize no
-
- # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
- # You can specify a custom pid file location here.
- pidfile ./tmp/redis-master.pid
-
- # Accept connections on the specified port, default is 6379
- port 6379
-
- # If you want you can bind a single interface, if the bind option is not
- # specified all the interfaces will listen for connections.
- #
- # bind 127.0.0.1
-
- # Close the connection after a client is idle for N seconds (0 to disable)
- timeout 300
-
- # Set server verbosity to 'debug'
- # it can be one of:
- # debug (a lot of information, useful for development/testing)
- # notice (moderately verbose, what you want in production probably)
- # warning (only very important / critical messages are logged)
- loglevel debug
-
- # Specify the log file name. Also 'stdout' can be used to force
- # the demon to log on the standard output. Note that if you use standard
- # output for logging but daemonize, logs will be sent to /dev/null
- logfile stdout
-
- # Set the number of databases. The default database is DB 0, you can select
- # a different one on a per-connection basis using SELECT <dbid> where
- # dbid is a number between 0 and 'databases'-1
- databases 16
-
- ################################ SNAPSHOTTING #################################
- #
- # Save the DB on disk:
- #
- # save <seconds> <changes>
- #
- # Will save the DB if both the given number of seconds and the given
- # number of write operations against the DB occurred.
- #
- # In the example below the behaviour will be to save:
- # after 900 sec (15 min) if at least 1 key changed
- # after 300 sec (5 min) if at least 10 keys changed
- # after 60 sec if at least 10000 keys changed
- save 900 1
- save 300 10
- save 60 10000
-
- # Compress string objects using LZF when dump .rdb databases?
- # For default that's set to 'yes' as it's almost always a win.
- # If you want to save some CPU in the saving child set it to 'no' but
- # the dataset will likely be bigger if you have compressible values or keys.
- rdbcompression yes
-
- # The filename where to dump the DB
- dbfilename dump.rdb
-
- # For default save/load DB in/from the working directory
- # Note that you must specify a directory not a file name.
- dir ./tmp/master/
-
- ################################# REPLICATION #################################
-
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
- # another Redis server. Note that the configuration is local to the slave
- # so for example it is possible to configure the slave to save the DB with a
- # different interval, or to listen to another port, and so on.
- #
- # slaveof <masterip> <masterport>
-
- # If the master is password protected (using the "requirepass" configuration
- # directive below) it is possible to tell the slave to authenticate before
- # starting the replication synchronization process, otherwise the master will
- # refuse the slave request.
- #
- # masterauth <master-password>
-
- ################################## SECURITY ###################################
-
- # Require clients to issue AUTH <PASSWORD> before processing any other
- # commands. This might be useful in environments in which you do not trust
- # others with access to the host running redis-server.
- #
- # This should stay commented out for backward compatibility and because most
- # people do not need auth (e.g. they run their own servers).
- #
- # requirepass foobared
-
- ################################### LIMITS ####################################
-
- # Set the max number of connected clients at the same time. By default there
- # is no limit, and it's up to the number of file descriptors the Redis process
- # is able to open. The special value '0' means no limts.
- # Once the limit is reached Redis will close all the new connections sending
- # an error 'max number of clients reached'.
- #
- # maxclients 128
-
- # Don't use more memory than the specified amount of bytes.
- # When the memory limit is reached Redis will try to remove keys with an
- # EXPIRE set. It will try to start freeing keys that are going to expire
- # in little time and preserve keys with a longer time to live.
- # Redis will also try to remove objects from free lists if possible.
- #
- # If all this fails, Redis will start to reply with errors to commands
- # that will use more memory, like SET, LPUSH, and so on, and will continue
- # to reply to most read-only commands like GET.
- #
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
- # 'state' server or cache, not as a real DB. When Redis is used as a real
- # database the memory usage will grow over the weeks, it will be obvious if
- # it is going to use too much memory in the long run, and you'll have the time
- # to upgrade. With maxmemory after the limit is reached you'll start to get
- # errors for write operations, and this may even lead to DB inconsistency.
- #
- # maxmemory <bytes>
-
- ############################## APPEND ONLY MODE ###############################
-
- # By default Redis asynchronously dumps the dataset on disk. If you can live
- # with the idea that the latest records will be lost if something like a crash
- # happens this is the preferred way to run Redis. If instead you care a lot
- # about your data and don't want to that a single record can get lost you should
- # enable the append only mode: when this mode is enabled Redis will append
- # every write operation received in the file appendonly.log. This file will
- # be read on startup in order to rebuild the full dataset in memory.
- #
- # Note that you can have both the async dumps and the append only file if you
- # like (you have to comment the "save" statements above to disable the dumps).
- # Still if append only mode is enabled Redis will load the data from the
- # log file at startup ignoring the dump.rdb file.
- #
- # The name of the append only file is "appendonly.log"
- #
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
- # log file in background when it gets too big.
-
- appendonly yes
-
- # The fsync() call tells the Operating System to actually write data on disk
- # instead to wait for more data in the output buffer. Some OS will really flush
- # data on disk, some other OS will just try to do it ASAP.
- #
- # Redis supports three different modes:
- #
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
- # always: fsync after every write to the append only log . Slow, Safest.
- # everysec: fsync only if one second passed since the last fsync. Compromise.
- #
- # The default is "always" that's the safer of the options. It's up to you to
- # understand if you can relax this to "everysec" that will fsync every second
- # or to "no" that will let the operating system flush the output buffer when
- # it want, for better performances (but if you can live with the idea of
- # some data loss consider the default persistence mode that's snapshotting).
-
- # appendfsync always
- appendfsync everysec
- # appendfsync no
-
- ############################### ADVANCED CONFIG ###############################
-
- # Glue small output buffers together in order to send small replies in a
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
- # in terms of number of queries per second. Use 'yes' if unsure.
- glueoutputbuf yes
-
- # Use object sharing. Can save a lot of memory if you have many common
- # string in your dataset, but performs lookups against the shared objects
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
- # idea.
- #
- # When object sharing is enabled (shareobjects yes) you can use
- # shareobjectspoolsize to control the size of the pool used in order to try
- # object sharing. A bigger pool size will lead to better sharing capabilities.
- # In general you want this value to be at least the double of the number of
- # very common strings you have in your dataset.
- #
- # WARNING: object sharing is experimental, don't enable this feature
- # in production before of Redis 1.0-stable. Still please try this feature in
- # your development environment so that we can test it better.
- shareobjects no
- shareobjectspoolsize 1024
data/etc/redis-slave.conf DELETED
@@ -1,189 +0,0 @@
- # Redis configuration file example
-
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
- daemonize no
-
- # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
- # You can specify a custom pid file location here.
- pidfile ./tmp/redis-slave.pid
-
- # Accept connections on the specified port, default is 6379
- port 6380
-
- # If you want you can bind a single interface, if the bind option is not
- # specified all the interfaces will listen for connections.
- #
- # bind 127.0.0.1
-
- # Close the connection after a client is idle for N seconds (0 to disable)
- timeout 300
-
- # Set server verbosity to 'debug'
- # it can be one of:
- # debug (a lot of information, useful for development/testing)
- # notice (moderately verbose, what you want in production probably)
- # warning (only very important / critical messages are logged)
- loglevel debug
-
- # Specify the log file name. Also 'stdout' can be used to force
- # the demon to log on the standard output. Note that if you use standard
- # output for logging but daemonize, logs will be sent to /dev/null
- logfile stdout
-
- # Set the number of databases. The default database is DB 0, you can select
- # a different one on a per-connection basis using SELECT <dbid> where
- # dbid is a number between 0 and 'databases'-1
- databases 16
-
- ################################ SNAPSHOTTING #################################
- #
- # Save the DB on disk:
- #
- # save <seconds> <changes>
- #
- # Will save the DB if both the given number of seconds and the given
- # number of write operations against the DB occurred.
- #
- # In the example below the behaviour will be to save:
- # after 900 sec (15 min) if at least 1 key changed
- # after 300 sec (5 min) if at least 10 keys changed
- # after 60 sec if at least 10000 keys changed
- save 900 1
- save 300 10
- save 60 10000
-
- # Compress string objects using LZF when dump .rdb databases?
- # For default that's set to 'yes' as it's almost always a win.
- # If you want to save some CPU in the saving child set it to 'no' but
- # the dataset will likely be bigger if you have compressible values or keys.
- rdbcompression yes
-
- # The filename where to dump the DB
- dbfilename dump.rdb
-
- # For default save/load DB in/from the working directory
- # Note that you must specify a directory not a file name.
- dir ./tmp/slave/
-
- ################################# REPLICATION #################################
-
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
- # another Redis server. Note that the configuration is local to the slave
- # so for example it is possible to configure the slave to save the DB with a
- # different interval, or to listen to another port, and so on.
- #
- slaveof 127.0.0.1 6379
-
- # If the master is password protected (using the "requirepass" configuration
- # directive below) it is possible to tell the slave to authenticate before
- # starting the replication synchronization process, otherwise the master will
- # refuse the slave request.
- #
- # masterauth <master-password>
-
- ################################## SECURITY ###################################
-
- # Require clients to issue AUTH <PASSWORD> before processing any other
- # commands. This might be useful in environments in which you do not trust
- # others with access to the host running redis-server.
- #
- # This should stay commented out for backward compatibility and because most
- # people do not need auth (e.g. they run their own servers).
- #
- # requirepass foobared
-
- ################################### LIMITS ####################################
-
- # Set the max number of connected clients at the same time. By default there
- # is no limit, and it's up to the number of file descriptors the Redis process
- # is able to open. The special value '0' means no limts.
- # Once the limit is reached Redis will close all the new connections sending
- # an error 'max number of clients reached'.
- #
- # maxclients 128
-
- # Don't use more memory than the specified amount of bytes.
- # When the memory limit is reached Redis will try to remove keys with an
- # EXPIRE set. It will try to start freeing keys that are going to expire
- # in little time and preserve keys with a longer time to live.
- # Redis will also try to remove objects from free lists if possible.
- #
- # If all this fails, Redis will start to reply with errors to commands
- # that will use more memory, like SET, LPUSH, and so on, and will continue
- # to reply to most read-only commands like GET.
- #
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
- # 'state' server or cache, not as a real DB. When Redis is used as a real
- # database the memory usage will grow over the weeks, it will be obvious if
- # it is going to use too much memory in the long run, and you'll have the time
- # to upgrade. With maxmemory after the limit is reached you'll start to get
- # errors for write operations, and this may even lead to DB inconsistency.
- #
- # maxmemory <bytes>
-
- ############################## APPEND ONLY MODE ###############################
-
- # By default Redis asynchronously dumps the dataset on disk. If you can live
- # with the idea that the latest records will be lost if something like a crash
- # happens this is the preferred way to run Redis. If instead you care a lot
- # about your data and don't want to that a single record can get lost you should
- # enable the append only mode: when this mode is enabled Redis will append
- # every write operation received in the file appendonly.log. This file will
- # be read on startup in order to rebuild the full dataset in memory.
- #
- # Note that you can have both the async dumps and the append only file if you
- # like (you have to comment the "save" statements above to disable the dumps).
- # Still if append only mode is enabled Redis will load the data from the
- # log file at startup ignoring the dump.rdb file.
- #
- # The name of the append only file is "appendonly.log"
- #
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
- # log file in background when it gets too big.
-
- appendonly yes
-
- # The fsync() call tells the Operating System to actually write data on disk
- # instead to wait for more data in the output buffer. Some OS will really flush
- # data on disk, some other OS will just try to do it ASAP.
- #
- # Redis supports three different modes:
- #
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
- # always: fsync after every write to the append only log . Slow, Safest.
- # everysec: fsync only if one second passed since the last fsync. Compromise.
- #
- # The default is "always" that's the safer of the options. It's up to you to
- # understand if you can relax this to "everysec" that will fsync every second
- # or to "no" that will let the operating system flush the output buffer when
- # it want, for better performances (but if you can live with the idea of
- # some data loss consider the default persistence mode that's snapshotting).
-
- # appendfsync always
- appendfsync everysec
- # appendfsync no
-
- ############################### ADVANCED CONFIG ###############################
-
- # Glue small output buffers together in order to send small replies in a
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
- # in terms of number of queries per second. Use 'yes' if unsure.
- glueoutputbuf yes
-
- # Use object sharing. Can save a lot of memory if you have many common
- # string in your dataset, but performs lookups against the shared objects
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
- # idea.
- #
- # When object sharing is enabled (shareobjects yes) you can use
- # shareobjectspoolsize to control the size of the pool used in order to try
- # object sharing. A bigger pool size will lead to better sharing capabilities.
- # In general you want this value to be at least the double of the number of
- # very common strings you have in your dataset.
- #
- # WARNING: object sharing is experimental, don't enable this feature
- # in production before of Redis 1.0-stable. Still please try this feature in
- # your development environment so that we can test it better.
- shareobjects no
- shareobjectspoolsize 1024
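
Taken together, the two deleted configs describe a local master/slave pair: they differ only in port (6379 vs 6380), pid file, data directory, and the uncommented slaveof directive that makes the second instance replicate the first. A quick way to verify the pair is wired up, sketched with the redis-rb client; the single-argument slaveof form mirrors the deleted failover example below, so treat it as an assumption for your client version:

  require "rubygems"
  require "redis"

  master = Redis.new(:host => "127.0.0.1", :port => 6379)
  slave  = Redis.new(:host => "127.0.0.1", :port => 6380)

  puts master.info["role"]  # expected: "master"
  puts slave.info["role"]   # expected: "slave"

  # manual promotion, as the deleted redis_failover.rb example does:
  slave.slaveof("no one")   # detach from master; instance starts accepting writes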
@@ -1,65 +0,0 @@
1
- # Testing redis failover functionality
2
- require "rubygems"
3
- require File.expand_path(File.dirname(__FILE__)+"/../lib/beetle")
4
-
5
- Beetle.config.logger.level = Logger::INFO
6
- Beetle.config.redis_hosts = "localhost:6379, localhost:6380"
7
- Beetle.config.servers = "localhost:5672, localhost:5673"
8
-
9
- # instantiate a client
10
- client = Beetle::Client.new
11
-
12
- # register a durable queue named 'test'
13
- # this implicitly registers a durable topic exchange called 'test'
14
- client.register_queue(:test)
15
- client.purge(:test)
16
- client.register_message(:test, :redundant => true)
17
-
18
- # publish some test messages
19
- # at this point, the exchange will be created on the server and the queue will be bound to the exchange
20
- N = 10
21
- n = 0
22
- N.times do |i|
23
- n += client.publish(:test, "Hello#{i+1}")
24
- end
25
- puts "published #{n} test messages"
26
- puts
27
-
28
- # check whether we were able to publish all messages
29
- if n != 2*N
30
- puts "could not publish all messages"
31
- exit 1
32
- end
33
-
34
- # register a handler for the test message, listing on queue "test"
35
- k = 0
36
- client.register_handler(:test) do |m|
37
- k += 1
38
- puts "Received test message from server #{m.server}"
39
- puts "Message content: #{m.data}"
40
- puts
41
- sleep 1
42
- end
43
-
44
- # hack to switch redis programmatically
45
- class Beetle::DeduplicationStore
46
- def switch_redis
47
- slave = redis_instances.find{|r| r.server != redis.server}
48
- redis.shutdown rescue nil
49
- logger.info "Beetle: shut down master #{redis.server}"
50
- slave.slaveof("no one")
51
- logger.info "Beetle: enabled master mode on #{slave.server}"
52
- end
53
- end
54
-
55
- # start listening
56
- # this starts the event machine loop using EM.run
57
- # the block passed to listen will be yielded as the last step of the setup process
58
- client.listen do
59
- trap("INT") { client.stop_listening }
60
- EM.add_timer(5) { client.deduplication_store.switch_redis }
61
- EM.add_timer(11) { client.stop_listening }
62
- end
63
-
64
- puts "Received #{k} test messages"
65
- raise "Your setup is borked" if N != k
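
The deleted script doubles as a compact tour of the public client API. Note the 2*N check: a message registered with :redundant => true is published once to each of the two configured brokers, and publish returns the number of brokers written to, so ten publishes are expected to yield twenty sends while the deduplication store ensures each handler still fires only once per message. A minimal sketch of the same round trip without the failover hack (queue and message names are illustrative; the client calls are the ones used in the script above, and requiring the gem as "beetle" assumes the standard gem layout):

  require "rubygems"
  require "beetle"

  Beetle.config.servers     = "localhost:5672, localhost:5673"  # two AMQP brokers
  Beetle.config.redis_hosts = "localhost:6379, localhost:6380"  # dedup store pair

  client = Beetle::Client.new
  client.register_queue(:test)                        # durable queue + topic exchange "test"
  client.register_message(:test, :redundant => true)  # publish to both brokers

  sent = client.publish(:test, "hello")               # => 2 (one per broker)
  puts "sent to #{sent} brokers"

  client.register_handler(:test) { |m| puts m.data }  # fires once despite redundancy
  client.listen do
    EM.add_timer(2) { client.stop_listening }         # run the EventMachine loop briefly
  end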
data/script/start_rabbit DELETED
@@ -1,29 +0,0 @@
- #!/bin/bash
-
- # export RABBITMQ_MNESIA_BASE=/var/lib/rabbitmq/mnesia2
- # Defaults to /var/lib/rabbitmq/mnesia. Set this to the directory where Mnesia
- # database files should be placed.
-
- # export RABBITMQ_LOG_BASE
- # Defaults to /var/log/rabbitmq. Log files generated by the server will be placed
- # in this directory.
-
- export RABBITMQ_NODENAME=$1
- # Defaults to rabbit. This can be useful if you want to run more than one node
- # per machine - RABBITMQ_NODENAME should be unique per erlang-node-and-machine
- # combination. See clustering on a single machine guide at <http://www.rab-
- # bitmq.com/clustering.html#single-machine> for details.
-
- # RABBITMQ_NODE_IP_ADDRESS
- # Defaults to 0.0.0.0. This can be changed if you only want to bind to one net-
- # work interface.
-
- export RABBITMQ_NODE_PORT=$2
- # Defaults to 5672.
-
- # RABBITMQ_CLUSTER_CONFIG_FILE
- # Defaults to /etc/rabbitmq/rabbitmq_cluster.config. If this file is present it
- # is used by the server to auto-configure a RabbitMQ cluster. See the clustering
- # guide at <http://www.rabbitmq.com/clustering.html> for details.
-
- rabbitmq-server
data/snafu.rb DELETED
@@ -1,55 +0,0 @@
- # The simplest case
- client.register_message(:something_happened) # => key: something_happened
-
- # with options
- client.register_message(:
-
- ####################
- # Message Grouping #
- ####################
-
- client.register_message(:delete_something, :group => :jobs) # => key: jobs.delete_something
- client.register_message(:create_something, :group => :jobs) # => key: jobs.create_something
-
- # You can register a handler for a message group
- client.register_handler(JobsHandler, :group => :jobs) # bind queue with: jobs.*
-
- # And still register on single messages
- client.register_handler(DeletedJobHandler, :delete_something) # bind queue with: *.delete_something
-
- ######################
- # Handler Definition #
- ######################
-
- # With a Handler class that implements .process(message)
- client.register_handler(MyProcessor, :something_happened) # => queue: my_processor
-
- # With a String / Symbol and a block
- client.register_handler("Other Processor", :delete_something, :something_happened) lambda { |message| foobar(message) } # => queue: other_processor, bound with: *.delete_something and *.something_happened
-
- # With extra parameters
- client.register_handler(VeryImportant, :delete_something, :immediate => true) # queue: very_important, :immediate => true
-
- ###################################
- # Wiring, Subscribing, Publishing #
- ###################################
- client.wire! # => all the binding magic happens
-
- client.subscribe
-
- client.publish(:delete_something, 'payload')
-
- __END__
-
- Whats happening when wire! is called? (pseudocode)
- 1. all the messages are registered
- messages = [{:name => :delete_something, :group => :jobs, :bound => false}, {:name => :something_happened, :bound => false}]
- 2. all the queues for the handlers are created and bound...
- my_processor_queue = queue(:my_processor).bind(exchange, :key => '*.something_happened')
- jobs_handler_queue = queue(:jobs_handler).bind(exchange, :key => 'jobs.*')
- handlers_with_queues = [[jobs_handler_queue, JobsHandler], [my_processor_queue, block_or_class]]
- 3. every handler definition binds a queue for the handler to a list of messages and marks the message as bound.
- 4. If in the end a message isn't bound to a queue at least once, an exception is raised
-
- Exceptions will be thrown if:
- * after all m
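
snafu.rb was a scratch API sketch rather than shipped code (two lines are truncated mid-expression, and they are preserved as-is above). Read as pseudocode, the grouping scheme it describes is: a message registered with :group => :jobs gets the routing key jobs.<message_name>, a group handler binds its queue with jobs.*, a single-message handler binds with *.<message_name>, and wire! then declares everything and raises if any message ends up bound to no queue. A cleaned-up sketch of that flow, assuming the register_* signatures shown in the deleted file (JobsHandler and DeletedJobHandler are placeholder handler classes implementing .process(message)):

  client = Beetle::Client.new

  # grouped messages share the "jobs." routing-key prefix
  client.register_message(:delete_something, :group => :jobs)   # key: jobs.delete_something
  client.register_message(:create_something, :group => :jobs)   # key: jobs.create_something

  client.register_handler(JobsHandler, :group => :jobs)         # queue bound with jobs.*
  client.register_handler(DeletedJobHandler, :delete_something) # queue bound with *.delete_something

  client.wire!                                  # declare and bind; raises on unbound messages
  client.publish(:delete_something, "payload")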
data/test/beetle/bla.rb DELETED
File without changes
data/test/beetle.yml DELETED
@@ -1,81 +0,0 @@
- # list all standard exchanges used by the main xing app, along with their options for declaration
- # used by producers and consumers
- exchanges:
-   test:
-     type: "topic"
-     durable: true
-   deadletter:
-     type: "topic"
-     durable: true
-   redundant:
-     type: "topic"
-     durable: true
-
- # list all standard queues along with their binding declaration
- # this section is only used by consumers
- queues:
-   test: # binding options
-     exchange: "test" # Bandersnatch default is the name of the queue
-     passive: false # amqp default is false
-     durable: true # amqp default is false
-     exclusive: false # amqp default is false
-     auto_delete: false # amqp default is false
-     nowait: true # amqp default is true
-     key: "#" # listen to every message
-   deadletter:
-     exchange: "deadletter"
-     durable: true
-     key: "#"
-   redundant:
-     exchange: "redundant"
-     durable: true
-     key: "#"
-   additional_queue:
-     exchange: "redundant"
-     durable: true
-     key: "#"
-
- # list all messages we can publish
- messages:
-   test:
-     queue: "test"
-     # Spefify the queue for listeners (default is message name)
-     key: "test"
-     # Specifies the routing key pattern for message subscription.
-     ttl: <%= 1.hour %>
-     # Specifies the time interval after which messages are silently dropped (seconds)
-     mandatory: true
-     # default is false
-     # Tells the server how to react if the message
-     # cannot be routed to a queue. If set to _true_, the server will return an unroutable message
-     # with a Return method. If this flag is zero, the server silently drops the message.
-     immediate: false
-     # default is false
-     # Tells the server how to react if the message
-     # cannot be routed to a queue consumer immediately. If set to _true_, the server will return an
-     # undeliverable message with a Return method. If set to _false_, the server will queue the message,
-     # but with no guarantee that it will ever be consumed.
-     persistent: true
-     # default is false
-     # Tells the server whether to persist the message
-     # If set to _true_, the message will be persisted to disk and not lost if the server restarts.
-     # If set to _false_, the message will not be persisted across server restart. Setting to _true_
-     # incurs a performance penalty as there is an extra cost associated with disk access.
-   deadletter:
-     key: "deadletter"
-     persistent: true
-   redundant:
-     key: "redundant"
-     persistent: true
-     redundant: true
-
- development: &development
-   hostname: localhost:5672, localhost:5673
-   # hostname: localhost:5672
-   msg_id_store:
-     host: localhost
-     db: 4
-
- test:
-   <<: *development
-   hostname: localhost:5672
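
With the YAML file gone, 0.2.0 relies on the reworked Beetle::Configuration and Client (items 8 and 12 in the file list). Purely as an illustration of the mapping, the declarations above might translate to client calls like the following; apart from :redundant and :group, which appear elsewhere in this diff, the option names (:exchange, :key, :ttl, :persistent) are assumptions, and 1.hour presumes ActiveSupport just as the ERB in the YAML did:

  client = Beetle::Client.new

  # hypothetical translation of the "test" queue and message sections above
  client.register_queue(:test, :exchange => :test, :key => "#")
  client.register_message(:test, :key => "test", :ttl => 1.hour, :persistent => true)
  client.register_message(:redundant, :key => "redundant", :persistent => true, :redundant => true)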
data/tmp/master/.gitignore DELETED
@@ -1,2 +0,0 @@
- *.rdb
- appendonly.aof
data/tmp/slave/.gitignore DELETED
@@ -1,3 +0,0 @@
- *.rdb
- appendonly.aof
-