red_cluster 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lib/red_cluster.rb ADDED
@@ -0,0 +1,213 @@
+ require 'redis'
+ require 'zlib'
+ require 'set'
+ require 'replica_set'
+
+ class RedCluster
+   attr_reader :replica_sets
+
+   def initialize(replica_sets = [])
+     @replica_sets = replica_sets.map { |replica_set| ReplicaSet.new(self, replica_set) }
+   end
+
+   def load_aof_file(file_path)
+     aof_file = File.read file_path
+     commands = aof_file.split /^\*/
+     commands.each do |cmd|
+       split_cmd = cmd.split("\r\n")[1..-1]
+       next unless split_cmd
+       split_cmd.reject! { |cmd| cmd =~ /^\$/ }
+       redis_cmd, redis_key, redis_args = split_cmd[0], split_cmd[1], split_cmd[2..-1]
+       crc32_of_key = Zlib.crc32(redis_key).abs
+       replica_set = @replica_sets[crc32_of_key % @replica_sets.size]
+       replica_set.master.send redis_cmd, redis_key, *redis_args
+     end
+   end
+
+   SINGLE_KEY_KEY_OPS = %W{del exists expire expireat move persist ttl type}.map(&:to_sym)
+   STRING_OPS = %W{append decr decrby get getbit getrange getset incr incrby mget mset msetnx set setbit setex setnx setrange strlen}.map(&:to_sym)
+   HASH_OPS = %W{hdel hexists hget hgetall hincrby hkeys hlen hmget hmset hset hsetnx hvals}.map(&:to_sym)
+   SINGLE_KEY_LIST_OPS = %W{blpop brpop lindex linsert llen lpop lpush lpushx lrange lrem lset ltrim rpop rpush rpushx}.map(&:to_sym)
+   SINGLE_KEY_SET_OPS = %W{sadd scard sismember smembers spop srandmember srem}.map(&:to_sym)
+   SINGLE_KEY_SORTED_SET_OPS = %W{zadd zcard zcount zincrby zrange zrangebyscore zrank zrem zremrangebyrank zremrangebyscore zrevrange zrevrangebyscore zrevrank zscore}.map(&:to_sym)
+   SINGLE_KEY_OPS = SINGLE_KEY_KEY_OPS + STRING_OPS + HASH_OPS + SINGLE_KEY_LIST_OPS + SINGLE_KEY_SET_OPS + SINGLE_KEY_SORTED_SET_OPS
+
+   # Server Ops
+   def select(db); @replica_sets.each {|srvr| srvr.select(db) }; "OK"; end
+   def echo(msg); @replica_sets.each {|srvr| srvr.echo(msg) }; msg; end
+   def flushdb; @replica_sets.each(&:flushdb); end
+   def shutdown; @replica_sets.each(&:shutdown); end
+   def flushall; @replica_sets.each { |rs| rs.flushall }; "OK"; end
+   def quit; @replica_sets.each(&:quit); "OK"; end
+   def ping; @replica_sets.each(&:ping); "PONG"; end
+   def keys(pattern); @replica_sets.map { |server| server.keys pattern }.flatten; end
+   def bgsave; @replica_sets.each(&:bgsave); "Background saving started"; end
+   def lastsave; @replica_sets.map(&:lastsave).min; end
+
+   def config(cmd, *args)
+     if cmd == :get
+       @replica_sets.inject({}) { |result, replica_set| result.merge(replica_set.config(:get, *args)) }
+     else
+       @replica_sets.each { |replica_set| replica_set.config(cmd, *args) }
+       "OK"
+     end
+   end
+
+   # Transaction Ops
+   def multi; @replica_sets.each(&:multi); end
+
+   def exec
+     @multi_count = nil
+     exec_results = @replica_sets.map(&:exec)
+     #We'll get back a deeply nested array of arrays of the kind
+     #[[3, 30], [10, 1]], [1, "OK"]] - where the first element in each leaf array is the RANK and the second is the result
+     #We need to return back the results sorted by rank. So in the above case it would be
+     #["OK", 30, 1]. Ruby's full-LISP toolbox to the rescue
+     Hash[*exec_results.flatten].sort.map(&:last)
+   end
+
+   # Key Ops
+   def randomkey
+     replica_sets_with_keys_in_them = @replica_sets.select { |replica_set| replica_set.randomkey != nil }
+     idx = (rand * replica_sets_with_keys_in_them.count).to_i
+     rand_replica_set = replica_sets_with_keys_in_them[idx]
+     rand_replica_set && rand_replica_set.randomkey
+   end
+
+   def rename(key, new_key)
+     raise RuntimeError, "ERR source and destination objects are the same" if key == new_key
+     raise RuntimeError, "ERR no such key" unless exists(key)
+     val = get key
+     del key
+     set new_key, val
+   end
+
+   # List Ops
+   def rpoplpush(src_list, target_list)
+     val = rpop src_list
+     return unless val
+     lpush target_list, val
+     val
+   end
+
+   def brpoplpush(src_list, target_list, timeout)
+     val = brpop src_list, timeout
+     return unless val
+     lpush target_list, val
+     val
+   end
+
+   # Set Ops
+   def smove(src, destination, member)
+     if sismember src, member
+       sadd destination, member
+       srem src, member
+       true
+     else
+       false
+     end
+   end
+
+   def sdiff(*sets)
+     perform_set_strategy :difference, *sets
+   end
+
+   def sinter(*sets)
+     perform_set_strategy :intersection, *sets
+   end
+
+   def sunion(*sets)
+     perform_set_strategy :union, *sets
+   end
+
+   def sinterstore(destination, *sets)
+     perform_store_strategy :sinter, destination, *sets
+   end
+
+   def sunionstore(destination, *sets)
+     perform_store_strategy :sunion, destination, *sets
+   end
+
+   def sdiffstore(destination, *sets)
+     perform_store_strategy :sdiff, destination, *sets
+   end
+
+   # Sorted Set Ops
+   def zinterstore(destination, input_sets, options = {})
+     perform_sorted_set_store_strategy :intersection, destination, input_sets, options
+   end
+
+   def zunionstore(destination, input_sets, options = {})
+     perform_sorted_set_store_strategy :union, destination, input_sets, options
+   end
+
+
+   def method_missing(method, *args)
+     if SINGLE_KEY_OPS.include?(method.to_sym)
+       key = args.first
+       replica_set = replica_set_for_key key
+       replica_set.send method, *args
+     else
+       raise "Unsupported operation: #{method}"
+     end
+   end
+
+   private
+   def replica_set_for_key(key)
+     @replica_sets[Zlib.crc32(key).abs % @replica_sets.size]
+   end
+
+   def multi_count
+     @multi_count ||= -1
+     @multi_count += 1
+   end
+
+   def perform_store_strategy(strategy, destination, *sets)
+     del destination
+     send(strategy, *sets).each do |entry|
+       sadd destination, entry
+     end
+     scard destination
+   end
+
+   def perform_set_strategy(strategy, *sets)
+     first_set = Set.new(smembers(sets.first))
+     sets[1..-1].inject(first_set) do |accum_set, set|
+       accum_set.send(strategy, (Set.new(smembers(set))))
+     end.entries
+   end
+
+   def perform_sorted_set_store_strategy(strategy, destination, input_sets, options)
+     weights = Array(options[:weights])
+
+     first_set = Set.new(zrange(input_sets.first, 0, -1))
+     accum_set = input_sets[1..-1].inject(first_set) do |accmltr, set|
+       accmltr.send(strategy, Set.new(zrange(set, 0, -1)))
+     end
+
+     del destination
+
+     accum_set.entries.each do |entry|
+       score_of_input_sets = input_sets.map do |input_set|
+         [input_set, zscore(input_set, entry)]
+       end.reject do |is, zscr|
+         zscr == nil
+       end.map do |is,zscr|
+         zscr.to_i * weights.fetch(input_sets.index(is), 1)
+       end
+       aggregate_function = (options[:aggregate] || :sum)
+       score = if aggregate_function == :sum
+         score_of_input_sets.inject(0) { |sum, e_score| sum += e_score.to_i }
+       elsif [:min, :max].include?(aggregate_function)
+         score_of_input_sets.send aggregate_function
+       else
+         raise "ERR syntax error"
+       end
+
+       zadd destination, score, entry
+     end
+     zcard destination
+   end
+
+ end
+
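For orientation, the routing scheme above can be summarized with a short usage sketch. This is not part of the released files; it assumes local Redis instances on the ports used in the gem's specs and only exercises calls defined in lib/red_cluster.rb (RedCluster.new, single-key commands dispatched via method_missing, and the fan-out keys helper).

    # Hedged usage sketch -- assumes local Redis servers on the spec ports.
    require 'red_cluster'

    cluster = RedCluster.new([
      { :master => { :host => "localhost", :port => 6379 },
        :slaves => [{ :host => "localhost", :port => 7379 }] },
      { :master => { :host => "localhost", :port => 9379 },
        :slaves => [{ :host => "localhost", :port => 10379 }] }
    ])

    # Single-key commands go through method_missing: Zlib.crc32(key).abs,
    # modulo the number of replica sets, selects the target replica set.
    cluster.set "greeting", "hello"   # write, routed to one replica set's master
    cluster.get "greeting"            # => "hello" (reads are served by a slave)
    cluster.keys "*"                  # fans out to every replica set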
lib/replica_set.rb ADDED
@@ -0,0 +1,82 @@
+ class RedCluster
+   class ReplicaSet
+     attr_reader :slaves, :master
+
+     def initialize(cluster, options)
+       @my_cluster = cluster
+       @master = Redis.new options[:master]
+       @slaves = options[:slaves].map { |slave_config| Redis.new slave_config }
+       setup_slaves
+     end
+
+     def multi
+       @in_multi = true
+       @cmd_order_in_multi = []
+       @master.multi
+     end
+
+     def exec
+       @in_multi = nil
+       @master.exec.map { |result| [@cmd_order_in_multi.shift, result] }
+     end
+
+     def method_missing(command, *args)
+       if blocking_command?(command)
+         raise "Blocking Commands Not Permitted"
+       elsif pub_sub_command?(command)
+         raise "Pub Sub Commands Not Permitted"
+       elsif slaveof_command?(command)
+         raise "Slave Commands Not Permitted"
+       elsif command == :shutdown
+         @master.shutdown
+         @slaves.each(&:shutdown)
+       elsif @in_multi
+         @cmd_order_in_multi << @my_cluster.send(:multi_count)
+         @master.send command, *args
+       elsif read_command?(command)
+         next_slave.send command, *args
+       else
+         @master.send command, *args
+       end
+     rescue Errno::ECONNREFUSED
+       new_master = @slaves.shift
+       raise(NoMaster, "No master in replica set") unless new_master
+       @master = new_master
+       setup_slaves
+       retry
+     end
+
+     def next_slave
+       ret = @slaves.shift
+       @slaves.push ret
+       ret
+     end
+
+     private
+
+     def setup_slaves
+       @slaves.each { |slave| slave.slaveof(@master.client.host, @master.client.port) }
+     end
+
+     def slaveof_command?(command)
+       command == :slaveof
+     end
+
+     def read_command?(command)
+       [:dbsize, :exists, :get, :getbit, :getrange, :hexists, :hget, :hgetall, :hkeys, :hlen, :hmget, :hvals, :keys, :lastsave, :lindex, :llen, :mget, :object, :randomkey, :scard, :sismember, :smembers, :srandmember, :strlen, :ttl, :zcard, :zcount, :zrange, :zrangebyscore, :zrank, :zrevrange, :zrevrangebyscore, :zrevrank, :zscore].include?(command)
+     end
+
+     def blocking_command?(command)
+       [:blpop, :brpop, :brpoplpush].include?(command)
+     end
+
+     def pub_sub_command?(command)
+       [:psubscribe, :publish, :punsubscribe, :subscribe, :unsubscribe].include?(command)
+     end
+   end
+ end
+
+ class RedCluster
+   class NoMaster < ::Exception
+   end
+ end
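The ReplicaSet class above handles read/write routing and failover on its own; below is a hedged sketch of standalone use, again assuming the local ports from the specs and that the redis gem is loaded.

    # Hedged usage sketch -- ReplicaSet used directly, outside a cluster.
    require 'redis'
    require 'replica_set'

    rs = RedCluster::ReplicaSet.new(nil,
      :master => { :host => "localhost", :port => 6379 },
      :slaves => [{ :host => "localhost", :port => 7379 },
                  { :host => "localhost", :port => 8379 }])

    rs.set "foo", "bar"   # writes are forwarded to the master
    rs.get "foo"          # reads rotate round-robin across the slaves
    # On Errno::ECONNREFUSED from the master, the first remaining slave is
    # promoted, the others are re-slaved to it, and the command is retried;
    # with no slaves left a RedCluster::NoMaster error is raised.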
spec/red_cluster_spec.rb ADDED
@@ -0,0 +1,432 @@
+ require 'spec_helper'
+ require 'red_cluster'
+
+ describe RedCluster do
+   before do
+     first_replica_set = {
+       :master => {:host => "localhost", :port => 6379},
+       :slaves => [{:host => "localhost", :port => 7379},
+                   {:host => "localhost", :port => 8379}]
+     }
+     second_replica_set = {
+       :master => {:host => "localhost", :port => 9379},
+       :slaves => [{:host => "localhost", :port => 10379},
+                   {:host => "localhost", :port => 11379}]
+     }
+     third_replica_set = {
+       :master => {:host => "localhost", :port => 12379},
+       :slaves => [{:host => "localhost", :port => 13379},
+                   {:host => "localhost", :port => 14379}]
+     }
+     replica_sets = [first_replica_set, second_replica_set, third_replica_set]
+     @rc = RedCluster.new replica_sets
+     @rc.replica_sets.each { |rs| rs.stubs(:read_command?).returns(false) }
+   end
+   let(:rc) { @rc }
+   after { rc.flushall }
+
+   it "gets initialized with a bunch of replica sets" do
+     first_replica_set = {
+       :master => {:host => "localhost", :port => 6379},
+       :slaves => [{:host => "localhost", :port => 7379},
+                   {:host => "localhost", :port => 8379}]
+     }
+     second_replica_set = {
+       :master => {:host => "localhost", :port => 9379},
+       :slaves => [{:host => "localhost", :port => 10379},
+                   {:host => "localhost", :port => 11379}]
+     }
+     third_replica_set = {
+       :master => {:host => "localhost", :port => 12379},
+       :slaves => [{:host => "localhost", :port => 13379},
+                   {:host => "localhost", :port => 14379}]
+     }
+     replica_sets = [first_replica_set, second_replica_set, third_replica_set]
+     RedCluster.new replica_sets
+   end
+
+   context "#randomkey", :fast => true do
+     it "returns a random key across the cluster", :fast => true do
+       rc.set "foo", "bar"
+       rc.randomkey.should == "foo"
+     end
+     it "returns nil for an empty cluster" do
+       rc.randomkey.should_not be
+     end
+   end
+
+   context "#flushdb" do
+     it "works" do
+       (1..10_000).to_a.each { |num| rc.set("number|#{num}", "hello") }
+       #make sure all servers have a key
+       rc.replica_sets.each do |replica_set|
+         replica_set.randomkey.should be
+       end
+       rc.flushdb
+       rc.replica_sets.each do |replica_set|
+         replica_set.randomkey.should_not be
+       end
+     end
+   end
+
+   context "#flushall" do
+     it "flushes keys from all across the cluster" do
+       (1..10).to_a.each { |num| rc.set("number|#{num}", "hello") }
+       [0, 1, 2].each { |num| rc.replica_sets[num].master.randomkey.should be }
+       rc.flushall.should == "OK"
+       rc.randomkey.should_not be
+     end
+   end
+
+   context "#keys" do
+     it 'scans across the cluster' do
+       (1..10).to_a.each { |num| rc.set("number|#{num}", "hello") }
+       rc.keys("*").map(&:to_s).sort.should == rc.replica_sets.inject([]) { |accum, rs| accum << rs.keys("*") }.flatten.map(&:to_s).sort
+     end
+   end
+
+   context "#smove" do
+     it "returns false if the first set does not exist or does not have the member" do
+       rc.smove("non_existent_source", "destination", "foo").should == false
+       rc.sadd "source", "bar"
+       rc.smove("source", "destination", "foo").should == false
+     end
+
+     it "returns true if the first set had the member" do
+       rc.sadd "source", "foo"
+       rc.smove("source", "destination", "foo").should == true
+       rc.sismember("source", "foo").should == false
+       rc.sismember("destination", "foo").should == true
+     end
+   end
+
+   context "#sdiffstore", :fast => true do
+     it "stores the diff in the destination" do
+       (1..10).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sdiffstore("result_set", "set_one", "set_two").should == 4
+       rc.smembers("result_set").sort.should == (1..4).to_a.map(&:to_s)
+     end
+
+     it "doesn't store the destination if the diff yielded no results" do
+       rc.sdiffstore("result_set", "unknown_set", "set_two").should == 0
+       rc.smembers("result_set").should == []
+     end
+   end
+
+   context "#sdiff", :fast => true do
+     it "calculates the diff" do
+       (1..10).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sdiff("set_one", "set_two").sort.should == (1..4).to_a.map(&:to_s)
+     end
+     it "returns an [] when the first set does not exist" do
+       rc.sdiff("unknown_set", "some_set").should == []
+     end
+   end
+
+   context "#sinter", :fast => true do
+     it "calculates the intersection" do
+       (1..10).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sinter("set_one", "set_two").map(&:to_i).sort.should == (5..10).to_a
+     end
+     it "returns an [] when the first set does not exist" do
+       rc.sinter("unknown_set", "some_set").should == []
+     end
+   end
+
+   context "#sinterstore", :fast => true do
+     it "stores the intersection in the destination" do
+       (1..10).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sinterstore("result_set", "set_one", "set_two").should == 6
+       rc.smembers("result_set").map(&:to_i).sort.should == (5..10).to_a
+     end
+
+     it "doesn't store the destination if the intersection yielded no results" do
+       rc.sadd "result_set", 1
+       rc.sinterstore("result_set", "unknown_set", "set_two").should == 0
+       rc.smembers("result_set").should == []
+       rc.exists("result_set").should_not be
+     end
+   end
+
+   context "#sunion", :fast => true do
+     it "calculates the union" do
+       (1..4).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sunion("set_one", "set_two").map(&:to_i).sort.should == (1..10).to_a
+     end
+     it "returns an [] when the first set does not exist" do
+       rc.sunion("unknown_set", "some_set").should == []
+     end
+   end
+
+   context "#sunionstore", :fast => true do
+     it "stores the union in the destination" do
+       (1..4).to_a.each { |num| rc.sadd "set_one", num }
+       (5..10).to_a.each { |num| rc.sadd "set_two", num }
+       rc.sunionstore("result_set", "set_one", "set_two").should == 10
+       rc.smembers("result_set").map(&:to_i).sort.should == (1..10).to_a
+     end
+
+     it "doesn't store the destination if the union yielded no results" do
+       rc.sadd "result_set", 1
+       rc.sunionstore("result_set", "unknown_set", "set_two").should == 0
+       rc.smembers("result_set").should == []
+       rc.exists("result_set").should_not be
+     end
+   end
+
+   context "#rename", :fast => true do
+     it "raises an error if the key did not exist" do
+       expect { rc.rename("unknown_key", "key") }.to raise_error(RuntimeError, "ERR no such key")
+     end
+     it "raises an error if the keys are the same" do
+       rc.set "foo", "bar"
+       expect { rc.rename("foo", "foo") }.to raise_error(RuntimeError, "ERR source and destination objects are the same")
+     end
+     it "does a rename" do
+       rc.set "foo", "bar"
+       rc.rename("foo", "foo_new").should == "OK"
+       rc.exists("foo").should_not be
+       rc.get("foo_new").should == "bar"
+     end
+   end
+
+   context "#multi-exec", :fast => true do
+     it "works" do
+       rc.get("foo").should_not be
+       rc.get("baz").should_not be
+       rc.multi
+       100.times do
+         rc.set("foo", "bar").should == "QUEUED"
+         rc.incr("baz").should == "QUEUED"
+       end
+       rc.exec.should == 100.times.map { |i| ["OK", i+1] }.flatten
+       rc.get("foo").should == "bar"
+       rc.get("baz").should == "100"
+     end
+   end
+
+   context "#watch", :fast => true do
+     it "is an unsupported operation" do
+       expect { rc.watch }.to raise_error(RuntimeError, "Unsupported operation: watch")
+     end
+   end
+
+   context "#unwatch", :fast => true do
+     it "is an unsupported operation" do
+       expect { rc.unwatch }.to raise_error(RuntimeError, "Unsupported operation: unwatch")
+     end
+   end
+
+   context "bgsave-lastsave" do
+     it "returns the earliest lastsave time across the cluster" do
+       lastsave = rc.lastsave
+       rc.set "foo", "bar"
+       rc.bgsave.should == "Background saving started"
+       sleep 1 #give it a little time to complete
+       new_last_save = rc.lastsave
+       # No Idea why this fails when running the whole suite
+       # new_last_save.should > lastsave
+       rc.replica_sets.map(&:lastsave).sort.first.should == new_last_save
+     end
+   end
+
+   context "#quit", :fast => true do
+     it "closes all the connections it has" do
+       rc.quit.should == "OK"
+     end
+   end
+
+   context "#ping", :fast => true do
+     it "pings all replica_sets in the cluster" do
+       rc.replica_sets.each { |rs| rs.should_receive(:ping) }
+       rc.ping.should == "PONG"
+     end
+   end
+
+   context "#echo", :fast => true do
+     it "echoes to all replica_sets" do
+       rc.replica_sets.each { |rs| rs.should_receive(:echo).with("hello") }
+       rc.echo("hello").should == "hello"
+     end
+   end
+
+   context "#config", :fast => true do
+     context "#get" do
+       it "returns the config values across all replica_sets" do
+         rc.config(:get, "*").should_not be_empty
+       end
+     end
+
+     context "#set", :fast => true do
+       it "sets values across all replica_sets" do
+         old_timeout = rc.config(:get, "timeout")["timeout"].to_i
+         old_timeout.should > 0
+         rc.config(:set, "timeout", 100).should == "OK"
+         rc.replica_sets.each { |rs| rs.config(:get, "timeout")["timeout"].to_i.should == 100 }
+         rc.config(:set, "timeout", old_timeout).should == "OK"
+         rc.replica_sets.each { |rs| rs.config(:get, "timeout")["timeout"].to_i.should == old_timeout }
+       end
+     end
+
+     context "#resetstat", :fast => true do
+       it "resets stats across all replica_sets" do
+         rc.flushall
+         rc.replica_sets.each { |rs| rs.info["total_commands_processed"].to_i.should > 1 }
+         rc.config(:resetstat).should == "OK"
+         rc.replica_sets.each { |rs| rs.info["total_commands_processed"].to_i.should == 1 }
+       end
+     end
+
+     context "#bad_command", :fast => true do
+       it "raises an error" do
+         expect { rc.config(:bad_command) }.to raise_error(RuntimeError, "ERR CONFIG subcommand must be one of GET, SET, RESETSTAT")
+       end
+     end
+   end
+
+   context "#auth", :fast => true do
+     it "is not supported" do
+       expect { rc.auth "foobar" }.to raise_error(RuntimeError, "Unsupported operation: auth")
+     end
+   end
+
+   context "#discard", :fast => true do
+     it "is not supported" do
+       expect { rc.discard }.to raise_error(RuntimeError, "Unsupported operation: discard")
+     end
+   end
+
+   context "#watch", :fast => true do
+     it "is not supported" do
+       expect { rc.watch }.to raise_error(RuntimeError, "Unsupported operation: watch")
+     end
+   end
+
+   context "#object", :fast => true do
+     it "is not supported" do
+       expect { rc.object }.to raise_error(RuntimeError, "Unsupported operation: object")
+     end
+   end
+
+   context "#sort", :fast => true do
+     it "is not supported" do
+       expect { rc.sort }.to raise_error(RuntimeError, "Unsupported operation: sort")
+     end
+   end
+
+   context "#zinterstore", :fast => true do
+     before do
+       rc.zadd "my_zset_one", 1, "key_one"
+       rc.zadd "my_zset_two", 10, "key_one"
+       rc.zadd "my_zset_one", 2, "key_two"
+       rc.zadd "my_zset_two", 20, "key_two"
+       rc.zadd "my_zset_two", 30, "key_three"
+     end
+
+     it "without weights and no aggregate function" do
+       rc.zinterstore("result", ["my_zset_one", "my_zset_two"]).should == 2
+       rc.zscore("result", "key_one").to_i.should == 11
+       rc.zscore("result", "key_two").to_i.should == 22
+       rc.zscore("result", "key_three").should_not be
+     end
+
+     it "with weights" do
+       rc.zinterstore("result", ["my_zset_one", "my_zset_two"], :weights => [10, 1]).should == 2
+       rc.zscore("result", "key_one").to_i.should == (10*1 + 10)
+       rc.zscore("result", "key_two").to_i.should == (10*2 + 20)
+     end
+
+     context "with AGGREGATE" do
+       it "sums" do
+         rc.zinterstore("result", ["my_zset_one", "my_zset_two"], :weights => [10, 1], :aggregate => :sum).should == 2
+         rc.zscore("result", "key_one").to_i.should == (10*1 + 10)
+         rc.zscore("result", "key_two").to_i.should == (10*2 + 20)
+       end
+
+       it "mins" do
+         rc.zinterstore("result", ["my_zset_one", "my_zset_two"], :weights => [5, 1], :aggregate => :min).should == 2
+         rc.zscore("result", "key_one").to_i.should == 5
+         rc.zscore("result", "key_two").to_i.should == 10
+       end
+
+       it "max'es" do
+         rc.zinterstore("result", ["my_zset_one", "my_zset_two"], :aggregate => :max).should == 2
+         rc.zscore("result", "key_one").to_i.should == 10
+         rc.zscore("result", "key_two").to_i.should == 20
+       end
+
+       it "raises an error with an invalid aggregate function" do
+         rc.zadd "my_zset_one", 1, "key_one"
+         rc.zadd "my_zset_two", 10, "key_one"
+         expect { rc.zinterstore("result", ["my_zset_one", "my_zset_two"], :aggregate => :blahdiblah) }.to raise_error
+       end
+     end
+   end
+
+   context "#zunionstore", :fast => true do
+     before do
+       rc.zadd "my_zset_one", 1, "key_one"
+       rc.zadd "my_zset_two", 10, "key_one"
+       rc.zadd "my_zset_one", 2, "key_two"
+       rc.zadd "my_zset_two", 20, "key_two"
+       rc.zadd "my_zset_two", 30, "key_three"
+     end
+
+     it "without weights and no aggregate function" do
+       rc.zunionstore("result", ["my_zset_one", "my_zset_two"]).should == 3
+       rc.zscore("result", "key_one").to_i.should == 11
+       rc.zscore("result", "key_two").to_i.should == 22
+       rc.zscore("result", "key_three").to_i.should == 30
+     end
+
+     it "with weights" do
+       rc.zunionstore("result", ["my_zset_one", "my_zset_two"], :weights => [10, 1]).should == 3
+       rc.zscore("result", "key_one").to_i.should == (10*1 + 10)
+       rc.zscore("result", "key_two").to_i.should == (10*2 + 20)
+       rc.zscore("result", "key_three").to_i.should == (10*0 + 30)
+     end
+
+     context "ZUNIONSTORE with AGGREGATE" do
+       it "sums" do
+         rc.zunionstore("result", ["my_zset_one", "my_zset_two"], :weights => [10, 1], :aggregate => :sum).should == 3
+         rc.zscore("result", "key_one").to_i.should == (10*1 + 10)
+         rc.zscore("result", "key_two").to_i.should == (10*2 + 20)
+         rc.zscore("result", "key_three").to_i.should == (10*0 + 30)
+       end
+
+       it "mins" do
+         rc.zunionstore("result", ["my_zset_one", "my_zset_two"], :weights => [5, 1], :aggregate => :min).should == 3
+         rc.zscore("result", "key_one").to_i.should == 5
+         rc.zscore("result", "key_two").to_i.should == 10
+         rc.zscore("result", "key_three").to_i.should == 30
+       end
+
+       it "max'es" do
+         rc.zunionstore("result", ["my_zset_one", "my_zset_two"], :aggregate => :max).should == 3
+         rc.zscore("result", "key_one").to_i.should == 10
+         rc.zscore("result", "key_two").to_i.should == 20
+         rc.zscore("result", "key_three").to_i.should == 30
+       end
+
+       it "raises an error with an invalid aggregate function" do
+         rc.zadd "my_zset_one", 1, "key_one"
+         rc.zadd "my_zset_two", 10, "key_one"
+         expect { rc.zunionstore("result", ["my_zset_one", "my_zset_two"], :aggregate => :blahdiblah) }.to raise_error
+       end
+     end
+   end
+
+   context "#shutdown", :fast => true do
+     it "shuts down all servers" do
+       rc.replica_sets.each { |replica_set| replica_set.should_receive(:shutdown) }
+       rc.shutdown
+     end
+   end
+
+ end
+
spec/replica_sets_spec.rb ADDED
@@ -0,0 +1,118 @@
+ require 'spec_helper'
+ require 'replica_set'
+
+ describe RedCluster::ReplicaSet do
+   context "#initialization" do
+     it "gets initialized with one master & one or more slaves" do
+       master = {:host => "localhost", :port => 6379}
+       slaves = [{:host => "localhost", :port => 7379},
+                 {:host => "localhost", :port => 8379}]
+       RedCluster::ReplicaSet.new nil, :master => master, :slaves => slaves
+     end
+   end
+
+   context "#other characteristics" do
+     before(:each) do
+       master = {:host => "localhost", :port => 6379}
+       slaves = [{:host => "localhost", :port => 7379},
+                 {:host => "localhost", :port => 8379}]
+       @rs = RedCluster::ReplicaSet.new nil, :master => master, :slaves => slaves
+     end
+     let(:rs) { @rs }
+     let(:master) { @rs.master }
+     let(:slaves) { @rs.slaves }
+     after { master.flushall }
+
+     context "#replication" do
+       it "the slaves are slaves of the master" do
+         slaves.each do |slave|
+           slave.info["role"].should == "slave"
+           slave.info["master_host"].should == master.client.host
+           slave.info["master_port"].should == master.client.port.to_s
+         end
+       end
+     end
+
+     context "master dying" do
+       before(:each) do
+         master.stubs(:set).raises Errno::ECONNREFUSED
+       end
+       context "when it's a read op" do
+         it "things work as though nothing happened" do
+           expect { rs.get("foo") }.to_not raise_error
+         end
+       end
+
+       context "when there are more than one slave" do
+         it "one of them gets promoted to the new master" do
+           old_slaves = slaves.dup
+           old_master = master
+           rs.set("foo", "bar")
+           old_master.should_not == rs.master
+           old_slaves.should include(rs.master)
+         end
+         it "the others become slaves of the new master" do
+           rs.set("foo", "bar")
+           new_master = rs.master
+           rs.slaves.each do |slave|
+             slave.info["master_host"].should == new_master.client.host
+             slave.info["master_port"].should == new_master.client.port.to_s
+           end
+         end
+       end
+       context "when there is just one slave" do
+         it "becomes the new master" do
+           slaves.shift while slaves.count > 1
+           slaves.count.should == 1
+           old_slave = slaves[0]
+           rs.set('foo', 'bar')
+           old_slave.should == rs.master
+         end
+       end
+       context "when there are no slaves" do
+         it "a RedCluster::NoMaster exception gets thrown" do
+           slaves.shift while slaves.count > 0
+           expect { rs.set('foo', 'bar') }.to raise_error(RedCluster::NoMaster, "No master in replica set")
+         end
+       end
+     end
+
+     context "#read operations" do
+       it "get forwarded to the slaves on a round-robin basis" do
+         master.expects(:get).never
+         slaves[0].expects(:get).with("some_key").returns "some_val"
+         slaves[1].expects(:get).with("some_key").returns "some_new_val"
+
+         rs.get("some_key").should == "some_val"
+         rs.get("some_key").should == "some_new_val"
+       end
+     end
+
+     context "#write operations" do
+       it "get forwarded to the master" do
+         master.expects(:set)
+         rs.set("foo", "bar")
+       end
+     end
+
+     context "#blocking operations" do
+       it "raise an error" do
+         expect { rs.blpop("some_list", 0) }.to raise_error
+       end
+     end
+
+     context "#slaveof operations" do
+       it "raise an error" do
+         expect { rs.slaveof("localhost", 6379) }.to raise_error
+       end
+     end
+
+     context "#pub sub operations" do
+       it "raise an error" do
+         expect { rs.publish("some_channel", "some_message") }.to raise_error
+       end
+     end
+
+   end
+ end
+
spec/spec_helper.rb ADDED
@@ -0,0 +1,7 @@
+ require 'rubygems'
+ require 'bundler'
+ Bundler.setup :default, :test
+ Bundler.require :default, :test
+
+ dir = File.dirname(File.expand_path(__FILE__))
+ $LOAD_PATH.unshift dir + '/../lib'
metadata ADDED
@@ -0,0 +1,66 @@
+ --- !ruby/object:Gem::Specification
+ name: red_cluster
+ version: !ruby/object:Gem::Version
+   version: 0.0.1
+ prerelease:
+ platform: ruby
+ authors:
+ - Santosh Kumar
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2011-10-25 00:00:00.000000000Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: redis
+   requirement: &70203057663840 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *70203057663840
+ description: ! " Red Cluster brings together a set of redis servers and allows
+   you to read and write to them\n as though you were writing to just one. A few
+   of the reasons you might want to consider\n clustering could be:\n\n * Robustness
+   - Having a write master and read slaves\n * Harnessing the multiple cores you
+   have running while not compromising on the speed of redis\n * Fault tolerance
+   - When one of the masters goes down a slave in the replica sets gets promoted\n
+   \ automatically, with no down-time\n"
+ email: santosh79@gmail.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/red_cluster.rb
+ - lib/replica_set.rb
+ - spec/red_cluster_spec.rb
+ - spec/replica_sets_spec.rb
+ - spec/spec_helper.rb
+ homepage: https://github.com/santosh79/red_cluster
+ licenses: []
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 1.8.10
+ signing_key:
+ specification_version: 3
+ summary: Red Cluster clusters together a set of redis servers.
+ test_files: []