ruster 0.0.3 → 0.0.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: c35baea7d1a295313170c4ef1345c6f22f872697
-  data.tar.gz: 76f8ba9ddf5ae1cf2cc5df865fe733770786fc21
+  metadata.gz: 96027b4bdf2c2795b11fd73f8810481dc3f8216b
+  data.tar.gz: 95eb3adb68b769c09c72413f6579815ab1e11cc9
 SHA512:
-  metadata.gz: ef371dc30165f7284d3e0d1c1b250125320f675cc27391790195147f0ddd17d53b287ac130b10e23c1ffe48052fea74ca0cfebb641d4d0d0d86c5900ddf26609
-  data.tar.gz: d5c88f5c3cc8389231029ceea8a69153457056cc5c452eb1113b53a5c921d9d45d5df1b6dae60de1180b18fd0e0626aa78347a3f0ac765eb178f47010e6db5bf
+  metadata.gz: 04da50f39fdf89bb5030fe9044df8a67e60f8ca62471e83c6bed6d587a771d0b7659de17780dcd8f7e2161c9d6f6b6f7de47c51fbba59fa958a3d8eba75385ae
+  data.tar.gz: 6f71ed0fb738302ffb98c10bfb4929c68e6a72b52a2874fffc8f722c79e4ca0b39c95c24f4d8b2d198bf08a882816ad773036d70a532e5906770ae4c136d036c
data/README.md CHANGED
@@ -107,6 +107,8 @@ Also, I'd like to thank to [Eruca Sativa][eruca] and [Cirse][cirse]
 for the music that's currently blasting my speakers while I write
 this.
 
+Who said programming shouldn't be [fun][lovestory]? [Discuss on Hacker News][lovestoryhn].
+
 [redis]: http://redis.io/
 [redis-cluster]: http://redis.io/topics/cluster-tutorial
 [redic]: https://github.com/amakawa/redic
@@ -120,3 +122,5 @@ this.
 [@pote]: https://twitter.com/poteland
 [@lucasefe]: https://twitter.com/lucasefe
 [nameme]: https://twitter.com/inkel/status/444638064393326592
+[lovestory]: https://github.com/inkel/ruster/blob/90f7da1c281bfc1a5fe01ccf8057f948278b3685/test/node.rb#L150-198
+[lovestoryhn]: https://news.ycombinator.com/item?id=7406297
data/bin/ruster CHANGED
@@ -1,7 +1,7 @@
 #! /usr/bin/env ruby
 
-require "redic"
 require "clap"
+require_relative "../lib/ruster"
 
 $verbose = 0
 
@@ -41,306 +41,46 @@ EOU
 
 abort USAGE if action.nil? or args.nil? or args.empty?
 
-module UI
-  def err msg
-    $stderr.puts msg
-  end
-
-  def abort msg, backtrace=[]
-    err msg
-    backtrace.each { |line| err line } if $verbose > 1
-    exit 1
-  end
-
-  def info *args
-    $stdout.puts args.join(" ")
-  end
-
-  def log *args
-    $stdout.puts args.join(" ") if $verbose > 0
-  end
-
-  def debug *args
-    $stdout.puts args.join(" ") if $verbose > 1
-  end
-
-  extend self
-end
-
-class Node
-  [:id, :ip_port, :flags, :master_id, :ping, :pong, :config, :state, :slots].each do |a|
-    attr a
-  end
-
-  def initialize(ip_port)
-    @ip_port = ip_port
-    load_info!
-  end
-
-  def cluster_enabled?
-    call("INFO", "cluster").include?("cluster_enabled:1")
-  end
-
-  def only_node?
-    call("CLUSTER", "INFO").include?("cluster_known_nodes:1")
-  end
-
-  def empty?
-    call("INFO", "keyspace").strip == "# Keyspace"
-  end
-
-  def load_info!
-    call("CLUSTER", "NODES").split("\n").each do |line|
-      parts = line.split
-      next unless parts[2].include?("myself")
-      set_info!(*parts)
-    end
-  end
-
-  def set_info!(id, ip_port, flags, master_id, ping, pong, config, state, *slots)
-    @id = id
-    @flags = flags.split(",")
-    @master_id = master_id
-    @ping = ping
-    @pong = pong
-    @config = config
-    @state = state
-    @slots = slots
-    @ip_port = ip_port unless flags.include?("myself")
-  end
-
-  def ip
-    @ip_port.split(":").first
-  end
-
-  def port
-    @ip_port.split(":").last
-  end
-
-  def client
-    @client ||= Redic.new("redis://#{@ip_port}")
-  end
-
-  def call(*args)
-    UI.debug ">", *args
-    client.call(*args)
-  end
-
-  def dead?
-    %w{ disconnected fail noaddr }.any? do |flag|
-      flags.include?(flag)
-    end
-  end
-
-  def alive?
-    p [ip_port, flags, state]
-    !dead?
-  end
-
-  def to_s
-    "#{@id} [#{@ip_port}]"
-  end
-
-  def slots
-    return @_slots if @_slots
-
-    slots = { slots: [], migrating: {}, importing: {} }
-
-    @slots.each do |data|
-      if data[0] == /\[(\d+)-([<>])-(\d+)\]/
-        if $2 == ">"
-          slots[:migrating][$1] = $2
-        else
-          slots[:importing][$1] = $2
-        end
-      elsif data =~ /(\d+)-(\d+)/
-        b, e = $1.to_i, $2.to_i
-        (b..e).each { |slot| slots[:slots] << slot }
-      else
-        slots[:slots] << data.to_i
-      end
-    end
-
-    @_slots = slots
-  end
-end
-
-class Cluster
-  SLOTS = 16384
-
-  def initialize(addrs)
-    @addrs = Array(addrs)
-  end
-
-  def nodes
-    @nodes ||= @addrs.map { |addr| Node.new(addr) }
-  end
-
-  def allocate_slots(node, slots)
-    UI.log "Allocating #{slots.size} slots (#{slots.first}..#{slots.last}) in node #{node}"
-
-    UI.debug "> CLUSTER ADDSLOTS #{slots.first}..#{slots.last}"
-
-    res = node.client.call("CLUSTER", "ADDSLOTS", *slots)
-
-    UI.abort res.message if res.is_a?(RuntimeError)
-  end
-
-  def add_node(node)
-    default = nodes.first
-
-    UI.log "Joining node #{node} to node #{default}"
-
-    ip, port = node.ip_port.split(":")
-
-    res = default.call("CLUSTER", "MEET", ip, port)
-
-    UI.abort res.message if res.is_a?(RuntimeError)
-  end
-
-  def create!
-    nodes.each do |node|
-      raise ArgumentError, "Redis Server at #{node.ip_port} not running in cluster mode" unless node.cluster_enabled?
-      raise ArgumentError, "Redis Server at #{node.ip_port} already exists in a cluster" unless node.only_node?
-      raise ArgumentError, "Redis Server at #{node.ip_port} is not empty" unless node.empty?
-    end
-
-    UI.log "Allocating #{SLOTS} slots in #{nodes.length} nodes"
-
-    available_slots = 0.upto(SLOTS - 1).each_slice((SLOTS.to_f / nodes.length).ceil)
-
-    nodes.each do |node|
-      slots = available_slots.next.to_a
-
-      allocate_slots(node, slots)
-    end
-
-    nodes.each { |node| add_node node }
-  end
-
-  def remove_node(node)
-    default = nodes.first
-
-    UI.log "Removing node #{node} from cluster"
-
-    res = default.call("CLUSTER", "FORGET", node.id)
-
-    UI.abort res.message if res.is_a?(RuntimeError)
-  end
-
-  def nodes!
-    node = nodes.sample
-
-    node.call("CLUSTER", "NODES").split("\n").map do |line|
-      _, ip_port, flags, _ = line.split
-
-      if flags.include?("myself")
-        node
-      else
-        Node.new(ip_port)
-      end
-    end
-  end
-
-  def each(*args)
-    nodes!.each do |node|
-      UI.info "#{node}: #{args.join(' ')}"
-
-      res = node.call(*args)
-      UI.info res
-      UI.info "--"
-    end
-  end
-
-  def reshard(target_addr, slots, sources, opts={})
-    options = { timeout: 1_000, db: 0 }.merge(opts)
-
-    target = Node.new(target_addr)
-
-    from = sources.map{ |addr| Node.new(addr) } \
-             .sort{ |a, b| b.slots[:slots].size <=> a.slots[:slots].size }
-
-    total_slots = from.inject(0) do |sum, source|
-      sum + source.slots[:slots].size
-    end
-
-    UI.abort "No slots found to migrate" unless total_slots > 0
-
-    from.each do |source|
-      # Proportional number of slots, based on current assigned slots
-      node_slots = (slots.to_f / total_slots * source.slots[:slots].size).to_i
-
-      UI.info "Moving #{node_slots} slots from #{source} to #{target}"
-
-      source.slots[:slots].take(node_slots).each do |slot|
-        count = source.call("CLUSTER", "COUNTKEYSINSLOT", slot)
-
-        UI.log " Moving slot #{slot} (#{count} keys)"
-
-        target.call("CLUSTER", "SETSLOT", slot, "IMPORTING", source.id)
-        source.call("CLUSTER", "SETSLOT", slot, "MIGRATING", target.id)
-
-        done = false
-
-        until done
-          keys = source.call("CLUSTER", "GETKEYSINSLOT", slot, 10)
-
-          done = keys.empty?
-
-          keys.each do |key|
-            res = source.call("MIGRATE", target.ip, target.port, key, options[:db], options[:timeout])
-
-            UI.abort res.message if res.is_a?(RuntimeError)
-
-            $stdout.print '.' if $verbose > 2
-          end
-        end
-
-        nodes!.each do |node|
-          res = node.call("CLUSTER", "SETSLOT", slot, "NODE", target.id)
-
-          UI.err res.message if res.is_a?(RuntimeError)
-        end
-      end
-    end
-  end
-end
-
 begin
   case action
   when "create"
-    cluster = Cluster.new(args)
-    cluster.create!
+    Ruster::Cluster.create!(args)
   when "add"
-    cluster = Cluster.new(args.shift)
+    cluster = Ruster::Cluster.new(Ruster::Node.new(args.shift))
 
     args.each do |addr|
-      cluster.add_node(Node.new(addr))
+      ip, port = addr.split(":")
+      cluster.add_node(ip, port)
     end
   when "remove"
-    cluster = Cluster.new(args.shift)
+    cluster = Ruster::Cluster.new(Ruster::Node.new(args.shift))
 
     args.each do |addr|
-      cluster.remove_node(Node.new(addr))
+      node = Ruster::Node.new(addr)
+      node.load!
+      cluster.remove_node(node)
    end
   when "each"
-    cluster = Cluster.new(args.shift)
+    cluster = Ruster::Cluster.new(Ruster::Node.new(args.shift))
 
-    cluster.each(*args)
+    cluster.each(*args) do |node, res|
+      puts "> #{node}"
+      puts res
+    end
   when "reshard"
-    options = {}
+    cluster = Ruster::Cluster.new(Ruster::Node.new(args.shift))
 
-    cluster_addr, slots, target_addr, *sources = Clap.run args, {
-      "-t" => ->(ms) { options[:timeout] = Integer(ms) },
-      "-n" => ->(db) { options[:db] = db }
-    }
+    num_slots, target_addr, *sources_addr = args
 
-    cluster = Cluster.new(cluster_addr)
+    target = Ruster::Node.new(target_addr)
+    sources = sources_addr.map{ |addr| Ruster::Node.new(addr) }
 
-    cluster.reshard(target_addr, slots, sources, options)
+    cluster.reshard(target, num_slots.to_i, sources)
   else
-    UI.abort "Unrecognized action `#{action}'\n#{USAGE}"
+    abort "Unrecognized action `#{action}'\n#{USAGE}"
   end
 rescue => ex
-  UI.abort ex.message, ex.backtrace
+  $stderr.puts ex.message
+  ex.backtrace.each{ |line| $stderr.puts line } if $verbose > 1
+  exit 2
 end
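
The rewritten script above is now a thin CLI wrapper: all cluster logic has moved into the new Ruster::Node and Ruster::Cluster classes pulled in via require_relative. A minimal sketch of driving the same API from Ruby directly (the addresses here are hypothetical placeholders, not part of the package):

    require_relative "lib/ruster"

    # "create": bootstrap a cluster from empty, cluster-enabled servers
    Ruster::Cluster.create!(["127.0.0.1:7000", "127.0.0.1:7001", "127.0.0.1:7002"])

    # Other actions go through an entry node, as in the dispatch code above
    cluster = Ruster::Cluster.new(Ruster::Node.new("127.0.0.1:7000"))
    cluster.add_node("127.0.0.1", "7003")
    cluster.each("PING") { |node, res| puts "> #{node}", res }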
data/lib/ruster.rb ADDED
@@ -0,0 +1,8 @@
+require "redic"
+
+class Ruster
+end
+
+require_relative "ruster/util"
+require_relative "ruster/node"
+require_relative "ruster/cluster"
data/lib/ruster/cluster.rb ADDED
@@ -0,0 +1,143 @@
+class Ruster::Cluster
+  include Ruster::Util
+
+  attr :entry
+
+  SLOTS = 16384
+
+  def initialize(entry)
+    @entry = entry
+  end
+
+  def info
+    @entry.cluster_info
+  end
+
+  def state
+    info[:cluster_state]
+  end
+
+  def ok?
+    state == "ok"
+  end
+
+  def fail?
+    state == "fail"
+  end
+
+  def slots_assigned
+    info[:cluster_slots_assigned].to_i
+  end
+
+  def slots_ok
+    info[:cluster_slots_ok].to_i
+  end
+
+  def slots_pfail
+    info[:cluster_slots_pfail].to_i
+  end
+
+  def slots_fail
+    info[:cluster_slots_fail].to_i
+  end
+
+  def known_nodes
+    info[:cluster_known_nodes].to_i
+  end
+
+  def size
+    info[:cluster_size].to_i
+  end
+
+  def current_epoch
+    info[:cluster_current_epoch].to_i
+  end
+
+  def stats_messages_sent
+    info[:cluster_stats_messages_sent].to_i
+  end
+
+  def stats_messages_received
+    info[:stats_messages_received].to_i
+  end
+
+  def nodes
+    @entry.load!
+    [@entry] + @entry.friends
+  end
+
+  def add_node(ip, port)
+    @entry.meet(ip, port)
+  end
+
+  def remove_node(bye)
+    nodes.each do |node|
+      next if node.id == bye.id
+      node.forget(bye)
+    end
+  end
+
+  def self.create!(addrs)
+    # Check nodes
+    nodes = addrs.map do |addr|
+      node = ::Ruster::Node.new(addr)
+
+      raise ArgumentError, "Redis Server at #{addr} not running in cluster mode" unless node.enabled?
+      raise ArgumentError, "Redis Server at #{addr} already exists in a cluster" unless node.only_node?
+      raise ArgumentError, "Redis Server at #{addr} is not empty" unless node.empty?
+
+      node
+    end
+
+    # Allocate slots evenly among all nodes
+    slots_by_node = 0.upto(SLOTS - 1).each_slice((SLOTS.to_f / nodes.length).ceil)
+
+    nodes.each do |node|
+      slots = slots_by_node.next.to_a
+
+      node.add_slots(*slots)
+    end
+
+    # Create cluster by meeting nodes
+    entry = nodes.shift
+
+    nodes.each { |node| entry.meet node.ip, node.port }
+
+    new(entry)
+  end
+
+  def each(*args, &block)
+    nodes.each do |node|
+      yield node, node.call(*args)
+    end
+  end
+
+  def reshard(target, num_slots, sources)
+    raise ArgumentError, "Target node #{target} is not part of the cluster" unless in_cluster?(target)
+    target.load!
+
+    sources.each do |source|
+      raise ArgumentError, "Source node #{source} is not part of the cluster" unless in_cluster?(source)
+      source.load!
+    end
+
+    sources.sort_by!{ |node| -node.slots.size }
+
+    total_slots = sources.inject(0) do |sum, node|
+      sum + node.all_slots.size
+    end
+
+    sources.each do |node|
+      # Proportional number of slots based on node size
+      node_slots = (num_slots.to_f / total_slots * node.all_slots.size)
+
+      node.all_slots.take(node_slots).each do |slot|
+        node.move_slot!(slot, target)
+      end
+    end
+  end
+
+  def in_cluster?(node)
+    nodes.any?{ |n| n.addr == node.addr }
+  end
+end
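
A short usage sketch of the class above, assuming a cluster-enabled Redis server is listening at the (hypothetical) address below; each reader method is a typed view over the entry node's CLUSTER INFO reply:

    cluster = Ruster::Cluster.new(Ruster::Node.new("127.0.0.1:12701"))

    cluster.state        # => "ok" or "fail" (cluster_state)
    cluster.ok?          # => state == "ok"
    cluster.known_nodes  # => cluster_known_nodes as an Integer
    cluster.nodes        # => the entry node plus its friends, re-load!-ed on every call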
data/lib/ruster/node.rb ADDED
@@ -0,0 +1,172 @@
+class Ruster::Node
+  include Ruster::Util
+
+  attr :addr
+  attr :id
+  attr :flags
+  attr :master_id
+  attr :ping_epoch
+  attr :pong_epoch
+  attr :config_epoch
+  attr :state
+  attr :slots
+  attr :migrating
+  attr :importing
+  attr :friends
+
+  def initialize(addr)
+    @addr = addr
+  end
+
+  def client
+    @client ||= Redic.new("redis://#{addr}")
+  end
+
+  def call(*args)
+    res = client.call(*args)
+    raise res if res.is_a?(RuntimeError)
+    res
+  end
+
+  def enabled?
+    parse_info(call("INFO", "cluster"))[:cluster_enabled] == "1"
+  end
+
+  def read_info_line!(info_line)
+    parts = info_line.split
+
+    @id = parts.shift
+    addr = parts.shift
+    @flags = parts.shift.split(",")
+    @addr = addr unless @flags.include?("myself")
+    @master_id = parts.shift
+    @ping_epoch = parts.shift.to_i
+    @pong_epoch = parts.shift.to_i
+    @config_epoch = parts.shift.to_i
+    @state = parts.shift
+    @slots = []
+    @migrating = {}
+    @importing = {}
+
+    parts.each do |slots|
+      case slots
+      when /^(\d+)-(\d+)$/ then @slots << ($1.to_i..$2.to_i)
+      when /^\d+$/ then @slots << (slots.to_i..slots.to_i)
+      when /^\[(\d+)-([<>])-([a-z0-9]+)\]$/
+        case $2
+        when ">" then @migrating[$1.to_i] = $3
+        when "<" then @importing[$1.to_i] = $3
+        end
+      end
+    end
+  end
+
+  def all_slots
+    slots.map(&:to_a).flatten
+  end
+
+  def to_s
+    "#{addr} [#{id}]"
+  end
+
+  def self.from_info_line(info_line)
+    _, addr, _ = info_line.split
+    new(addr).tap { |node| node.read_info_line!(info_line) }
+  end
+
+  def load!
+    @friends = []
+
+    call("CLUSTER", "NODES").split("\n").each do |line|
+      if line.include?("myself")
+        read_info_line!(line)
+      else
+        @friends << self.class.from_info_line(line)
+      end
+    end
+  end
+
+  def meet(ip, port)
+    call("CLUSTER", "MEET", ip, port)
+  end
+
+  def forget(node)
+    raise ArgumentError, "Node #{node} is not empty" unless node.slots.empty? and node.migrating.empty? and node.importing.empty?
+    call("CLUSTER", "FORGET", node.id)
+  end
+
+  def replicate(node)
+    call("CLUSTER", "REPLICATE", node.id)
+  end
+
+  def slaves
+    call("CLUSTER", "SLAVES", id).map do |line|
+      self.class.from_info_line(line)
+    end
+  end
+
+  def add_slots(*slots)
+    call("CLUSTER", "ADDSLOTS", *slots)
+  end
+
+  def del_slots(*slots)
+    call("CLUSTER", "DELSLOTS", *slots)
+  end
+
+  def flush_slots!
+    call("CLUSTER", "FLUSHSLOTS")
+  end
+
+  def cluster_info
+    parse_info(call("CLUSTER", "INFO"))
+  end
+
+  def ip
+    addr.split(":").first
+  end
+
+  def port
+    addr.split(":").last
+  end
+
+  # In Redis Cluster only DB 0 is enabled
+  DB0 = 0
+
+  def move_slot!(slot, target, options={})
+    options[:num_keys] ||= 10
+    options[:timeout] ||= call("CONFIG", "GET", "cluster-node-timeout")
+
+    # Tell the target node to import the slot
+    target.call("CLUSTER", "SETSLOT", slot, "IMPORTING", id)
+
+    # Tell the current node to export the slot
+    call("CLUSTER", "SETSLOT", slot, "MIGRATING", target.id)
+
+    # Export keys
+    done = false
+    until done
+      keys = call("CLUSTER", "GETKEYSINSLOT", slot, options[:num_keys])
+
+      done = keys.empty?
+
+      keys.each do |key|
+        call("MIGRATE", target.ip, target.port, key, DB0, options[:timeout])
+      end
+
+      # Tell cluster the location of the new slot
+      call("CLUSTER", "SETSLOT", slot, "NODE", target.id)
+
+      friends.each do |node|
+        node.call("CLUSTER", "SETSLOT", slot, "NODE", target.id)
+      end
+    end
+  end
+
+  def empty?
+    call("DBSIZE") == 0
+  end
+
+  def only_node?
+    parse_info(call("CLUSTER", "INFO"))[:cluster_known_nodes] == "1"
+  end
+end
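
Since read_info_line! only parses a CLUSTER NODES line, the slot bookkeeping above can be exercised without a running server; this sketch reuses the sample line format from test/node.rb below:

    node = Ruster::Node.new("127.0.0.1:12701")
    node.read_info_line!("9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected 0-1024 2048")

    node.slots      # => [0..1024, 2048..2048] (single slots become one-element Ranges)
    node.all_slots  # => the same slots flattened into an Array of Integers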
data/lib/ruster/util.rb ADDED
@@ -0,0 +1,11 @@
+module Ruster::Util
+  def parse_info(info)
+    {}.tap do |data|
+      info.split("\r\n").each do |line|
+        next if line[0] == "#"
+        key, val = line.split(":")
+        data[key.to_sym] = val
+      end
+    end
+  end
+end
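
A quick sketch of parse_info on a CLUSTER INFO-style payload, mirroring the assertions in test/util.rb below: comment lines are skipped, keys become Symbols, and values stay Strings.

    U = Module.new { extend Ruster::Util }

    U.parse_info("# Cluster\r\ncluster_enabled:1\r\n")
    # => {:cluster_enabled=>"1"}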
data/ruster.gemspec CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |s|
   s.name = "ruster"
-  s.version = "0.0.3"
+  s.version = "0.0.4"
   s.summary = "A simple Redis Cluster Administration tool"
   s.description = "Control your Redis Cluster from the command line."
   s.authors = ["Leandro López"]
@@ -15,5 +15,7 @@ Gem::Specification.new do |s|
   s.add_dependency "redic"
   s.add_dependency "clap"
 
+  s.add_development_dependency "protest"
+
   s.files = `git ls-files`.split("\n")
 end
data/test/cluster.rb ADDED
@@ -0,0 +1,81 @@
+require_relative "./helper"
+
+require "timeout"
+
+Protest.describe "Ruster::Cluster" do
+  test "add node" do
+    with_nodes(n: 2) do |ports|
+      port_a, port_b = ports.to_a
+
+      bob = Ruster::Node.new("127.0.0.1:#{port_a}")
+      bob.add_slots(*0..16383)
+      cluster = Ruster::Cluster.new(bob)
+
+      Timeout.timeout(10) { sleep 0.05 until cluster.ok? }
+
+      assert_equal 1, cluster.nodes.size
+      assert_equal [0..16383], bob.slots
+
+      cluster.add_node("127.0.0.1", port_b)
+
+      Timeout.timeout(10) { sleep 0.05 until cluster.ok? }
+
+      assert_equal 2, cluster.nodes.size
+
+      slots = cluster.nodes.map do |node|
+        [node.addr, node.slots]
+      end
+
+      # Do not realloce slots
+      assert_equal 2, slots.size
+      assert slots.include?(["127.0.0.1:#{port_a}", [0..16383]])
+      assert slots.include?(["127.0.0.1:#{port_b}", []])
+    end
+  end
+
+  context "remove node" do
+    test "empty node" do
+      with_nodes(n: 3) do |ports|
+        port_a, port_b, port_c = ports.to_a
+
+        soveran = Ruster::Node.new("127.0.0.1:#{port_a}")
+        cuervo = Ruster::Node.new("127.0.0.1:#{port_b}")
+        inkel = Ruster::Node.new("127.0.0.1:#{port_c}")
+
+        soveran.add_slots(*0..8191)
+        cuervo.add_slots(*8192..16383)
+
+        cluster = Ruster::Cluster.new(soveran)
+        cluster.add_node(cuervo.ip, cuervo.port)
+        cluster.add_node(inkel.ip, inkel.port)
+
+        Timeout.timeout(10) { sleep 0.05 until cluster.ok? }
+
+        soveran.load!
+        cuervo.load!
+        inkel.load!
+
+        cluster.remove_node(inkel)
+
+        Timeout.timeout(10) { sleep 0.05 until cluster.ok? }
+
+        assert_equal 2, cluster.nodes.size
+
+        ids = cluster.nodes.map(&:id)
+
+        assert ids.include?(soveran.id)
+        assert ids.include?(cuervo.id)
+        assert !ids.include?(inkel.id)
+
+        slots = cluster.nodes.map do |node|
+          [node.addr, node.slots]
+        end
+
+        # Do not realloce slots
+        assert_equal 2, slots.size
+        assert slots.include?([soveran.addr, [0..8191]])
+        assert slots.include?([cuervo.addr, [8192..16383]])
+      end
+    end
+  end
+end
data/test/helper.rb ADDED
@@ -0,0 +1,58 @@
+$:.unshift(File.expand_path("../lib", File.dirname(__FILE__)))
+
+require "protest"
+require "ruster"
+require "tmpdir"
+
+def with_nodes(opts={})
+  options = {
+    n: 3,
+    init_port: 12701,
+    enabled: "yes"
+  }
+
+  options.merge!(opts)
+
+  end_port = options[:init_port] + options[:n] - 1
+
+  tmp = Dir.mktmpdir
+
+  pids = []
+  ports = (options[:init_port]..end_port)
+
+  ports.each do |port|
+    pids << fork do
+      dir = File.join(tmp, port.to_s)
+
+      Dir.mkdir(dir)
+
+      args = [
+        "--port", port.to_s,
+        "--dir", dir,
+        "--save", "",
+        "--logfile", "./redis.log"
+      ]
+
+      if options[:enabled] == "yes"
+        args.concat(["--cluster-enabled", "yes",
+                     "--cluster-config-file", "redis.conf",
+                     "--cluster-node-timeout", "5000"])
+      end
+
+      exec "redis-server", *args
+    end
+  end
+
+  # Wait for redis-server to start
+  sleep 0.125
+
+  yield ports
+ensure
+  pids.each { |pid| Process.kill :TERM, pid }
+
+  Process.waitall
+
+  FileUtils.remove_entry_secure tmp
+end
+
+Protest.report_with((ENV["PROTEST_REPORT"] || "documentation").to_sym)
data/test/node.rb ADDED
@@ -0,0 +1,255 @@
+require_relative "./helper"
+
+Protest.describe "Node" do
+  test "is cluster enabled" do
+    with_nodes(n: 1) do |ports|
+      node = Ruster::Node.new("127.0.0.1:#{ports.first}")
+
+      assert node.enabled?
+    end
+  end
+
+  test "is not cluster enabled" do
+    with_nodes(n: 1, enabled: "no") do |ports|
+      node = Ruster::Node.new("127.0.0.1:#{ports.first}")
+
+      assert !node.enabled?
+    end
+  end
+
+  context "information" do
+    test "read and parses info line" do
+      info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected"
+
+      node = Ruster::Node.new("127.0.0.1:12701")
+
+      node.read_info_line!(info_line)
+
+      assert_equal "9aee954a0b7d6b49d7e68c18d08873c56aaead6b", node.id
+      assert_equal "127.0.0.1:12701", node.addr
+      assert_equal ["myself", "master"], node.flags
+      assert_equal "-", node.master_id
+      assert_equal 0, node.ping_epoch
+      assert_equal 1, node.pong_epoch
+      assert_equal 2, node.config_epoch
+      assert_equal "connected", node.state
+      assert_equal [], node.slots
+
+      assert_equal "127.0.0.1:12701 [9aee954a0b7d6b49d7e68c18d08873c56aaead6b]", node.to_s
+    end
+
+    context "allocated slots" do
+      test "contiguous block" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected 0-16383"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [(0..16383)], node.slots
+        assert node.migrating.empty?
+        assert node.importing.empty?
+      end
+
+      test "single" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected 4096"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [(4096..4096)], node.slots
+        assert node.migrating.empty?
+        assert node.importing.empty?
+      end
+
+      test "migrating" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected [16383->-6daeaa65c37880d81c86e7d94b6d7b0a459eea9]"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [], node.slots
+        assert_equal 1, node.migrating.size
+        assert_equal "6daeaa65c37880d81c86e7d94b6d7b0a459eea9", node.migrating[16383]
+        assert node.importing.empty?
+      end
+
+      test "importing" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected [16383-<-6daeaa65c37880d81c86e7d94b6d7b0a459eea9]"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [], node.slots
+        assert_equal 1, node.importing.size
+        assert_equal "6daeaa65c37880d81c86e7d94b6d7b0a459eea9", node.importing[16383]
+        assert node.migrating.empty?
+      end
+
+      test "combined" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected 0-1024 2048 [3072->-6daeaa65c37880d81c86e7d94b6d7b0a459eea9] 4096 [6144-<-6daeaa65c37880d81c86e7d94b6d7b0a459eea9] 8192-16383"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [(0..1024), (2048..2048), (4096..4096), (8192..16383)], node.slots
+
+        assert_equal 1, node.migrating.size
+        assert_equal "6daeaa65c37880d81c86e7d94b6d7b0a459eea9", node.migrating[3072]
+
+        assert_equal 1, node.importing.size
+        assert_equal "6daeaa65c37880d81c86e7d94b6d7b0a459eea9", node.importing[6144]
+      end
+
+      test "all allocated slots as an array" do
+        info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b :0 myself,master - 0 1 2 connected 0-3 5 10-13 16383"
+
+        node = Ruster::Node.new("127.0.0.1:12701")
+
+        node.read_info_line!(info_line)
+
+        assert_equal [0, 1, 2, 3, 5, 10, 11, 12, 13, 16383], node.all_slots
+      end
+    end
+
+    test "create from info line" do
+      info_line = "9aee954a0b7d6b49d7e68c18d08873c56aaead6b 127.0.0.1:12701 master - 0 1 2 connected"
+
+      node = Ruster::Node.from_info_line(info_line)
+
+      assert_equal "9aee954a0b7d6b49d7e68c18d08873c56aaead6b", node.id
+      assert_equal "127.0.0.1:12701", node.addr
+      assert_equal ["master"], node.flags
+      assert_equal "-", node.master_id
+      assert_equal 0, node.ping_epoch
+      assert_equal 1, node.pong_epoch
+      assert_equal 2, node.config_epoch
+      assert_equal "connected", node.state
+      assert_equal [], node.slots
+
+      assert_equal "127.0.0.1:12701 [9aee954a0b7d6b49d7e68c18d08873c56aaead6b]", node.to_s
+    end
+  end
+
+  context "in cluster" do
+    test "only node" do
+      with_nodes(n: 1) do |ports|
+        node = Ruster::Node.new("127.0.0.1:#{ports.first}")
+
+        node.load!
+
+        assert node.id
+        assert_equal [], node.friends
+      end
+    end
+
+    test "meet and forget node, a tragic love story" do
+      with_nodes(n: 2) do |ports|
+        port_a, port_b = ports.to_a
+
+        # This is the story of two nodes
+        joel = Ruster::Node.new("127.0.0.1:#{port_a}")
+        clem = Ruster::Node.new("127.0.0.1:#{port_b}")
+
+        # One day they met for the first time and fell for each other
+        joel.meet("127.0.0.1", port_b)
+
+        # Give the nodes some time to get to know each other
+        sleep 0.5
+
+        joel.load!
+        clem.load!
+
+        assert_equal 1, joel.friends.size
+        assert_equal 1, clem.friends.size
+
+        assert_equal clem.id, joel.friends.first.id
+        assert_equal joel.id, clem.friends.first.id
+
+        # But one tragic afternoon, clem took a terrible decision
+        clem.forget(joel)
+
+        # Give the nodes some time to process their breakup
+        sleep 0.5
+
+        joel.load!
+        clem.load!
+
+        # joel still remembers clem...
+        assert_equal 1, joel.friends.size
+
+        # ...but clem has already moved on
+        assert_equal 0, clem.friends.size
+
+        # joel now decides to use the machine from Eternal sunshine of the spotless mind...
+        joel.forget(clem)
+
+        # ...and after a while, this story ends
+        sleep 0.5
+
+        joel.load!
+
+        assert_equal 0, joel.friends.size
+      end
+    end
+
+    test "replicate/slaves" do
+      with_nodes(n: 2) do |ports|
+        port_a, port_b = ports.to_a
+
+        leo = Ruster::Node.new("127.0.0.1:#{port_a}")
+        django = Ruster::Node.new("127.0.0.1:#{port_b}")
+
+        leo.meet("127.0.0.1", port_b)
+
+        # Give the nodes some time to get to know each other
+        sleep 0.5
+
+        leo.load!
+
+        django.replicate(leo)
+
+        # Wait for configuration to update
+        sleep 0.5
+
+        assert_equal 1, leo.slaves.size
+
+        django.load!
+
+        assert_equal django.id, leo.slaves.first.id
+      end
+    end
+
+    test "allocate, deallocate and flush slots" do
+      with_nodes(n: 1) do |ports|
+        node = Ruster::Node.new("127.0.0.1:#{ports.first}")
+
+        # Single slot
+        node.add_slots(1024)
+
+        # Multiple slots
+        node.add_slots(2048, 4096)
+
+        node.load!
+
+        assert_equal [1024..1024, 2048..2048, 4096..4096], node.slots
+
+        node.del_slots(1024)
+
+        node.load!
+
+        assert_equal [2048..2048, 4096..4096], node.slots
+
+        node.flush_slots!
+
+        node.load!
+
+        assert_equal [], node.slots
+      end
+    end
+  end
+end
data/test/util.rb ADDED
@@ -0,0 +1,30 @@
+require_relative "./helper"
+
+Protest.describe "Ruster::Util" do
+  U = Module.new { extend Ruster::Util }
+
+  test "parse INFO as hash" do
+    info = "cluster_state:fail\r\ncluster_slots_assigned:0\r\ncluster_slots_ok:0\r\ncluster_slots_pfail:0\r\ncluster_slots_fail:0\r\ncluster_known_nodes:1\r\ncluster_size:0\r\ncluster_current_epoch:0\r\ncluster_stats_messages_sent:0\r\ncluster_stats_messages_received:0\r\n"
+
+    data = U.parse_info(info)
+
+    assert data.is_a?(Hash)
+
+    assert_equal "fail", data[:cluster_state]
+    assert_equal "0", data[:cluster_slots_assigned]
+    assert_equal "0", data[:cluster_slots_ok]
+    assert_equal "0", data[:cluster_slots_pfail]
+    assert_equal "0", data[:cluster_slots_fail]
+    assert_equal "1", data[:cluster_known_nodes]
+    assert_equal "0", data[:cluster_size]
+    assert_equal "0", data[:cluster_current_epoch]
+    assert_equal "0", data[:cluster_stats_messages_sent]
+    assert_equal "0", data[:cluster_stats_messages_received]
+  end
+
+  test "ignore comments in INFO parsing" do
+    data = U.parse_info("# Cluster\r\ncluster_enabled:1\r\n")
+
+    assert_equal 1, data.size
+  end
+end
metadata CHANGED
@@ -1,41 +1,55 @@
 --- !ruby/object:Gem::Specification
 name: ruster
 version: !ruby/object:Gem::Version
-  version: 0.0.3
+  version: 0.0.4
 platform: ruby
 authors:
 - Leandro López
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2014-03-15 00:00:00.000000000 Z
+date: 2015-02-23 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redic
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - '>='
+    - - ">="
       - !ruby/object:Gem::Version
         version: '0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - '>='
+    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
   name: clap
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - '>='
+    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - '>='
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: protest
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
 description: Control your Redis Cluster from the command line.
@@ -46,11 +60,19 @@ executables:
 extensions: []
 extra_rdoc_files: []
 files:
-- .gitignore
+- ".gitignore"
 - LICENSE
 - README.md
 - bin/ruster
+- lib/ruster.rb
+- lib/ruster/cluster.rb
+- lib/ruster/node.rb
+- lib/ruster/util.rb
 - ruster.gemspec
+- test/cluster.rb
+- test/helper.rb
+- test/node.rb
+- test/util.rb
 homepage: http://inkel.github.com/ruster
 licenses:
 - MIT
@@ -61,17 +83,17 @@ require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - - '>='
+  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - '>='
+  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.0.3
+rubygems_version: 2.2.2
 signing_key:
 specification_version: 4
 summary: A simple Redis Cluster Administration tool