consistent-cluster 1.0.0
- checksums.yaml +7 -0
- data/lib/consistent-cluster/client.rb +38 -0
- data/lib/consistent-cluster/consistent_hashing/red_black_tree.rb +600 -0
- data/lib/consistent-cluster/consistent_hashing/ring.rb +106 -0
- data/lib/consistent-cluster/consistent_hashing/virtual_point.rb +23 -0
- data/lib/consistent-cluster/consistent_hashing.rb +6 -0
- data/lib/consistent-cluster/demo/client_debug.rb +59 -0
- data/lib/consistent-cluster/demo/rbtree_debug.rb +18 -0
- data/lib/consistent-cluster/demo/sync_client_debug.rb +66 -0
- data/lib/consistent-cluster/demo/zk_debug.rb +69 -0
- data/lib/consistent-cluster/sync-client.rb +152 -0
- data/lib/consistent-cluster/version.rb +3 -0
- data/lib/consistent-cluster.rb +2 -0
- metadata +72 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: a4ea10f872a6d170d1827ad5660abb2344a9ec21
  data.tar.gz: 9f7e1dd8bc5399a5130bb7002b3c21383437b602
SHA512:
  metadata.gz: c910179b9c06d4453a66ce4759a2333ebe1369b4592fb3846f7c90861452bcf0d17f1a59f8e932d74cfbf8434977d5bdac4faa3b5d80f2a115bc83d2cbe320fd
  data.tar.gz: 02f5644a6729bcda86222fc3d7c3912062455223a03f4507abce2735cac1698128133b412084258edfabe6b155f95b07ab98e3f0b9e087b0ffa250029f9af3cb
data/lib/consistent-cluster/client.rb
ADDED
@@ -0,0 +1,38 @@

require "consistent-cluster/version"

require "consistent-cluster/consistent_hashing"

require "json"

module ConsistentCluster

  class Client

    def initialize(options)

      @cluster = options[:cluster]

      replicas = options[:consistent_hashing_replicas] || 3
      @ring = ConsistentHashing::Ring.new(@cluster.keys,replicas)

      @shard_num = 0
    end

    def shard(key=nil)
      cluster_sum = @cluster.length
      raise "no service available" if cluster_sum < 1
      if key
        point = @ring.point_for(key)
        server = @cluster[point.node]
      else
        @shard_num += 1
        @shard_num = @shard_num%cluster_sum
        server = @cluster.values[@shard_num]
      end
      server
    end

  end

end
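For orientation, here is a minimal usage sketch of the Client above. The cluster hash and its string values are hypothetical stand-ins; in practice the values would be whatever backend handles you want shard to return (for example the ThriftClient instances used in the demo further down).

require "consistent-cluster/client"

# Hypothetical cluster map: node id => backend handle.
cluster = {
  "shard-a" => "backend-a",
  "shard-b" => "backend-b",
  "shard-c" => "backend-c"
}

client = ConsistentCluster::Client.new(
  cluster: cluster,
  consistent_hashing_replicas: 100
)

client.shard("user:42")  # the same key always maps to the same backend
client.shard             # with no key, backends are picked round-robin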
data/lib/consistent-cluster/consistent_hashing/red_black_tree.rb
ADDED
@@ -0,0 +1,600 @@
require 'atomic'

module ConsistentCluster
  class RedBlackTree
    include Enumerable

    class Node
      UNDEFINED = Object.new

      attr_reader :key, :value, :color
      attr_reader :left, :right

      def initialize(key, value, left, right, color = :RED)
        @key = key
        @value = value
        @left = left
        @right = right
        # new node is added as RED
        @color = color
      end

      def set_root
        @color = :BLACK
      end

      def red?
        @color == :RED
      end

      def black?
        @color == :BLACK
      end

      def empty?
        false
      end

      def size
        @left.size + 1 + @right.size
      end

      # inorder
      def each(&block)
        @left.each(&block)
        yield [@key, @value]
        @right.each(&block)
      end

      def each_key
        each do |k, v|
          yield k
        end
      end

      def each_value
        each do |k, v|
          yield v
        end
      end

      def keys
        collect { |k, v| k }
      end

      def values
        collect { |k, v| v }
      end

      # returns new_root
      def insert(key, value)
        ret = self
        case key <=> @key
        when -1
          @left = @left.insert(key, value)
          if black? and @right.black? and @left.red? and !@left.children_color?(:BLACK)
            ret = rebalance_for_left_insert
          end
        when 0
          @value = value
        when 1
          @right = @right.insert(key, value)
          if black? and @left.black? and @right.red? and !@right.children_color?(:BLACK)
            ret = rebalance_for_right_insert
          end
        else
          raise TypeError, "cannot compare #{key} and #{@key} with <=>"
        end
        ret.pullup_red
      end

      # returns value
      def retrieve(key)
        case key <=> @key
        when -1
          @left.retrieve(key)
        when 0
          @value
        when 1
          @right.retrieve(key)
        else
          nil
        end
      end

      # returns [deleted_node, new_root, is_rebalance_needed]
      def delete(key)
        ret = self
        case key <=> @key
        when -1
          deleted, @left, rebalance = @left.delete(key)
          if rebalance
            ret, rebalance = rebalance_for_left_delete
          end
        when 0
          deleted = self
          ret, rebalance = delete_node
        when 1
          deleted, @right, rebalance = @right.delete(key)
          if rebalance
            ret, rebalance = rebalance_for_right_delete
          end
        else
          raise TypeError, "cannot compare #{key} and #{@key} with <=>"
        end
        [deleted, ret, rebalance]
      end

      def dump_tree(io, indent = '')
        @right.dump_tree(io, indent + ' ')
        io << indent << sprintf("#<%s:0x%010x %s %s> => %s", self.class.name, __id__, @color, @key.inspect, @value.inspect) << $/
        @left.dump_tree(io, indent + ' ')
      end

      def dump_sexp
        left = @left.dump_sexp
        right = @right.dump_sexp
        if left or right
          '(' + [@key, left || '-', right].compact.join(' ') + ')'
        else
          @key
        end
      end

      # for debugging
      def check_height
        lh = @left.nil? || @left.empty? ? 0 : @left.check_height
        rh = @right.nil? || @right.empty? ? 0 : @right.check_height
        if red?
          if @left.red? or @right.red?
            puts dump_tree(STDERR)
            raise 'red/red assertion failed'
          end
        else
          if lh != rh
            puts dump_tree(STDERR)
            raise "black height unbalanced: #{lh} #{rh}"
          end
        end
        (lh > rh ? lh : rh) + (black? ? 1 : 0)
      end

      protected

      def children_color?(color)
        @right.color == @left.color && @right.color == color
      end

      def color=(color)
        @color = color
      end

      def left=(left)
        @left = left
      end

      def right=(right)
        @right = right
      end

      def color_flip(other)
        @color, other.color = other.color, @color
      end

      def delete_min
        if @left.empty?
          [self, *delete_node]
        else
          ret = self
          deleted, @left, rebalance = @left.delete_min
          if rebalance
            ret, rebalance = rebalance_for_left_delete
          end
          [deleted, ret, rebalance]
        end
      end

      # trying to rebalance when the left sub-tree is 1 level lower than the right
      def rebalance_for_left_delete
        ret = self
        rebalance = false
        if black?
          if @right.black?
            if @right.children_color?(:BLACK)
              # make whole sub-tree 1 level lower and ask rebalance
              @right.color = :RED
              rebalance = true
            else
              # move 1 black from the right to the left by single/double rotation
              ret = balanced_rotate_left
            end
          else
            # flip this sub-tree into another type of 3-children node
            ret = rotate_left
            # try to rebalance in sub-tree
            ret.left, rebalance = ret.left.rebalance_for_left_delete
            raise 'should not happen' if rebalance
          end
        else # red
          if @right.children_color?(:BLACK)
            # make right sub-tree 1 level lower
            color_flip(@right)
          else
            # move 1 black from the right to the left by single/double rotation
            ret = balanced_rotate_left
          end
        end
        [ret, rebalance]
      end

      # trying to rebalance when the right sub-tree is 1 level lower than the left
      # See rebalance_for_left_delete.
      def rebalance_for_right_delete
        ret = self
        rebalance = false
        if black?
          if @left.black?
            if @left.children_color?(:BLACK)
              @left.color = :RED
              rebalance = true
            else
              ret = balanced_rotate_right
            end
          else
            ret = rotate_right
            ret.right, rebalance = ret.right.rebalance_for_right_delete
            raise 'should not happen' if rebalance
          end
        else # red
          if @left.children_color?(:BLACK)
            color_flip(@left)
          else
            ret = balanced_rotate_right
          end
        end
        [ret, rebalance]
      end

      # move 1 black from the right to the left by single/double rotation
      def balanced_rotate_left
        if @right.left.red? and @right.right.black?
          @right = @right.rotate_right
        end
        ret = rotate_left
        ret.right.color = ret.left.color = :BLACK
        ret
      end

      # move 1 black from the left to the right by single/double rotation
      def balanced_rotate_right
        if @left.right.red? and @left.left.black?
          @left = @left.rotate_left
        end
        ret = rotate_right
        ret.right.color = ret.left.color = :BLACK
        ret
      end

      # Right single rotation
      # (b a (D c E)) where D and E are RED --> (d (B a c) E)
      #
      #   b           d
      #  / \         / \
      # a   D  -->  B   E
      #    / \     / \
      #   c   E   a   c
      #
      def rotate_left
        root = @right
        @right = root.left
        root.left = self
        root.color_flip(root.left)
        root
      end

      # Left single rotation
      # (d (B A c) e) where A and B are RED --> (b A (D c e))
      #
      #     d         b
      #    / \       / \
      #   B   e --> A   D
      #  / \           / \
      # A   c         c   e
      #
      def rotate_right
        root = @left
        @left = root.right
        root.right = self
        root.color_flip(root.right)
        root
      end

      # Pull up red nodes
      # (b (A C)) where A and C are RED --> (B (a c))
      #
      #   b          B
      #  / \   -->  / \
      # A   C      a   c
      #
      def pullup_red
        if black? and children_color?(:RED)
          @left.color = @right.color = :BLACK
          self.color = :RED
        end
        self
      end

      private

      # trying to rebalance when the left sub-tree is 1 level higher than the right
      # precondition: self is black and @left is red
      def rebalance_for_left_insert
        # move 1 black from the left to the right by single/double rotation
        if @left.right.red?
          @left = @left.rotate_left
        end
        rotate_right
      end

      # trying to rebalance when the right sub-tree is 1 level higher than the left
      # See rebalance_for_left_insert.
      def rebalance_for_right_insert
        if @right.left.red?
          @right = @right.rotate_right
        end
        rotate_left
      end

      def delete_node
        rebalance = false
        if @left.empty? and @right.empty?
          # just remove this node and ask rebalance to the parent
          new_root = EMPTY
          if black?
            rebalance = true
          end
        elsif @left.empty? or @right.empty?
          # pick the single children
          new_root = @left.empty? ? @right : @left
          if black?
            # keep the color black
            raise 'should not happen' unless new_root.red?
            color_flip(new_root)
          else
            # just remove the red node
          end
        else
          # pick the minimum node from the right sub-tree and replace self with it
          deleted, @right, rebalance = @right.delete_min
          new_root = Node.new(deleted.key, deleted.value, @left, @right, @color)
          if rebalance
            new_root, rebalance = new_root.rebalance_for_right_delete
          end
        end
        [new_root, rebalance]
      end

      def collect
        pool = []
        each do |key, value|
          pool << yield(key, value)
        end
        pool
      end

      class EmptyNode < Node
        def initialize
          @value = nil
          @color = :BLACK
        end

        def empty?
          true
        end

        def size
          0
        end

        def each(&block)
          # intentionally blank
        end

        # returns new_root
        def insert(key, value)
          Node.new(key, value, self, self)
        end

        # returns value
        def retrieve(key)
          UNDEFINED
        end

        # returns [deleted_node, new_root, is_rebalance_needed]
        def delete(key)
          [self, self, false]
        end

        def dump_tree(io, indent = '')
          # intentionally blank
        end

        def dump_sexp
          # intentionally blank
        end
      end
      EMPTY = Node::EmptyNode.new.freeze
    end

    DEFAULT = Object.new

    attr_accessor :default
    attr_reader :default_proc

    def initialize(default = DEFAULT, &block)
      if block && default != DEFAULT
        raise ArgumentError, 'wrong number of arguments'
      end
      @root = Node::EMPTY
      @default = default
      @default_proc = block
    end

    def root
      @root
    end

    def empty?
      root == Node::EMPTY
    end

    def size
      root.size
    end
    alias length size

    def each(&block)
      if block_given?
        root.each(&block)
        self
      else
        Enumerator.new(root)
      end
    end
    alias each_pair each

    def each_key
      if block_given?
        root.each do |k, v|
          yield k
        end
        self
      else
        Enumerator.new(root, :each_key)
      end
    end

    def each_value
      if block_given?
        root.each do |k, v|
          yield v
        end
        self
      else
        Enumerator.new(root, :each_value)
      end
    end

    def keys
      root.keys
    end

    def values
      root.values
    end

    def clear
      @root = Node::EMPTY
    end

    def []=(key, value)
      @root = @root.insert(key, value)
      @root.set_root
      @root.check_height if $DEBUG
    end
    alias insert []=

    def key?(key)
      root.retrieve(key) != Node::UNDEFINED
    end
    alias has_key? key?

    def [](key)
      value = @root.retrieve(key)
      if value == Node::UNDEFINED
        default_value
      else
        value
      end
    end

    def delete(key)
      deleted, @root, rebalance = @root.delete(key)
      unless empty?
        @root.set_root
        @root.check_height if $DEBUG
      end
      deleted.value
    end

    def dump_tree(io = '')
      root.dump_tree(io)
      io << $/
      io
    end

    def dump_sexp
      root.dump_sexp || ''
    end

    def to_hash
      inject({}) { |r, (k, v)| r[k] = v; r }
    end

    def minimum_pair()
      # Return the key with the smallest key value.
      return nil if @root.empty?

      current_node = @root
      while not current_node.left.empty?
        current_node = current_node.left
      end

      [current_node.key, current_node.value]
    end

    def next_gte_pair(key)
      # Returns the key/value pair with a key that follows the provided key in
      # sorted order.
      node = next_gte_node(@root, key)
      [node.key, node.value] if not node.empty?
    end

    protected

    def next_gte_node(node, key)
      return RedBlackTree::Node::EMPTY if node.empty?

      if key < node.key
        # The current key qualifies as after the provided key. However, we need
        # to check the tree on the left to see if there's a key in there also
        # greater than the provided key but less than the current key.
        after = next_gte_node(node.left, key)
        after = node if after.empty?
      elsif key > node.key
        # The current key will not be after the provided key, but something
        # in the right branch maybe. Check the right branch for the first key
        # larger than our value.
        after = next_gte_node(node.right, key)
      elsif node.key == key
        # An exact match qualifies as the next largest node.
        after = node
      end

      return after
    end

    private

    def default_value
      if @default != DEFAULT
        @default
      elsif @default_proc
        @default_proc.call
      else
        nil
      end
    end
  end

end
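The ring below only needs the tree to behave as a sorted map with a "first key greater than or equal to" lookup. A small sketch of that contract, with arbitrary integer keys chosen purely for illustration (it assumes requiring consistent-cluster/consistent_hashing loads the tree):

require "consistent-cluster/consistent_hashing"

tree = ConsistentCluster::RedBlackTree.new
tree[100] = "a"
tree[200] = "b"
tree[300] = "c"

tree[200]                # => "b"
tree.minimum_pair        # => [100, "a"]
tree.next_gte_pair(150)  # => [200, "b"]  (first pair with key >= 150)
tree.next_gte_pair(900)  # => nil         (the caller wraps around via minimum_pair)
tree.delete(200)         # => "b"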
data/lib/consistent-cluster/consistent_hashing/ring.rb
ADDED
@@ -0,0 +1,106 @@

require 'digest/md5'
#require 'zlib'

module ConsistentCluster

  module ConsistentHashing

    # Public: the hash ring containing all configured nodes
    #
    class Ring

      # Public: returns a new ring object
      def initialize(nodes = [], replicas = 3)
        @replicas = replicas
        @ring = RedBlackTree.new

        nodes.each { |node| add(node) }
      end

      # Public: returns the (virtual) points in the hash ring
      #
      # Returns: a Fixnum
      def length
        @ring.length
      end

      # Public: adds a new node into the hash ring
      #
      def add(node)
        @replicas.times do |i|
          # generate the key of this (virtual) point in the hash
          key = hash_key(node, i)

          @ring[key] = VirtualPoint.new(node, key)
        end
      end

      # Public: adds a new node into the hash ring like `add` but returns
      # a reference to the ring to be used as a fluent interface
      #
      def <<(node)
        add(node)
        self
      end

      # Public: removes a node from the hash ring
      #
      def delete(node)
        @replicas.times do |i|
          key = hash_key(node, i)

          @ring.delete key
        end

        self
      end

      # Public: gets the point for an arbitrary key
      #
      #
      def point_for(key)
        return nil if @ring.empty?
        key = hash_key(key)
        _, value = @ring.next_gte_pair(key)
        _, value = @ring.minimum_pair unless value
        value
      end

      # Public: gets the node where to store the key
      #
      # Returns: the node Object
      def node_for(key)
        point_for(key).node
      end

      # Public: get all nodes in the ring
      #
      # Returns: an Array of the nodes in the ring
      def nodes
        nodes = points.map { |point| point.node }
        nodes.uniq
      end

      # Public: gets all points in the ring
      #
      # Returns: an Array of the points in the ring
      def points
        @ring.map { |point| point[1] }
      end

      protected

      # Internal: hashes the key
      #
      # Returns: a String
      def hash_key(key, index = nil)
        key = "#{key}:#{index}" if index
        #value = Zlib::crc32(key.to_s)
        value = Digest::MD5.hexdigest(key.to_s)[0..16].hex
        value
      end
    end
  end

end
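To make the behaviour of point_for concrete, a short sketch with placeholder node names; only calls defined in the Ring class above are used:

ring = ConsistentCluster::ConsistentHashing::Ring.new(["node-a", "node-b", "node-c"], 10)

owner_before = ring.node_for("some-key")

# Removing a node only remaps the keys that hashed to its virtual points;
# keys owned by the surviving nodes keep their assignment.
ring.delete("node-b")
owner_after = ring.node_for("some-key")

puts "#{owner_before} -> #{owner_after}"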
data/lib/consistent-cluster/consistent_hashing/virtual_point.rb
ADDED
@@ -0,0 +1,23 @@

module ConsistentCluster

  module ConsistentHashing

    # Public: represents a virtual point on the hash ring
    #
    class VirtualPoint
      attr_reader :node, :index

      def initialize(node, index)
        @node = node
        @index = index.to_i
      end

      # Public: set a new index for the virtual point. Useful if the point gets duplicated
      def index=(index)
        @index = index.to_i
      end
    end
  end

end
data/lib/consistent-cluster/demo/client_debug.rb
ADDED
@@ -0,0 +1,59 @@

require "consistent-cluster/client"

require "thrift_client"

require "happy_profile/client/thrift" #thrift client example

cluster = {}

["127.0.0.1:9091","127.0.0.1:9092","127.0.0.1:9093"].each do |iport|
  cluster[iport] = ThriftClient.new("servers" => iport,
                                    "multiplexed" => false,
                                    "protocol" => 'binary',
                                    "transport" => 'socket',
                                    "framed" => false,
                                    "disconnect_exception_classes" => '',
                                    "application_exception_classes" => '',
                                    "size" => 1,
                                    "timeout" => 12,
                                    "client_class" => "HappyProfile::HappyProfileThriftService::Client",
                                    "test_on_borrow" => true,
                                    "pool_timeout" => 12)
end

all_config = {
  consistent_hashing_replicas: 100,
  cluster: cluster
}

profile_services = ConsistentCluster::Client.new(all_config)

while true

  sleep 1

  key = rand(10000)

  2.times do
    begin
      profile_services.shard(key).ping.inspect
    rescue Exception => boom
      puts boom.message
    end
  end

end

# while true

#   sleep 1

#   begin
#     profile_services.shard.ping.inspect
#   rescue Exception => boom
#     puts boom.message
#   end

# end
data/lib/consistent-cluster/demo/rbtree_debug.rb
ADDED
@@ -0,0 +1,18 @@

require "consistent-cluster"

profile_servers_map = {
  x1: {ip: "192.168.1.136", port: "8081"},
  x2: {ip: "192.168.1.136", port: "8081"},
  x3: {ip: "192.168.1.136", port: "8081"},
  x4: {ip: "192.168.1.136", port: "8081"}
}

nodes = profile_servers_map.keys

ch_ring = ConsistentCluster::ConsistentHashing::Ring.new(nodes,10)


(1..100).each do |key|
  p ch_ring.point_for(key)
end
data/lib/consistent-cluster/demo/sync_client_debug.rb
ADDED
@@ -0,0 +1,66 @@

require "consistent-cluster/sync-client"

require "thrift_client"

require "happy_profile/client/thrift" #thrift client example

require "json"

all_config = {
  consistent_hashing_replicas: 100,
  create_proc: Proc.new { |zk_content|
    app_info = JSON.parse(zk_content)
    ThriftClient.new("servers" => "#{app_info["host"]}:#{app_info["port"]}",
                     "multiplexed" => app_info["serviceNames"].length > 1,
                     "protocol" => 'binary',
                     "transport" => 'socket',
                     "framed" => false,
                     "disconnect_exception_classes" => '',
                     "application_exception_classes" => '',
                     "size" => 1,
                     "timeout" => 12,
                     "client_class" => "HappyProfile::HappyProfileThriftService::Client",
                     "test_on_borrow" => true,
                     "pool_timeout" => 12)
  },
  destroy_proc: Proc.new { |client|
    client.destroy
  },
  after_sync_proc: Proc.new { |info|
    puts info.inspect
  },
  zookeeper_service: "127.0.0.1:2181",
  zookeeper_path: "/test"
}

profile_services = ConsistentCluster::SyncClient.new(all_config)

while true

  sleep 1

  key = rand(10000)

  2.times do
    begin
      profile_services.shard(key).ping.inspect
    rescue Exception => boom
      puts boom.message
    end
  end

end

# while true

#   sleep 1

#   begin
#     profile_services.shard.ping.inspect
#   rescue Exception => boom
#     puts boom.message
#   end

# end
data/lib/consistent-cluster/demo/zk_debug.rb
ADDED
@@ -0,0 +1,69 @@
require 'zk'
require 'json'

$zk = ZK.new("127.0.0.1:2181")

$path = "/test"

def create(hash)
  app_name = hash[:id]

  $zk.create("#{$path}/#{app_name}",hash.to_json)
end

def set(hash)
  app_name = hash[:id]
  $zk.set("#{$path}/#{app_name}",hash.to_json)
end

def delete(app)
  $zk.delete("#{$path}/#{app}")
end

def clear
  apps = $zk.children($path)
  apps.each do |app|
    delete(app)
  end
end


app1 = {
  group: "uts",
  host: "127.0.0.1",
  port: "9091",
  id: "127.0.0.1:9091",
  protocolType: "thrift",
  serviceNames: ["com.ximalaya.service.uts.api.thrift.IUserTrackRecordServiceHandler$Iface"]
}

app2 = {
  group: "uts",
  host: "127.0.0.1",
  port: "9092",
  id: "127.0.0.1:9092",
  protocolType: "thrift",
  serviceNames: ["com.ximalaya.service.uts.api.thrift.IUserTrackRecordServiceHandler$Iface"]
}

app3 = {
  group: "uts",
  host: "127.0.0.1",
  port: "9093",
  id: "127.0.0.1:9093",
  protocolType: "thrift",
  serviceNames: ["com.ximalaya.service.uts.api.thrift.IUserTrackRecordServiceHandler$Iface"]
}

clear

create(app1)

create(app2)

create(app3)

#set(app2)
data/lib/consistent-cluster/sync-client.rb
ADDED
@@ -0,0 +1,152 @@

require "consistent-cluster/version"

require "consistent-cluster/consistent_hashing"

gem "zk", "~> 1.9.5" # the gem does not depend on zk by default; declare the dependency here

require "zk"

module ConsistentCluster

  class SyncClient

    def initialize(options)
      @zk = ZK.new(options[:zookeeper_service])
      @path = options[:zookeeper_path]

      @data = {}
      @cluster = {}

      replicas = options[:consistent_hashing_replicas] || 3
      @ring = ConsistentHashing::Ring.new([],replicas)

      @create_proc = options[:create_proc]
      @destroy_proc = options[:destroy_proc]
      @after_sync_proc = options[:after_sync_proc]

      @to_sync,@syncing = false,false

      @zk.register(@path) do |event|
        sync_services
      end

      sync_services

      @shard_num = 0
    end

    def shard(key=nil)
      cluster_sum = @cluster.length
      raise "no service available" if cluster_sum < 1
      if key
        point = @ring.point_for(key)
        server = @cluster[point.node]
      else
        @shard_num += 1
        @shard_num = @shard_num%cluster_sum
        server = @cluster.values[@shard_num]
      end
      server
    end

    protected

    def sync_services
      @to_sync = true
      if !@syncing
        syncing_process
      end
    end

    def syncing_process
      @syncing = true
      while @to_sync
        @to_sync = false
        app_names = sync_children
        current_app_names = @cluster.keys

        to_update = current_app_names&app_names
        to_create = app_names - current_app_names
        to_destroy = current_app_names - app_names

        to_update.each do |app_name|
          update_service(app_name)
        end

        to_create.each do |app_name|
          create_service(app_name)
        end

        to_destroy.each do |app_name|
          destroy_service(app_name)
        end
      end
      @syncing = false
      if @after_sync_proc
        clone_data = Marshal.load(Marshal.dump(@data)) #avoid change outside
        @after_sync_proc.call(clone_data)
      end
    end

    def sync_children
      @zk.children(@path, watch: true)
    end

    def create_service(app_name)

      app_content = get_app_content(app_name)

      server = @create_proc.call(app_content)

      @data[app_name] = app_content

      @cluster[app_name] = server

      @ring.add(app_name)

      app_path = "#{@path}/#{app_name}"
      @zk.get(app_path, watch: true)
    rescue Exception => boom
      puts "sync create :#{app_name} raise #{boom.class} - #{boom.message}"
    end

    def destroy_service(app_name)
      return if app_name.to_s.empty?
      @ring.delete(app_name)

      if server = @cluster[app_name]
        @data.delete(app_name)
        @cluster.delete(app_name)
        if @destroy_proc
          @destroy_proc.call(server)
        end
      end

    rescue Exception => boom
      puts "sync destroy :#{app_name} raise #{boom.class} - #{boom.message}"
    end

    def update_service(app_name)
      return if app_name.to_s.empty?

      app_content = get_app_content(app_name)

      cache_info = @data[app_name]
      if cache_info != app_content
        destroy_service(app_name)
        create_service(app_name)
      end
    rescue Exception => boom
      puts "sync update :#{app_name} raise #{boom.class} - #{boom.message}"
    end

    def get_app_content(app_name)
      app_path = "#{@path}/#{app_name}"
      content = @zk.get(app_path).first
      content
    end

  end

end
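A stripped-down SyncClient configuration, showing only the options the class above reads. The procs are placeholders (the thrift-specific construction lives in the demo earlier), and it assumes a ZooKeeper server reachable at the given address with service nodes registered under the watched path:

require "consistent-cluster/sync-client"

services = ConsistentCluster::SyncClient.new(
  zookeeper_service: "127.0.0.1:2181",         # ZK connection string
  zookeeper_path: "/test",                     # parent node whose children are the backends
  consistent_hashing_replicas: 100,            # virtual points per backend on the ring
  create_proc: ->(zk_content) { zk_content },  # build a client from the child node's payload
  destroy_proc: ->(client) { },                # tear a client down when its node disappears
  after_sync_proc: ->(data) { puts data.keys.inspect }
)

services.shard("user:42")  # routed over whatever children are currently registered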
metadata
ADDED
@@ -0,0 +1,72 @@
--- !ruby/object:Gem::Specification
name: consistent-cluster
version: !ruby/object:Gem::Version
  version: 1.0.0
platform: ruby
authors:
- jeffrey6052
autorequire:
bindir: bin
cert_chain: []
date: 2015-03-26 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: atomic
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.1.99
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.1.99
description: |2-

  Red-black tree, consistent hashing algorithm, hash ring
  Supports zookeeper synchronization (depends on gem: zk)
email:
- jeffrey6052@163.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- lib/consistent-cluster.rb
- lib/consistent-cluster/client.rb
- lib/consistent-cluster/consistent_hashing.rb
- lib/consistent-cluster/consistent_hashing/red_black_tree.rb
- lib/consistent-cluster/consistent_hashing/ring.rb
- lib/consistent-cluster/consistent_hashing/virtual_point.rb
- lib/consistent-cluster/demo/client_debug.rb
- lib/consistent-cluster/demo/rbtree_debug.rb
- lib/consistent-cluster/demo/sync_client_debug.rb
- lib/consistent-cluster/demo/zk_debug.rb
- lib/consistent-cluster/sync-client.rb
- lib/consistent-cluster/version.rb
homepage: ''
licenses: []
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.4.5
signing_key:
specification_version: 4
summary: "Wraps a cluster of service interfaces for convenient client-side calls"
test_files: []