redis_ring 0.0.2 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +3 -0
- data/Gemfile.lock +9 -3
- data/config/redis.conf.erb +4 -6
- data/lib/redis_ring/application.rb +29 -29
- data/lib/redis_ring/background_thread.rb +45 -0
- data/lib/redis_ring/cli.rb +3 -6
- data/lib/redis_ring/configuration.rb +2 -1
- data/lib/redis_ring/http_client.rb +23 -0
- data/lib/redis_ring/master.rb +154 -0
- data/lib/redis_ring/master_rpc.rb +33 -0
- data/lib/redis_ring/node.rb +66 -0
- data/lib/redis_ring/process_manager.rb +33 -19
- data/lib/redis_ring/shard_config.rb +18 -0
- data/lib/redis_ring/slave.rb +62 -0
- data/lib/redis_ring/slave_rpc.rb +42 -0
- data/lib/redis_ring/version.rb +1 -1
- data/lib/redis_ring/web_interface.rb +63 -2
- data/lib/redis_ring/zookeeper_connection.rb +73 -0
- data/lib/redis_ring/zookeeper_observer.rb +47 -0
- data/lib/redis_ring.rb +11 -0
- data/redis_ring.gemspec +2 -0
- data/spec/cluster_builder.rb +224 -0
- data/spec/fakes/fake_http_client.rb +39 -0
- data/spec/fakes/fake_master_rpc.rb +20 -0
- data/spec/fakes/fake_node_provider.rb +2 -0
- data/spec/fakes/fake_process_manager.rb +19 -0
- data/spec/fakes/fake_slave_rpc.rb +36 -0
- data/spec/fakes/fake_zookeeper_connection.rb +2 -0
- data/spec/redis_ring/application_spec.rb +15 -13
- data/spec/redis_ring/master_rpc_spec.rb +20 -0
- data/spec/redis_ring/master_spec.rb +174 -0
- data/spec/redis_ring/node_spec.rb +53 -0
- data/spec/redis_ring/slave_rpc_spec.rb +46 -0
- data/spec/redis_ring/slave_spec.rb +81 -0
- data/spec/spec_helper.rb +20 -1
- data/spec/test.conf +3 -0
- metadata +54 -6
data/.gitignore
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,10 +1,11 @@
 PATH
   remote: .
   specs:
-    redis_ring (0.0.
+    redis_ring (0.0.2)
       daemons-mikehale
       json
       sinatra
+      zookeeper
 
 GEM
   remote: http://rubygems.org/
@@ -13,7 +14,7 @@ GEM
     diff-lcs (1.1.2)
     json (1.5.1)
     mocha (0.9.12)
-    rack (1.2.
+    rack (1.2.2)
     rspec (2.5.0)
       rspec-core (~> 2.5.0)
       rspec-expectations (~> 2.5.0)
@@ -22,10 +23,14 @@ GEM
     rspec-expectations (2.5.0)
       diff-lcs (~> 1.1.2)
     rspec-mocks (2.5.0)
-
+    simplecov (0.4.1)
+      simplecov-html (~> 0.4.3)
+    simplecov-html (0.4.3)
+    sinatra (1.2.1)
       rack (~> 1.1)
       tilt (>= 1.2.2, < 2.0)
     tilt (1.2.2)
+    zookeeper (0.4.3)
 
 PLATFORMS
   ruby
@@ -34,3 +39,4 @@ DEPENDENCIES
   mocha
   redis_ring!
   rspec
+  simplecov
data/config/redis.conf.erb
CHANGED
@@ -20,18 +20,16 @@ loglevel notice
 
 databases 2048
 
-save 900 1
-save 300 10
-save 60 10000
+# save 900 1
+# save 300 10
+# save 60 10000
 
 rdbcompression yes
 
 # maxclients 128
 # maxmemory <bytes>
 
-appendonly
-
-# The name of the append only file (default: "appendonly.aof")
+appendonly yes
 
 # appendfsync always
 appendfsync everysec
data/lib/redis_ring/application.rb
CHANGED
@@ -2,50 +2,50 @@ module RedisRing
 
   class Application
 
-    attr_reader :shards, :configuration, :process_manager
+    attr_reader :shards, :configuration, :process_manager, :zookeeper_observer, :master, :slave, :zookeeper_connection, :master_rpc, :http_client, :node_provider, :slave_rpc
 
-    def initialize(
-      @configuration =
+    def initialize(config)
+      @configuration = config
       @process_manager = ProcessManager.new
-      @
+      @http_client = HttpClient.new
+      @master_rpc = MasterRPC.new(http_client)
+      @slave_rpc = SlaveRPC.new(http_client)
+      @node_provider = NodeProvider.new(slave_rpc)
+      @zookeeper_connection = ZookeeperConnection.new(config.host_name,
+                                                      config.base_port,
+                                                      config.zookeeper_address)
+      @master = Master.new(zookeeper_connection, config.ring_size, node_provider)
+      @slave = Slave.new(configuration, master_rpc, process_manager)
+      @zookeeper_observer = ZookeeperObserver.new(zookeeper_connection, master, slave)
+      @web_interface_runner = WebInterfaceRunner.new(config.base_port, master, slave)
     end
 
     def start
       self.stop
 
-      @
-        shard_conf = ShardConfig.new(shard_number, configuration)
-        @shards[shard_number] = Shard.new(shard_conf)
-      end
-
-      @shards.each do |shard_no, shard|
-        @process_manager.start_shard(shard)
-      end
+      @web_thread = @web_interface_runner.run
 
-      @
-
+      @zookeeper_connection.connect
+      @slave.node_id = @zookeeper_connection.current_node
 
-
-      @process_manager.
+      @zookeeper_thread = @zookeeper_observer.run
+      @pm_thread = @process_manager.run
 
-
-
+      [:INT, :TERM, :QUIT].each do |sig|
+        trap(sig) { self.stop }
       end
-
-      @shards = {}
     end
 
-    def
-
-
-
-    end
-
-      return { :count => configuration.ring_size, :shards => shards_hash }
+    def wait
+      @pm_thread.join if @pm_thread
+      @zookeeper_thread.join if @zookeeper_thread
+      @web_thread.join if @web_thread
    end
 
-
-
+    def stop
+      @process_manager.halt
+      @zookeeper_observer.halt
+      @web_interface_runner.halt
    end
 
  end
data/lib/redis_ring/background_thread.rb
ADDED
@@ -0,0 +1,45 @@
+module RedisRing
+
+  module BackgroundThread
+
+    def before_run
+    end
+
+    def after_halt
+    end
+
+    def do_work
+    end
+
+    def run
+      before_run
+
+      @continue_running = true
+
+      return Thread.new do
+        begin
+          while continue_running?
+            do_work
+          end
+          after_halt
+        rescue SystemExit
+          raise
+        rescue => e
+          puts "Error caught in #{self.class.name}:"
+          puts e
+          puts e.backtrace.join("\n")
+        end
+      end
+    end
+
+    def continue_running?
+      @continue_running
+    end
+
+    def halt
+      @continue_running = false
+    end
+
+  end
+
+end
data/lib/redis_ring/cli.rb
CHANGED
@@ -42,12 +42,9 @@ USAGE
     def start(config_file = nil)
       config = config_file ? Configuration.from_yml_file(config_file) : Configuration.new
 
-
-
-
-      WebInterface.run!(:port => config.base_port)
-
-      Application.instance.stop
+      app = Application.new(config)
+      app.start
+      app.wait
     end
 
 end
data/lib/redis_ring/configuration.rb
CHANGED
@@ -7,7 +7,7 @@ module RedisRing
   class Configuration
 
     PARAMETERS = [:host_name, :base_port, :ring_size, :redis_path, :redis_config_template_path,
-                  :total_vm_size, :base_directory, :password, :total_max_memory, :vm_page_size]
+                  :total_vm_size, :base_directory, :password, :total_max_memory, :vm_page_size, :zookeeper_address]
 
     attr_reader *PARAMETERS
 
@@ -50,6 +50,7 @@ module RedisRing
       self.base_directory ||= "/var/lib/redis"
       self.total_max_memory ||= 1024 * 1024 * 1024 # 1GB
       self.vm_page_size ||= 32
+      self.zookeeper_address ||= "localhost:2181"
     end
 
     def validate!
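
The new :zookeeper_address parameter defaults to a local ZooKeeper and can be overridden from the YAML file the CLI accepts. A hedged sketch, assuming Configuration.from_yml_file maps YAML keys onto the PARAMETERS list (the file name is illustrative):

    config = RedisRing::Configuration.from_yml_file("redis_ring.yml")
    config.zookeeper_address  # => "localhost:2181" unless the file overrides it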
data/lib/redis_ring/http_client.rb
ADDED
@@ -0,0 +1,23 @@
+module RedisRing
+
+  class HttpClient
+
+    def get(host, port, path, params = {})
+      Net::HTTP.get(uri(host, port, path, params))
+    end
+
+    def post(host, port, path, params = {})
+      Net::HTTP.post_form(uri(host, port, path, params), {}).body
+    end
+
+    protected
+
+    def uri(host, port, path, params)
+      params_str = params.map{|k,v| "#{k}=#{v}"}.join("&")
+      params_str = "?" + params_str unless params_str.empty?
+      URI.parse("http://#{host}:#{port}#{path}#{params_str}")
+    end
+
+    end
+
+end
data/lib/redis_ring/master.rb
ADDED
@@ -0,0 +1,154 @@
+module RedisRing
+
+  class Master
+
+    attr_reader :zookeeper_connection, :ring_size, :node_provider
+
+    def initialize(zookeeper_connection, ring_size, node_provider)
+      @zookeeper_connection = zookeeper_connection
+      @ring_size = ring_size
+      @node_provider = node_provider
+      @node_ids = []
+      @is_master = false
+    end
+
+    def became_master
+      return if is_master?
+
+      puts "BECAME MASTER"
+
+      @is_master = true
+    end
+
+    def no_longer_is_master
+      return unless is_master?
+
+      puts "LOST MASTER STATUS"
+
+      @is_master = false
+    end
+
+    def nodes_changed(changed_node_ids)
+      return unless is_master?
+
+      new_nodes = changed_node_ids - node_ids
+      removed_nodes = node_ids - changed_node_ids
+
+      puts "NODES CHANGED"
+      puts "NEW: #{new_nodes.join(", ")}" if new_nodes.any?
+      puts "REMOVED: #{removed_nodes.join(', ')}" if removed_nodes.any?
+
+      @node_ids = changed_node_ids
+
+      reassign_shards
+    end
+
+    def node_joined(node_id)
+      puts "NODE JOINED #{node_id}"
+
+      reassign_shards
+    end
+
+    def node_leaving(node_id)
+      puts "NODE LEAVING #{node_id}"
+
+      node_ids.delete(node_id)
+      reassign_shards
+    end
+
+    def is_master?
+      return @is_master
+    end
+
+    def reassign_shards
+      update_node_statuses
+
+      running_shards = {}
+      best_candidates = {}
+      best_candidates_timestamps = Hash.new(0)
+
+      nodes.each do |node_id, node|
+        node.running_shards.dup.each do |shard_no|
+          if running_shards.key?(shard_no)
+            node.stop_shard(shard_no)
+          else
+            running_shards[shard_no] = node_id
+          end
+        end
+
+        node.available_shards.each do |shard_no, timestamp|
+          if timestamp > best_candidates_timestamps[shard_no]
+            best_candidates[shard_no] = node_id
+            best_candidates_timestamps[shard_no] = timestamp
+          end
+        end
+      end
+
+      offline_shards = (0...ring_size).to_a - running_shards.keys
+      shards_per_node = (1.0 * ring_size / nodes.size).floor
+      rest = ring_size - shards_per_node * nodes.size
+
+      nodes.each do |node_id, node|
+        next unless node.joined?
+        break if offline_shards.empty?
+        count_to_assign = shards_per_node - node.running_shards.size
+        count_to_assign += 1 if node_ids.index(node_id) < rest
+        count_to_assign.times do
+          shard_no = offline_shards.shift
+          break unless shard_no
+          node.start_shard(shard_no)
+        end
+      end
+
+      zookeeper_connection.update_status(status)
+    end
+
+    def status
+      {
+        :ring_size => ring_size,
+        :shards => shards
+      }
+    end
+
+    protected
+
+    attr_reader :node_ids
+    attr_accessor :nodes
+
+    def shards
+      running_shards = {}
+      nodes.each do |node_id, node|
+        node.running_shards.each do |shard_no|
+          running_shards[shard_no] = {
+            :host => node.host,
+            :port => node.port + shard_no + 1,
+            :status => :running
+          }
+        end
+      end
+      return running_shards
+    end
+
+    def update_node_statuses
+      self.nodes ||= {}
+
+      nodes.each do |node_id, node|
+        unless node_ids.include?(node_id)
+          nodes.delete(node_id)
+        end
+      end
+
+      node_ids.each do |node_id|
+        next if nodes.key?(node_id)
+        node_data = zookeeper_connection.node_data(node_id)
+        nodes[node_id] = node_provider.new(node_data["host"], node_data["port"])
+      end
+
+      nodes.each do |node_id, node|
+        node.update_status!
+      end
+    end
+
+  end
+
+end
data/lib/redis_ring/master_rpc.rb
ADDED
@@ -0,0 +1,33 @@
+module RedisRing
+
+  class MasterRPC
+
+    attr_reader :http_client
+
+    def initialize(http_client)
+      @http_client = http_client
+    end
+
+    def connection(host, port)
+      Connection.new(http_client, host, port)
+    end
+
+    class Connection
+
+      attr_reader :http_client, :host, :port
+
+      def initialize(http_client, host, port)
+        @http_client = http_client
+        @host = host
+        @port = port
+      end
+
+      def node_loaded(node_id)
+        http_client.post(host, port, "/master/node_joined/#{node_id}")
+      end
+
+    end
+
+  end
+
+end
data/lib/redis_ring/node.rb
ADDED
@@ -0,0 +1,66 @@
+module RedisRing
+
+  class NodeProvider
+
+    attr_reader :slave_rpc
+
+    def initialize(slave_rpc)
+      @slave_rpc = slave_rpc
+    end
+
+    def new(host, port)
+      Node.new(slave_rpc.connection(host, port), host, port)
+    end
+
+  end
+
+  class Node
+
+    attr_reader :slave_rpc, :host, :port
+
+    def initialize(slave_rpc, host, port)
+      @slave_rpc = slave_rpc
+      @host = host
+      @port = port
+    end
+
+    def update_status!
+      status_hash = slave_rpc.status
+      @joined = status_hash["joined"]
+      @running_shards = status_hash["running_shards"] || []
+      @available_shards = keys_to_i(status_hash["available_shards"] || {})
+    end
+
+    def joined?
+      @joined
+    end
+
+    def start_shard(shard_number)
+      running_shards << shard_number
+      slave_rpc.start_shard(shard_number)
+    end
+
+    def stop_shard(shard_number)
+      running_shards.delete(shard_number)
+      slave_rpc.stop_shard(shard_number)
+    end
+
+    def running_shards
+      @running_shards ||= []
+    end
+
+    def available_shards
+      @available_shards ||= {}
+    end
+
+    protected
+
+    def keys_to_i(hash)
+      result = {}
+      hash.each { |key, val| result[key.to_i] = val }
+      return result
+    end
+
+  end
+
+end
data/lib/redis_ring/process_manager.rb
CHANGED
@@ -4,49 +4,63 @@ module RedisRing
 
   class ProcessManager
 
+    include RedisRing::BackgroundThread
+
     def initialize
       @shards = {}
+      @shards_to_stop = []
+      @mutex = Mutex.new
     end
 
-    def
-
-
-      monitor_processes_loop
-    end
+    def do_work
+      monitor_processes
+      sleep(0.5)
     end
 
-    def
-
+    def after_halt
+      shards.each do |shard_no, shard|
+        if shard.alive?
+          puts "Stopping shard #{shard_no}"
+          shard.stop
+        end
+      end
     end
 
     def start_shard(shard)
-
-
-
-
-      shards[shard.shard_number] = shard
+      @mutex.synchronize do
+        if shards.key?(shard.shard_number)
+          raise ShardAlreadyStarted.new("Shard: #{shard.shard_number} already started!")
+        end
 
-
+        shards[shard.shard_number] = shard
+      end
     end
 
     def stop_shard(shard)
-
-
+      @mutex.synchronize do
+        shards.delete(shard.shard_number)
+        shards_to_stop << shard
+      end
     end
 
     protected
 
-    attr_reader :shards
+    attr_reader :shards, :shards_to_stop
+
+    def monitor_processes
+      @mutex.synchronize do
+        shards_to_stop.each do |shard|
+          puts "Stopping shard #{shard.shard_number}"
+          shard.stop
+        end
+        @shards_to_stop = []
 
-    def monitor_processes_loop
-      while(@continue_running) do
         shards.each do |shard_no, shard|
          unless shard.alive?
            puts "Restarting shard #{shard_no}"
            shard.start
          end
        end
-        sleep(1)
      end
    end
 
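
ProcessManager now runs its monitor loop via BackgroundThread and guards the shard tables with a Mutex, since start_shard and stop_shard arrive from other threads while do_work runs in the background. A sketch of the intended interaction, assuming the gem's classes above are loaded (FakeShard is illustrative; a real Shard wraps a redis-server process):

    FakeShard = Struct.new(:shard_number) do
      def alive?; @alive; end
      def start;  @alive = true;  end
      def stop;   @alive = false; end
    end

    pm = RedisRing::ProcessManager.new
    thread = pm.run                   # loop: stop queued shards, restart dead ones
    pm.start_shard(FakeShard.new(0))  # registered under the mutex; the loop starts it
    sleep(1)
    pm.halt                           # after_halt stops every shard still alive
    thread.join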
data/lib/redis_ring/shard_config.rb
CHANGED
@@ -5,6 +5,10 @@ module RedisRing
     attr_reader :shard_number, :configuration
 
     def initialize(shard_number, configuration)
+      unless shard_number >= 0 && shard_number < configuration.ring_size
+        raise ArgumentError.new("shard number #{shard_number} must be between 0 and #{configuration.ring_size - 1}")
+      end
+
       @shard_number = shard_number
       @configuration = configuration
     end
@@ -74,6 +78,14 @@ module RedisRing
       file('db_files', "shard-#{shard_number}.aof")
     end
 
+    def db_mtime
+      mtime(db_file_name)
+    end
+
+    def aof_mtime
+      mtime(aof_file_name)
+    end
+
     def password
       configuration.password
     end
@@ -88,6 +100,12 @@ module RedisRing
       File.join('..', '..', *parts)
     end
 
+    def mtime(relative_path)
+      path = File.expand_path(relative_path, working_directory)
+      return nil unless File.exist?(path)
+      return File.mtime(path).to_i
+    end
+
   end
 
 end
data/lib/redis_ring/slave.rb
ADDED
@@ -0,0 +1,62 @@
+module RedisRing
+
+  class Slave
+
+    attr_accessor :current_master_host, :current_master_port, :node_id
+    attr_reader :configuration, :master_rpc, :process_manager
+    attr_reader :running_shards
+
+    def initialize(configuration, master_rpc, process_manager)
+      @configuration = configuration
+      @master_rpc = master_rpc
+      @process_manager = process_manager
+      @joined = false
+      @running_shards = {}
+    end
+
+    def joined?
+      @joined
+    end
+
+    def available_shards
+      available_shards = {}
+      configuration.ring_size.times do |shard_no|
+        shard_conf = ShardConfig.new(shard_no, configuration)
+        timestamp = [shard_conf.db_mtime, shard_conf.aof_mtime].compact.max
+        available_shards[shard_no] = timestamp if timestamp
+      end
+      return available_shards
+    end
+
+    def status
+      { :joined => joined?, :running_shards => running_shards.keys, :available_shards => available_shards }
+    end
+
+    def join
+      puts "JOINING CLUSTER"
+      @joined = true
+      master_rpc.connection(current_master_host, current_master_port).node_loaded(node_id)
+    end
+
+    def start_shard(shard_number)
+      puts "STARTING SHARD #{shard_number}"
+      return if running_shards.include?(shard_number)
+      shard_conf = ShardConfig.new(shard_number, configuration)
+      shard = running_shards[shard_number] = Shard.new(shard_conf)
+      process_manager.start_shard(shard)
+    end
+
+    def stop_shard(shard_number)
+      puts "STOPPING SHARD #{shard_number}"
+      shard = running_shards[shard_number]
+      return unless shard
+      process_manager.stop_shard(shard)
+      running_shards.delete(shard_number)
+    end
+
+    def sync_shard_with(shard_number, host, port)
+    end
+
+  end
+
+end
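
available_shards is how a restarting node advertises which shard data sets it already holds: for each shard it reports the newer of the .rdb and .aof mtimes (via ShardConfig#db_mtime and #aof_mtime above), and Master#reassign_shards tracks the node reporting the freshest copy per shard. A sketch of that comparison with hypothetical timestamps:

    # Two nodes report shard 7 with unix mtimes:
    reports = { "node-a" => 1300000100, "node-b" => 1300000900 }

    best = reports.max_by { |_node_id, timestamp| timestamp }
    best  # => ["node-b", 1300000900] -- the freshest copy wins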