flare-tools 0.1.4 → 0.4.5.1
Sign up to get free protection for your applications and to get access to all the features.
- data/.gemtest +0 -0
- data/Flare-tools.txt +0 -0
- data/History.txt +114 -2
- data/LICENSE +21 -0
- data/Manifest.txt +65 -8
- data/README.txt +356 -0
- data/Rakefile +90 -25
- data/Tutorial.txt +370 -0
- data/bin/flare-admin +6 -0
- data/bin/flare-argv0 +6 -0
- data/bin/flare-deploy +6 -0
- data/bin/flare-keychecker +6 -0
- data/bin/flare-part +6 -0
- data/bin/flare-ping +6 -0
- data/bin/flare-stats +4 -10
- data/bin/flare-zkadmin +6 -0
- data/lib/flare/net/connection.rb +98 -0
- data/lib/flare/test/cluster.rb +140 -0
- data/lib/flare/test/daemon.rb +144 -0
- data/lib/flare/test/node.rb +62 -0
- data/lib/flare/tools.rb +18 -16
- data/lib/flare/tools/cli.rb +32 -0
- data/lib/flare/tools/cli/activate.rb +106 -0
- data/lib/flare/tools/cli/balance.rb +83 -0
- data/lib/flare/tools/cli/cli_util.rb +77 -0
- data/lib/flare/tools/cli/deploy.rb +170 -0
- data/lib/flare/tools/cli/down.rb +85 -0
- data/lib/flare/tools/cli/dump.rb +219 -0
- data/lib/flare/tools/cli/dumpkey.rb +117 -0
- data/lib/flare/tools/cli/flare_admin.rb +81 -0
- data/lib/flare/tools/cli/flare_argv0.rb +60 -0
- data/lib/flare/tools/cli/flare_keychecker.rb +106 -0
- data/lib/flare/tools/cli/flare_zkadmin.rb +226 -0
- data/lib/flare/tools/cli/index.rb +54 -0
- data/lib/flare/tools/cli/list.rb +93 -0
- data/lib/flare/tools/cli/master.rb +143 -0
- data/lib/flare/tools/cli/part.rb +100 -0
- data/lib/flare/tools/cli/ping.rb +81 -0
- data/lib/flare/tools/cli/reconstruct.rb +164 -0
- data/lib/flare/tools/cli/remove.rb +119 -0
- data/lib/flare/tools/cli/restore.rb +180 -0
- data/lib/flare/tools/cli/slave.rb +125 -0
- data/lib/flare/tools/cli/stats.rb +229 -122
- data/lib/flare/tools/cli/sub_command.rb +73 -0
- data/lib/flare/tools/cli/summary.rb +97 -0
- data/lib/flare/tools/cli/threads.rb +78 -0
- data/lib/flare/tools/cli/verify.rb +202 -0
- data/lib/flare/tools/client.rb +267 -0
- data/lib/flare/tools/cluster.rb +319 -0
- data/lib/flare/tools/common.rb +196 -0
- data/lib/flare/tools/index_server.rb +51 -0
- data/lib/flare/tools/node.rb +162 -0
- data/lib/flare/tools/stats.rb +75 -0
- data/lib/flare/tools/zk_util.rb +28 -0
- data/lib/flare/util.rb +34 -0
- data/lib/flare/util/bwlimit.rb +132 -0
- data/lib/flare/util/command_line.rb +79 -0
- data/lib/flare/util/conf.rb +71 -0
- data/lib/flare/util/constant.rb +25 -0
- data/lib/flare/util/conversion.rb +26 -0
- data/lib/flare/util/default_logger.rb +52 -0
- data/lib/flare/util/exception.rb +19 -0
- data/lib/flare/util/filesystem.rb +30 -0
- data/lib/flare/util/flared_conf.rb +33 -0
- data/lib/flare/util/flarei_conf.rb +32 -0
- data/lib/flare/util/hash_function.rb +32 -0
- data/lib/flare/util/interruption.rb +70 -0
- data/lib/flare/util/key_resolver.rb +67 -0
- data/lib/flare/util/log4r_logger.rb +79 -0
- data/lib/flare/util/logger.rb +40 -0
- data/lib/flare/util/logging.rb +84 -0
- data/lib/flare/util/result.rb +53 -0
- data/test/test/experimental/cache_test.rb +113 -0
- data/test/test/experimental/key_distribution_test.rb +38 -0
- data/test/test/experimental/keychecker_test.rb +60 -0
- data/test/test/experimental/list_test.rb +108 -0
- data/test/test/extra/replication_test.rb +184 -0
- data/test/test/integration/cli_test.rb +348 -0
- data/test/test/integration/dump_expired_test.rb +103 -0
- data/test/test/integration/dump_test.rb +128 -0
- data/test/test/integration/index_server_test.rb +35 -0
- data/test/test/integration/node_test.rb +78 -0
- data/test/test/integration/partition_test.rb +235 -0
- data/test/test/integration/proxy_test.rb +54 -0
- data/test/test/integration/stats_test.rb +79 -0
- data/test/test/system/flare_admin_test.rb +191 -0
- data/test/test/unit/bwlimit_test.rb +52 -0
- data/test/test/unit/cluster_test.rb +96 -0
- data/test/test/unit/daemon_test.rb +30 -0
- data/test/test/unit/logger_test.rb +46 -0
- data/test/test/unit/tools_test.rb +25 -0
- data/test/test/unit/util_test.rb +70 -0
- metadata +239 -84
- data/README.rdoc +0 -83
- data/bin/flare-partition-setting +0 -12
- data/lib/flare/tools/cli/partition_setting.rb +0 -86
- data/lib/flare/tools/core.rb +0 -189
- data/lib/flare/tools/logger.rb +0 -31
- data/test/test_flare-tools.rb +0 -11
- data/test/test_helper.rb +0 -3
# -*- coding: utf-8; -*-
# Authors::   Kiyoshi Ikehara <kiyoshi.ikehara@gree.net>
# Copyright:: Copyright (C) GREE, Inc. 2011.
# License::   MIT-style

require 'timeout'
require 'flare/net/connection'
require 'flare/util/logging'
require 'flare/util/constant'
require 'flare/util/result'

#
module Flare
  module Tools

    # == Description
    # Client wraps a Flare::Net::Connection and provides class macros
    # (defcmd, defcmd_noreply, defcmd_oneline, defcmd_key, defcmd_value,
    # defcmd_listelement) that define memcached-style text-protocol commands
    # as instance methods.
    class Client
      include Flare::Util::Logging
      extend Flare::Util::Logging
      include Flare::Util::Constant
      include Flare::Util::Result

      # Opens a client session to host:port.
      # With a block: yields the session, closes it afterwards, and returns
      # the block's value. Without a block: returns the OPEN session; the
      # caller is responsible for closing it.
      #
      # Fix: the original closed the session in the ensure clause even when
      # no block was given, so callers received an already-closed session.
      def self.open(host, port, tout = DefaultTimeout, uplink_limit = DefalutBwlimit, downlink_limit = DefalutBwlimit, &block)
        session = nil
        session = self.new(host, port, tout, uplink_limit, downlink_limit)
        return session if block.nil?
        return block.call(session)
      ensure
        if session.nil?
          error "failed to open #{host}:#{port}."
        elsif block
          session.close # this might raise IOError
        end
      end

      # Connects to host:port (connection attempt bounded to 1 second) and
      # caches the reported server name and version. Connection failures are
      # logged at debug level and re-raised.
      def initialize(host, port, tout = DefaultTimeout, uplink_limit = DefalutBwlimit, downlink_limit = DefalutBwlimit)
        @tout = tout
        @conn = nil
        Timeout.timeout(1) do
          @conn = Flare::Net::Connection.new(host, port, uplink_limit, downlink_limit)
        end
        @server_name, @version = server_version
      rescue Errno::ECONNREFUSED
        debug "Connection refused. server=[#{@conn}]"
        raise
      rescue Timeout::Error
        debug "Connection timeout. server=[#{@conn}]"
        raise
      rescue SocketError
        debug "Connection error. server=[#{host}:#{port}]"
        raise
      end

      # Lexicographically compares version arrays (e.g. [1, 0, 9]).
      # Components missing from +version+ are treated as 0.
      # Returns true when +version+ >= +required_version+.
      def required_version?(required_version, version = @version)
        (0...required_version.size).each do |i|
          n = i < version.size ? version[i] : 0
          return true if n > required_version[i]
          return false if n < required_version[i]
        end
        true
      end

      def host
        @conn.host
      end

      def hostname
        @conn.host
      end

      def port
        @conn.port
      end

      # Sends +cmd+ (normalized to a single trailing CRLF) and parses the
      # reply via +parser+, handing payloads to +processor+. The exchange is
      # bounded by +tout+ seconds (nil disables the deadline); on timeout the
      # connection is closed and the error re-raised.
      #
      # Fix: build a new string instead of cmd.chomp! so the caller's string
      # is not mutated; also removed the stray '}' from the log messages.
      def request(cmd, parser, processor, tout = @tout)
        @conn.reconnect if @conn.closed?
        cmd = cmd.chomp + "\r\n"
        debug "Enter the command server. server=[#{@conn}] command=[#{cmd.chomp}]"
        response = nil
        Timeout.timeout(tout) do
          @conn.send(cmd)
          response = parser.call(@conn, processor)
        end
        response
      rescue Timeout::Error => e
        error "Connection timeout. server=[#{@conn}] command=[#{cmd.chomp}]"
        @conn.close
        raise e
      end

      def sent_size
        @conn.sent_size
      end

      def received_size
        @conn.received_size
      end

      # Best-effort quit (1 second budget), then closes the socket.
      # NOTE(review): `quit` is not defined in this file — presumably added
      # by a defcmd in a subclass; verify against callers.
      def close()
        begin
          Timeout.timeout(1) { quit }
        rescue Timeout::Error
        end
        @conn.close
      end

      @@processors = {}
      @@parsers = {}

      # Defines instance method +method_symbol+: formats +command_template+
      # with the call arguments (printf-style) and performs a request using
      # the registered parser. When +timeout+ is false the request runs with
      # no deadline (used by streaming commands such as dump).
      def self.defcmd_generic(method_symbol, command_template, parser, timeout, &default_processor)
        @@parsers[method_symbol] = parser
        @@processors[method_symbol] = default_processor || proc { false }
        timeout_expr = timeout ? "@tout" : "nil"
        self.class_eval %{
          def #{method_symbol.to_s}(*args, &processor)
            cmd = "#{command_template}"
            cmd = cmd % args if args.size > 0
            processor = @@processors[:#{method_symbol}] if processor.nil?
            request(cmd, @@parsers[:#{method_symbol}], processor, #{timeout_expr})
          end
        }
      end

      # Command whose reply is free-form lines terminated by one of
      # OK/END/STORED/DELETED/NOT_FOUND. The accumulated body is passed to
      # the processor; EXISTS and protocol errors yield false.
      def self.defcmd(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          resp = ""
          answers = [Ok, End, Stored, Deleted, NotFound].map {|x| Flare::Util::Result.string_of_result(x)}
          fails = [Exists].map {|x| Flare::Util::Result.string_of_result(x)}
          errors = [Error, ServerError, ClientError].map {|x| Flare::Util::Result.string_of_result(x)}
          while x = conn.getline
            ans = x.chomp.split(' ', 2)
            ans = ans.empty? ? '' : ans[0]
            case ans
            when *answers
              break
            when *fails
              resp = false
              break
            when *errors
              # NOTE(review): self here is the class (lambda closes over the
              # class-method scope), so the server identity is not logged.
              warn "Failed command. server=[#{self}] sent=[#{conn.last_sent}] result=[#{x.chomp}]"
              resp = false
              break
            else
              resp += x
            end
          end
          return processor.call(resp) if resp
          return false
        }
        defcmd_generic(method_symbol, command_template, parser, true, &default_processor)
      end

      # Command that expects no reply at all; the processor is invoked
      # immediately without reading from the connection.
      def self.defcmd_noreply(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          return processor.call()
        }
        defcmd_generic(method_symbol, command_template, parser, true, &default_processor)
      end

      # Command whose reply is exactly one line, passed through the processor.
      def self.defcmd_oneline(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          line = conn.getline
          processor.call(line)
        }
        defcmd_generic(method_symbol, command_template, parser, true, &default_processor)
      end

      # Command whose reply is a stream of "KEY <key>" lines ended by "END".
      # Collects truthy processor results; no deadline (streaming).
      def self.defcmd_key(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          rets = []
          while true
            line = conn.getline
            elems = line.split(' ')
            if elems[0] == "KEY"
              unless processor.nil?
                r = processor.call(elems[1])
                rets << r if r
              end
            elsif elems[0] == "END"
              return rets
            else
              info "key parser: error \"#{line.chomp}\""
              return false
            end
          end
        }
        defcmd_generic(method_symbol, command_template, parser, false, &default_processor)
      end

      # Command whose reply is a stream of
      # "VALUE <key> <flag> <len> <version> <expire>" records followed by
      # <len> bytes of data and a trailing line, ended by "END".
      # Collects truthy processor results; no deadline (streaming).
      def self.defcmd_value(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          rets = []
          while true
            line = conn.getline
            elems = line.split(' ')
            if elems[0] == "VALUE"
              key, flag, len, version, expire = elems[1], elems[2].to_i, elems[3].to_i, elems[4].to_i, elems[5].to_i
              data = conn.read(len)
              unless processor.nil?
                r = processor.call(data, key, flag, len, version, expire)
                rets << r if r
              end
              conn.getline # skip the trailing line after the data block
            elsif elems[0] == "END"
              return rets
            else
              info "value parser: error \"#{line.chomp}\""
              return false
            end
          end
        }
        defcmd_generic(method_symbol, command_template, parser, false, &default_processor)
      end

      # Command whose reply is a stream of LISTELEMENT records ended by
      # "END". Returns the single result unwrapped when exactly one element
      # was collected, otherwise the array. No deadline (streaming).
      def self.defcmd_listelement(method_symbol, command_template, &default_processor)
        parser = lambda {|conn,processor|
          rets = []
          while true
            line = conn.getline
            elems = line.split(' ')
            if elems[0] == "LISTELEMENT"
              key, rel, abs = elems[1], elems[2].to_i, elems[3].to_i
              # NOTE(review): version/expire are kept as strings here, unlike
              # defcmd_value which converts with to_i — confirm intended.
              flag, len, version, expire = elems[4].to_i, elems[5].to_i, elems[6], elems[7]
              data = conn.read(len)
              unless processor.nil?
                r = processor.call(data, key, rel, abs, flag, len, version, expire)
                rets << r if r
              end
              conn.getline # skip the trailing line after the data block
            elsif elems[0] == "END"
              return rets[0] if rets.size == 1
              return rets
            else
              info "error \"#{line.chomp}\""
              return false
            end
          end
        }
        defcmd_generic(method_symbol, command_template, parser, false, &default_processor)
      end

      # Splits the VERSION reply into [server_name, [major, minor, ...]].
      # "flare-1.0.14" => ["flare", [1, 0, 14]]; "1.0.9" => ["flare", [1, 0, 9]].
      def server_version
        verstrings = version.split('-')
        server = "flare"
        server = verstrings.shift if verstrings.size > 1
        numbers = verstrings[0].split('.').map {|v| v.to_i}
        [server, numbers]
      end

      # we have two types of VERSION formats.
      # VERSION flare-1.0.14
      # VERSION 1.0.9
      def version
        version_
      end
      defcmd_oneline :version_, 'version\r\n' do |resp|
        code, version = resp.chomp.split(' ')
        # Fix: `next`, not `return` — this block is stored and invoked later
        # as an orphaned proc, where `return` raises LocalJumpError.
        next "0.0.0" if code != "VERSION"
        version
      end

    end
  end
end
# -*- coding: utf-8; -*-
# Authors:: Kiyoshi Ikehara <kiyoshi.ikehara@gree.net>
# Copyright:: Copyright (C) GREE, Inc. 2011.
# License:: MIT-style

require 'rexml/document'
require 'flare/util/constant'
require 'flare/tools/common'

#
module Flare
  module Tools

    # == Description
    # Cluster is a class that describes a cluster information.
    #
    # It holds the per-node stat hashes reported by a flare index server,
    # groups the nodes by partition, and can serialize/deserialize the
    # boost XML archive format used by flared (flare.xml).
    class Cluster
      include Flare::Util::Constant

      # stat-hash keys
      State = 'state'
      Role = 'role'
      # state values
      StateActive = 'active'
      StateDown = 'down'
      StateReady = 'ready'
      StatePrepare = 'prepare'
      # role values
      RoleProxy = 'proxy'
      RoleMaster = 'master'
      RoleSlave = 'slave'
      StatPartition = 'partition'

      # symbolic name -> numeric code (as string) used in the flare.xml archive
      States = { "active" => '0', "prepare" => '1', "down" => '2', "ready" => '3' }
      Roles = { "master" => '0', "slave" => '1', "proxy" => '2' }

      # Thin wrapper around one node's stat hash (string keys and values),
      # adding role/state predicates and integer-typed accessors.
      class NodeStat
        def initialize stat
          # dup so later mutation through []= does not affect the caller's hash
          @stat = stat.dup
        end

        def [](i)
          @stat[i]
        end

        def []=(i, v)
          @stat[i] = v.to_s
        end

        def master?
          (role == RoleMaster)
        end

        def slave?
          (role == RoleSlave)
        end

        def proxy?
          (role == RoleProxy)
        end

        def active?
          (state == StateActive)
        end

        def ready?
          (state == StateReady)
        end

        def down?
          (state == StateDown)
        end

        def prepare?
          (state == StatePrepare)
        end

        def partition
          @stat['partition'].to_i
        end

        def thread_type
          @stat['thread_type'].to_i
        end

        def balance
          @stat['balance'].to_i
        end

        # Unknown messages resolve to the stat entry of the same name when
        # present (this is how `role` and `state` above are served),
        # otherwise they are forwarded to the underlying Hash.
        # NOTE(review): no respond_to_missing? override, so respond_to? does
        # not reflect the stat-key accessors.
        def method_missing(action, *args)
          if @stat.has_key? action.to_s
            @stat[action.to_s]
          else
            @stat.__send__(action, *args)
          end
        end
      end

      # nodes_stat is an enumerable of [nodekey, stat-hash] pairs (a Hash or
      # an array of pairs). Builds:
      #   @partition      - array indexed by partition: {nodekey => stat-hash}
      #   @partition_size - number of partitions (max partition index + 1)
      #   @nodes          - {nodekey => NodeStat}
      def initialize(index_server_hostname, index_server_port, nodes_stat)
        @index_server_hostname = index_server_hostname
        @index_server_port = index_server_port
        @nodes_stat = nodes_stat
        max_partition = -1
        nodes_stat.each do |nodekey,node_stat|
          p = node_stat[StatPartition].to_i
          max_partition = p if p > max_partition
        end
        @partition = if max_partition >= 0
                       (0..max_partition).map {Hash.new}
                     else
                       []
                     end
        @partition_size = max_partition+1
        nodes_stat.each do |nodekey,node_stat|
          p = node_stat[StatPartition].to_i
          # proxies report partition -1 and are not placed in any partition
          @partition[p][nodekey] = node_stat if p >= 0
        end
        @nodes = {}
        nodes_stat.each do |k,v|
          @nodes[k] = NodeStat.new(v)
        end
      end

      # check if the partition of a nodekey has at least one active slave
      def reconstructable?(nodekey)
        node = node_stat(nodekey)
        ret = if node[State] == StateActive
                case node[Role]
                when RoleProxy
                  false
                when RoleSlave
                  true
                when RoleMaster
                  # if the partition has at least one active slave, one of the slaves will take over the master.
                  slaves_in_partition(node[StatPartition]).inject(false) do |r,slave_nodekey|
                    node_stat(slave_nodekey)[State] == StateActive
                  end
                else
                  # NOTE(review): `error` is not defined in this class (no
                  # logging mixin is included here) — this branch would raise
                  # NameError at runtime; confirm intended mixin.
                  error "unknown role: #{node[Role]}"
                  false
                end
              else
                false
              end
        ret
      end

      # Stricter variant of reconstructable?: true only when the partition
      # keeps at least one OTHER active replica after removing this node
      # (for a master: at least two active slaves).
      def safely_reconstructable?(nodekey)
        node = node_stat(nodekey)
        return false if node[State] != StateActive
        case node[Role]
        when RoleProxy
          false
        when RoleSlave
          slaves_in_partition(node[StatPartition]).inject(false) do |r, slave_nodekey|
            if slave_nodekey != nodekey
              node_stat(slave_nodekey)[State] == StateActive
            else
              r
            end
          end
        when RoleMaster
          count = slaves_in_partition(node[StatPartition]).inject(0) do |r, slave_nodekey|
            if node_stat(slave_nodekey)[State] == StateActive then r+1 else r end
          end
          (count >= 2)
        else
          raise "internal error."
        end
      end

      # Returns the {nodekey => stat-hash} map of partition +p+ (nil-safe index).
      def partition(p)
        @partition[p.to_i]
      end

      # Nodekey of the master of partition +p+, or nil.
      def master_in_partition(p)
        return nil if partition(p).nil?
        partition(p).inject(nil) {|r,i|
          nodekey, node = i
          if node[Role] == RoleMaster then nodekey else r end
        }
      end

      # Nodekeys of the slaves of partition +p+ (possibly empty), or nil when
      # the partition does not exist.
      def slaves_in_partition(p)
        return nil if partition(p).nil?
        partition(p).inject([]) {|r,i| if i[1][Role] == RoleSlave then r << i[0] else r end}
      end

      # Nodekeys filtered by the optional block, ordered by
      # [partition, role-name, nodekey]; proxies (partition < 0) sort last.
      def nodekeys_(&block)
        unordered = if block.nil?
                      @nodes.keys
                    else
                      ret = []
                      @nodes.each do |k,v|
                        ret << k if block.call(v)
                      end
                      ret
                    end
        unordered.sort_by do |i|
          p = @nodes[i].partition
          p = @partition_size if p < 0
          [p, @nodes[i].role, i]
        end
      end

      def nodekeys
        nodekeys_
      end

      def master_nodekeys
        nodekeys_ {|v| v[Role] == RoleMaster }
      end

      def master_and_slave_nodekeys
        nodekeys_ {|v| v[Role] == RoleMaster || v[Role] == RoleSlave }
      end

      # NodeStat for +nodekey+, or nil when unknown.
      def node_stat(nodekey)
        @nodes[nodekey]
      end

      def size
        @nodes.size
      end

      def partition_size
        @partition_size
      end

      # proxy -> -1
      # not found -> nil
      def partition_of_nodename node
        @nodes.each do |k,v|
          return v[StatPartition].to_i if k == node
        end
        return nil
      end

      def has_nodekey?(nodekey)
        @nodes.has_key? nodekey
      end

      # Renders the boost-serialization class attributes for a tag, or ""
      # when +x+ is nil (attributes are emitted only on the first item).
      def serattr_ x
        return "" if x.nil?
        " class_id=\"#{x['class_id']}\" tracking_level=\"#{x['tracking_level']}\" version=\"#{x['version']}\""
      end

      # Serializes the node map into the boost XML archive format consumed
      # by flared (flare.xml). The class_id/tracking_level/version attributes
      # appear only on the first <item>/<second>, matching boost's output.
      def serialize
        thread_type = 0

        node_map_id = {"class_id"=>"0", "tracking_level"=>"0", "version"=>"0"}
        item_id = {"class_id"=>"1", "tracking_level"=>"0", "version"=>"0"}
        second_id = {"class_id"=>"2", "tracking_level"=>"0", "version"=>"0"}

        output =<<"EOS"
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE boost_serialization>
<boost_serialization signature="serialization::archive" version="4">
<node_map#{serattr_(node_map_id)}>
\t<count>#{@nodes.size}</count>
\t<item_version>0</item_version>
EOS
        @nodes.each do |k,v|
          node_server_name, node_server_port = k.split(':')
          node_role = Roles[v['role']]
          node_state = States[v['state']]
          node_partition = v['partition']
          node_balance = v['balance']
          node_thread_type = v['thread_type'].to_i

          output +=<<"EOS"
\t<item#{serattr_(item_id)}>
\t\t<first>#{k}</first>
\t\t<second#{serattr_(second_id)}>
\t\t\t<node_server_name>#{node_server_name}</node_server_name>
\t\t\t<node_server_port>#{node_server_port}</node_server_port>
\t\t\t<node_role>#{node_role}</node_role>
\t\t\t<node_state>#{node_state}</node_state>
\t\t\t<node_partition>#{node_partition}</node_partition>
\t\t\t<node_balance>#{node_balance}</node_balance>
\t\t\t<node_thread_type>#{node_thread_type}</node_thread_type>
\t\t</second>
\t</item>
EOS
          # only the first item carries the class attributes
          item_id = nil
          second_id = nil
          # overall thread_type is one past the largest per-node value
          thread_type = node_thread_type+1 if node_thread_type >= thread_type
        end
        output +=<<"EOS"
</node_map>
<thread_type>#{thread_type}</thread_type>
</boost_serialization>
EOS
        output
      end

      # Parses a flare.xml boost archive and builds a Cluster whose index
      # server host/port are the defaults from Flare::Util::Constant.
      def self.build flare_xml
        doc = REXML::Document.new flare_xml
        nodemap = doc.elements['/boost_serialization/node_map']
        thread_type = doc.elements['/boost_serialization/thread_type']
        count = nodemap.elements['count'].get_text.to_s.to_i
        # NOTE(review): count/item_version/thread_type are read but unused here
        item_version = nodemap.elements['item_version'].get_text.to_s.to_i
        nodestat = []
        nodemap.elements.each('item') do |item|
          nodekey = item.elements['first'].get_text.to_s
          elem = item.elements['second'].elements
          node = {
            'server_name' => elem['node_server_name'].get_text.to_s,
            'server_port' => elem['node_server_port'].get_text.to_s,
            'role' => elem['node_role'].get_text.to_s,
            'state' => elem['node_state'].get_text.to_s,
            'partition' => elem['node_partition'].get_text.to_s,
            'balance' => elem['node_balance'].get_text.to_s,
            'thread_type' => elem['node_thread_type'].get_text.to_s
          }
          nodestat << [nodekey, node]
        end
        Cluster.new(DefaultIndexServerName, DefaultIndexServerPort, nodestat)
      end

    end
  end
end