flare-tools 0.5.0 → 0.6.0

data/.gitignore CHANGED
@@ -22,3 +22,4 @@ tmp
  *.o
  *.a
  mkmf.log
+ environment.txt
@@ -1,3 +1,9 @@
+ === 0.6.0 / 2014-11-07
+ * Minor Enhancements
+ * Make the default options safer.
+ * Clean up the node before constructing the master/slave node by default.
+ * Check the redundancy of the partition before reconstructing with the --all option by default.
+
  === 0.5.0 / 2014-10-15
  * Refactoring structures
  * Refactoring.
data/README.txt CHANGED
@@ -267,6 +267,7 @@ subcommands:
  --force commit changes without confirmation
  --retry=[COUNT] specify retry count (default:10)
  --activate change node's state from ready to active
+ --without-clean don't clear datastore before construction
 
  [balance] set the balance values of nodes.
  Usage: flare-admin balance [hostname:port:balance] ...
@@ -326,7 +327,8 @@ subcommands:
  Usage: flare-admin slave [hostname:port:balance:partition] ...
  --force commit changes without confirmation
  --retry=[COUNT] specify retry count (default:10)
- --clean clear datastore before construction
+ --without-clean don't clear datastore before construction
+ --clean [obsolete] now the slave command clears the datastore before construction by default.
 
  [list] show the list of nodes in a flare cluster.
  Usage: flare-admin list
@@ -347,8 +349,9 @@ subcommands:
  [reconstruct] reconstruct the database of nodes by copying.
  Usage: flare-admin reconstruct [hostname:port] ...
  --force commit changes without confirmation
- --safe reconstruct a node safely
- --retry=[COUNT] specify retry count (default:10)
+ --unsafe reconstruct a node without the safety check
+ --safe [obsolete] nodes are now reconstructed safely by default
+ --retry=COUNT specify retry count (default:10)
  --all reconstruct all nodes
 
  == THANKS:
@@ -1,4 +1,7 @@
  module Flare; end
  module Flare::Entity; end
  class Flare::Entity::Server < Struct.new(:host, :port)
+ def to_s
+ "#{self.host}:#{self.port}"
+ end
  end
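
The new to_s lets a server entity be printed directly as its host:port nodekey. A minimal usage sketch; the require path and the host/port values below are assumptions, not taken from the gem:

  require 'flare/entity/server'   # assumed require path for the entity definition

  server = Flare::Entity::Server.new('localhost', 12121)
  server.to_s   # => "localhost:12121"
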
@@ -9,7 +9,7 @@ module Flare
  # flare-tools module.
  module Tools
  # the version number of flare-tools
- VERSION = '0.5.0'
+ VERSION = '0.6.0'
  TITLE = "Flare-tools version #{VERSION} Copyright (C) GREE, Inc. 2011-2014"
  autoload :Common, 'flare/tools/common'
  autoload :Cluster, 'flare/tools/cluster'
@@ -30,8 +30,9 @@ module Flare
  set_option_index_server
  set_option_dry_run
  set_option_force
- @optp.on('--retry=COUNT', "specify retry count (default:#{@retry})" ) {|v| @retry = v.to_i}
- @optp.on('--activate', "change node's state from ready to active") {@activate = true}
+ @optp.on('--retry=COUNT', "specify retry count (default:#{@retry})" ) {|v| @retry = v.to_i }
+ @optp.on('--activate', "change node's state from ready to active") { @activate = true }
+ @optp.on('--without-clean', "don't clear datastore before construction") { @without_clean = true }
  end
 
  def initialize
@@ -39,6 +40,7 @@ module Flare
  @force = false
  @retry = 10
  @activate = false
+ @without_clean = false
  end
 
  def execute(config, args)
@@ -89,11 +91,27 @@ module Flare
  info "no need to change the role of #{ipaddr}:#{port}."
  elsif existing_master
  info "the partition already has a master #{existing_master}."
+ elsif node['role'] != 'proxy'
+ puts "#{nodekey} is not a proxy."
  else
- STDERR.print "making the node master (node=#{ipaddr}:#{port}, role=#{node['role']} -> #{role}) (y/n): "
+ clean_notice_base = "\nitems stored in the node will be cleaned up (exec flush_all) before constructing it"
+ clean_notice = @without_clean ? '' : clean_notice_base
+ STDERR.print "making the node master (node=#{ipaddr}:#{port}, role=#{node['role']} -> #{role})#{clean_notice} (y/n): "
  exec = interruptible {(gets.chomp.upcase == "Y")}
  end
  if exec && !@dry_run
+ unless @without_clean
+ resp = false
+ Flare::Tools::Node.open(hostname, port, @timeout) do |n|
+ resp = n.flush_all
+ end
+ unless resp
+ STDERR.print "executing flush_all failed."
+ return S_NG
+ end
+ puts "executed flush_all command before constructing the master node."
+ end
+
  nretry = 0
  resp = false
  while resp == false && nretry < @retry
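
The block added above flushes the target node before promoting it, unless --without-clean is given. A minimal standalone sketch of that step, reusing Flare::Tools::Node.open and flush_all from the diff; the require path, helper name, and host/port are assumptions:

  require 'flare/tools/node'   # assumed require path, mirroring the gem's autoload entries

  # Hypothetical helper mirroring the 0.6.0 pre-construction cleanup: flush all
  # items stored on the node and report whether the command succeeded.
  def flush_before_construction(hostname, port, timeout = 10)
    resp = false
    Flare::Tools::Node.open(hostname, port, timeout) do |n|
      resp = n.flush_all
    end
    resp
  end

  STDERR.puts "executing flush_all failed." unless flush_before_construction('localhost', 12121)
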
@@ -31,15 +31,18 @@ module Flare
  set_option_index_server
  set_option_dry_run
  set_option_force
- @optp.on('--safe', "reconstruct a node safely" ) {@safe = true}
- @optp.on('--retry=COUNT', "specify retry count (default:#{@retry})") {|v| @retry = v.to_i}
- @optp.on('--all', "reconstruct all nodes" ) {@all = true}
+ @optp.on('--unsafe', "reconstruct a node without the safety check" ) { @unsafe = true }
+ @optp.on('--safe', "[obsolete] nodes are now reconstructed safely by default") do
+ # do nothing
+ end
+ @optp.on('--retry=COUNT', "specify retry count (default:#{@retry})") {|v| @retry = v.to_i }
+ @optp.on('--all', "reconstruct all nodes" ) { @all = true }
  end
 
  def initialize
  super
  @force = false
- @safe = false
+ @unsafe = false
  @retry = 10
  @all = false
  end
@@ -88,7 +91,7 @@ module Flare
  next
  end
  is_safe = cluster.safely_reconstructable? nodekey
- if @safe && !is_safe
+ if !@unsafe && !is_safe
  puts "The partition needs one more slave to reconstruct #{nodekey} safely."
  status = S_NG
  next
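
In 0.6.0 the safety check runs by default and --unsafe opts out, whereas 0.5.0 checked only when --safe was passed. A small self-contained sketch of the new guard, with the relevant state passed in as arguments; the method name is illustrative:

  # Refuse reconstruction when the user did not pass --unsafe and the cluster
  # reports that the node cannot be reconstructed safely (the partition lacks
  # a spare slave), mirroring `!@unsafe && !is_safe` above.
  def skip_reconstruction?(unsafe, safely_reconstructable)
    !unsafe && !safely_reconstructable
  end

  skip_reconstruction?(false, false)  # => true  (default: refuse the unsafe rebuild)
  skip_reconstruction?(false, true)   # => false (safe to proceed)
  skip_reconstruction?(true,  false)  # => false (--unsafe forces it through)
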
@@ -33,15 +33,18 @@ module Flare
  set_option_index_server
  set_option_dry_run
  set_option_force
- @optp.on('--retry=COUNT', "specify retry count(default:#{@retry})") {|v| @retry = v.to_i}
- @optp.on('--clean', "clear datastore before construction") {@clean = true}
+ @optp.on('--retry=COUNT', "specify retry count(default:#{@retry})") {|v| @retry = v.to_i}
+ @optp.on('--without-clean', "don't clear datastore before construction") { @without_clean = true }
+ @optp.on('--clean', '[obsolete] now the slave command clears the datastore before construction by default.') do
+ # do nothing
+ end
  end
 
  def initialize
  super
  @force = false
  @retry = DefaultRetry
- @clean = false
+ @without_clean = false
  end
 
  def execute(config, args)
@@ -77,16 +80,24 @@ module Flare
 
  exec = @force
  unless exec
- STDERR.print "making node slave (node=#{nodekey}, role=#{node['role']} -> slave) (y/n): "
+ clean_notice_base = "\nitems stored in the node will be cleaned up (exec flush_all) before constructing it"
+ clean_notice = @without_clean ? '' : clean_notice_base
+ STDERR.print "making node slave (node=#{nodekey}, role=#{node['role']} -> slave)#{clean_notice} (y/n): "
  interruptible do
  exec = true if gets.chomp.upcase == "Y"
  end
  end
  if exec && !@dry_run
- if @clean
+ unless @without_clean
+ resp = false
  Flare::Tools::Node.open(hostname, port, @timeout) do |n|
- n.flush_all
+ resp = n.flush_all
+ end
+ unless resp
+ STDERR.print "executing flush_all failed."
+ return S_NG
  end
+ puts "executed flush_all command before constructing the slave node."
  end
 
  nretry = 0
@@ -126,18 +126,13 @@ module Flare
  nodes.each do |k, n|
  max_nodekey_length = k.length if k.length > max_nodekey_length
  end
- table = Table.new
- add_header_to_table(table, header_configs)
- nodes.each do |k, node|
- stats_data = queue[k].pop
- next if (args.size > 0 && !args.include?(k))
- behind = (threads.has_key?(k) || threads[k].has_key?('behind')) ? threads[k]['behind'] : "-"
- r = record(stats_data, node, behind, query_prev, k)
- add_record_to_table(table, header_configs, r)
- end
+ r = records(args, nodes, queue, threads, query_prev)
  interruptible {
  wait_for_stats
  }
+ table = Table.new
+ add_header_to_table(table, header_configs)
+ add_records_to_table(table, header_configs, r)
  puts table.prettify
  end
  s.close
@@ -235,12 +230,24 @@ module Flare
  table.add_row(row)
  end
 
- def add_record_to_table(table, header_configs, record)
- row = Row.new(:separator => @delimiter)
- header_configs.each_with_index do |header_config, index|
- row.add_column(Column.new(record[index], header_config[1]))
+ def add_records_to_table(table, header_configs, records)
+ records.each do |record|
+ row = Row.new(:separator => @delimiter)
+ header_configs.each_with_index do |header_config, index|
+ row.add_column(Column.new(record[index], header_config[1]))
+ end
+ table.add_row(row)
+ end
+ end
+
+ # You can override this method to extend stats infos.
+ def records(args, nodes, queue, threads, query_prev)
+ nodes.map do |k, node|
+ stats_data = queue[k].pop
+ next if (args.size > 0 && !args.include?(k))
+ behind = (threads.has_key?(k) || threads[k].has_key?('behind')) ? threads[k]['behind'] : "-"
+ record(stats_data, node, behind, query_prev, k)
  end
- table.add_row(row)
  end
 
  # You can override this method to extend stats infos.
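
The new records hook is the intended extension point ("You can override this method to extend stats infos."). A hypothetical subclass sketch; the enclosing class is assumed to be Flare::Tools::Cli::Stats, and the subclass name and extra column are illustrative:

  # Reuse the default per-node rows and append one more column to each.
  # The default records implementation yields nil for nodes filtered out by
  # args (the `next` inside map), hence the compact call.
  class MyStats < Flare::Tools::Cli::Stats
    def records(args, nodes, queue, threads, query_prev)
      super.compact.map do |row|
        row + ["custom"]   # a matching header_configs entry is assumed to exist
      end
    end
  end
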
@@ -159,10 +159,13 @@ class FlareAdminTest < Test::Unit::TestCase
  p = @datanodes[0].port
  flare_admin_with_yes "master --index-server=#{@indexname}:#{@indexport} #{h}:#{p}:1:0"
  assert_equal(S_OK, $?.exitstatus)
- h = @datanodes[1].hostname
- p = @datanodes[1].port
- flare_admin_with_yes "slave --index-server=#{@indexname}:#{@indexport} #{h}:#{p}:1:0"
- assert_equal(S_OK, $?.exitstatus)
+ [1, 2].each do |i|
+ assert_equal(S_OK, $?.exitstatus)
+ h = @datanodes[i].hostname
+ p = @datanodes[i].port
+ flare_admin_with_yes "slave --index-server=#{@indexname}:#{@indexport} #{h}:#{p}:1:0"
+ assert_equal(S_OK, $?.exitstatus)
+ end
  flare_admin_with_yes "reconstruct --index-server=#{@indexname}:#{@indexport} --all"
  assert_equal(S_OK, $?.exitstatus)
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: flare-tools
  version: !ruby/object:Gem::Version
- version: 0.5.0
+ version: 0.6.0
  prerelease:
  platform: ruby
  authors:
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2014-10-15 00:00:00.000000000 Z
+ date: 2014-11-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: log4r
@@ -222,7 +222,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '0'
  segments:
  - 0
- hash: 54780071370042886
+ hash: 4173589925550821460
  required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements: