flare-tools 0.1.4 → 0.4.5.1

Files changed (100)
  1. data/.gemtest +0 -0
  2. data/Flare-tools.txt +0 -0
  3. data/History.txt +114 -2
  4. data/LICENSE +21 -0
  5. data/Manifest.txt +65 -8
  6. data/README.txt +356 -0
  7. data/Rakefile +90 -25
  8. data/Tutorial.txt +370 -0
  9. data/bin/flare-admin +6 -0
  10. data/bin/flare-argv0 +6 -0
  11. data/bin/flare-deploy +6 -0
  12. data/bin/flare-keychecker +6 -0
  13. data/bin/flare-part +6 -0
  14. data/bin/flare-ping +6 -0
  15. data/bin/flare-stats +4 -10
  16. data/bin/flare-zkadmin +6 -0
  17. data/lib/flare/net/connection.rb +98 -0
  18. data/lib/flare/test/cluster.rb +140 -0
  19. data/lib/flare/test/daemon.rb +144 -0
  20. data/lib/flare/test/node.rb +62 -0
  21. data/lib/flare/tools.rb +18 -16
  22. data/lib/flare/tools/cli.rb +32 -0
  23. data/lib/flare/tools/cli/activate.rb +106 -0
  24. data/lib/flare/tools/cli/balance.rb +83 -0
  25. data/lib/flare/tools/cli/cli_util.rb +77 -0
  26. data/lib/flare/tools/cli/deploy.rb +170 -0
  27. data/lib/flare/tools/cli/down.rb +85 -0
  28. data/lib/flare/tools/cli/dump.rb +219 -0
  29. data/lib/flare/tools/cli/dumpkey.rb +117 -0
  30. data/lib/flare/tools/cli/flare_admin.rb +81 -0
  31. data/lib/flare/tools/cli/flare_argv0.rb +60 -0
  32. data/lib/flare/tools/cli/flare_keychecker.rb +106 -0
  33. data/lib/flare/tools/cli/flare_zkadmin.rb +226 -0
  34. data/lib/flare/tools/cli/index.rb +54 -0
  35. data/lib/flare/tools/cli/list.rb +93 -0
  36. data/lib/flare/tools/cli/master.rb +143 -0
  37. data/lib/flare/tools/cli/part.rb +100 -0
  38. data/lib/flare/tools/cli/ping.rb +81 -0
  39. data/lib/flare/tools/cli/reconstruct.rb +164 -0
  40. data/lib/flare/tools/cli/remove.rb +119 -0
  41. data/lib/flare/tools/cli/restore.rb +180 -0
  42. data/lib/flare/tools/cli/slave.rb +125 -0
  43. data/lib/flare/tools/cli/stats.rb +229 -122
  44. data/lib/flare/tools/cli/sub_command.rb +73 -0
  45. data/lib/flare/tools/cli/summary.rb +97 -0
  46. data/lib/flare/tools/cli/threads.rb +78 -0
  47. data/lib/flare/tools/cli/verify.rb +202 -0
  48. data/lib/flare/tools/client.rb +267 -0
  49. data/lib/flare/tools/cluster.rb +319 -0
  50. data/lib/flare/tools/common.rb +196 -0
  51. data/lib/flare/tools/index_server.rb +51 -0
  52. data/lib/flare/tools/node.rb +162 -0
  53. data/lib/flare/tools/stats.rb +75 -0
  54. data/lib/flare/tools/zk_util.rb +28 -0
  55. data/lib/flare/util.rb +34 -0
  56. data/lib/flare/util/bwlimit.rb +132 -0
  57. data/lib/flare/util/command_line.rb +79 -0
  58. data/lib/flare/util/conf.rb +71 -0
  59. data/lib/flare/util/constant.rb +25 -0
  60. data/lib/flare/util/conversion.rb +26 -0
  61. data/lib/flare/util/default_logger.rb +52 -0
  62. data/lib/flare/util/exception.rb +19 -0
  63. data/lib/flare/util/filesystem.rb +30 -0
  64. data/lib/flare/util/flared_conf.rb +33 -0
  65. data/lib/flare/util/flarei_conf.rb +32 -0
  66. data/lib/flare/util/hash_function.rb +32 -0
  67. data/lib/flare/util/interruption.rb +70 -0
  68. data/lib/flare/util/key_resolver.rb +67 -0
  69. data/lib/flare/util/log4r_logger.rb +79 -0
  70. data/lib/flare/util/logger.rb +40 -0
  71. data/lib/flare/util/logging.rb +84 -0
  72. data/lib/flare/util/result.rb +53 -0
  73. data/test/test/experimental/cache_test.rb +113 -0
  74. data/test/test/experimental/key_distribution_test.rb +38 -0
  75. data/test/test/experimental/keychecker_test.rb +60 -0
  76. data/test/test/experimental/list_test.rb +108 -0
  77. data/test/test/extra/replication_test.rb +184 -0
  78. data/test/test/integration/cli_test.rb +348 -0
  79. data/test/test/integration/dump_expired_test.rb +103 -0
  80. data/test/test/integration/dump_test.rb +128 -0
  81. data/test/test/integration/index_server_test.rb +35 -0
  82. data/test/test/integration/node_test.rb +78 -0
  83. data/test/test/integration/partition_test.rb +235 -0
  84. data/test/test/integration/proxy_test.rb +54 -0
  85. data/test/test/integration/stats_test.rb +79 -0
  86. data/test/test/system/flare_admin_test.rb +191 -0
  87. data/test/test/unit/bwlimit_test.rb +52 -0
  88. data/test/test/unit/cluster_test.rb +96 -0
  89. data/test/test/unit/daemon_test.rb +30 -0
  90. data/test/test/unit/logger_test.rb +46 -0
  91. data/test/test/unit/tools_test.rb +25 -0
  92. data/test/test/unit/util_test.rb +70 -0
  93. metadata +239 -84
  94. data/README.rdoc +0 -83
  95. data/bin/flare-partition-setting +0 -12
  96. data/lib/flare/tools/cli/partition_setting.rb +0 -86
  97. data/lib/flare/tools/core.rb +0 -189
  98. data/lib/flare/tools/logger.rb +0 -31
  99. data/test/test_flare-tools.rb +0 -11
  100. data/test/test_helper.rb +0 -3
data/Rakefile CHANGED
@@ -1,25 +1,90 @@
- require 'rubygems'
- gem 'hoe', '>= 2.1.0'
- require 'hoe'
- require 'fileutils'
- require 'lib/flare/tools'
-
- Hoe.plugin :newgem
- # Hoe.plugin :website
- # Hoe.plugin :cucumberfeatures
-
- $hoe = Hoe.spec 'flare-tools' do
-   self.version = FlareTools::VERSION
-   self.developer 'kgws', 'dev.kgws@gmail.com'
-   self.post_install_message = 'PostInstall.txt'
-   self.url = 'http://github.com/kgws/flare-tools'
-   self.summary = "Management Tools for Flare"
-   self.description = "Flare is a collection of tools for management."
-   self.extra_deps = [
-   ]
- end
-
- require 'newgem/tasks'
- Dir['tasks/**/*.rake'].each { |t| load t }
-
- task :default => [:spec, :features]
+
+ $LOAD_PATH.unshift File.dirname(__FILE__)+"/lib"
+
+ require 'rubygems'
+ gem 'hoe', '>= 2.1.0'
+ require 'hoe'
+ gem 'rdoc'
+ require 'rdoc'
+ require 'rdoc/markup'
+ require 'rdoc/markup/formatter'
+ require 'rdoc/markup/to_ansi'
+
+ require 'fileutils'
+ require 'flare/tools'
+
+ Hoe.plugin :newgem
+
+ $hoe = Hoe.spec 'flare-tools' do
+   self.version = Flare::Tools::VERSION
+   self.developer 'kikehara', 'kiyoshi.ikehara@gree.net'
+   self.urls = ['http://github.com/gree/flare-tools']
+   self.summary = "Management Tools for Flare"
+   self.post_install_message = 'PostInstall.txt'
+   self.description = "Flare-tools is a collection of tools for Flare distributed key-value store."
+   self.readme_file = "README.txt"
+   self.extra_deps = [['log4r', '>= 1.1.4'], ['zookeeper', '>= 1.2.6'], ['tokyocabinet', '>= 1.29']]
+   self.rubyforge_name = 'flare-tools'
+   self.extra_rdoc_files = []
+ end
+
+ require 'newgem/tasks'
+ Dir['tasks/**/*.rake'].each { |t| load t }
+
+ task :default => [:spec, :features]
+
+ task :manual do
+   h = RDoc::Markup::ToAnsi.new
+   rdoc = File.read("README.txt")
+   puts h.convert(rdoc)
+ end
+
+ task :test do
+   sh "(cd test && rake)"
+ end
+
+ task :stress_test do
+   sh "(cd test && rake stress)"
+ end
+
+ task :clean do
+   sh "(cd test && rake clean)"
+ end
+
+ task :manifest_post do
+   sh "grep -ve '^debian' Manifest.txt | grep -ve '^test' | grep -ve '^package' | grep -ve '^Makefile' | grep -v '#\$' > Manifest.tmp"
+   sh "mv Manifest.tmp Manifest.txt"
+ end
+
+ task :install => [:manifest, :manifest_post, :install_gem]
+
+ task :debuild do |t|
+   sh "debuild -us -uc"
+ end
+
+ task :debclean do
+   sh "debclean"
+   sh "(cd .. && rm -f *.dsc *.tar.gz *.build *.changes)"
+   sh "rm -f debian/changelog.dch"
+ end
+
+ def previous version
+   prev = version.split('.').map{|v| v.to_i}
+   prev[2] -= 1
+   prev.join('.')
+ end
+
+ task :change do
+   puts "================================="
+   puts " Flare::Tools::VERSION = #{Flare::Tools::VERSION}"
+   puts "================================="
+   debian_branch = ENV["DEBIAN_BRANCH"] || "(no branch)"
+   version = Flare::Tools::VERSION
+   since = previous version
+   sh "git-dch --debian-branch='#{debian_branch}' --new-version #{version} --since=#{since}"
+ end
+
+ task :cleanall => [:clean] do
+ end
+
+
data/Tutorial.txt ADDED
@@ -0,0 +1,370 @@
+ :title:Tutorial
+
+ = Tutorial
+
+ * under construction
+
+ In this document, you can find out how to manage a flare cluster using flare-tools.
+
+ == Preparation
+
+ Throughout this tutorial, we use the following hosts to deploy flare nodes.
+
+ mynode1:: a host for the index server and a node of partition 0
+ mynode2:: a host for another node of partition 0
+
+ Please note that all flare-tools commands assume that the node names you specify
+ in the configuration files can be resolved by a naming service.
+ If any of your flare nodes are not listed in /etc/hosts or your DNS servers,
+ add them to those naming services before using flare-tools.
+
+   mynode1$ sudo vi /etc/hosts
+   ...
+   192.168.0.1 mynode1
+   192.168.0.2 mynode2
+   ...
+
+ == Installing flare
+
+ === For Debian users
+
+ If you're going to use flare on a Debian-based distribution, you can install it using dpkg.
+ You can find Debian packages at http://labs.gree.jp/Top/OpenSource/Flare/Download.html .
+
+   mynode1$ sudo dpkg -i flare_1.0.x-x_i386.deb
+   ...
+   mynode1$
+
+ === For Fedora users
+
+ * install the start-stop-daemon RPM package.
+ * install the flare RPM package, as sketched below.
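+
+ A minimal sketch of those two steps (the RPM file names are assumptions;
+ substitute the packages you actually downloaded):
+
+   mynode1$ sudo rpm -ivh start-stop-daemon-x.x.x-x.i386.rpm
+   mynode1$ sudo rpm -ivh flare-1.0.x-x.i386.rpm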
+
+ == Installing flare-tools
+
+ Flare-tools hasn't been published to the public gem repository yet, so please pull
+ the code from the git repository and type "rake install_gem" in your shell.
+
+   mynode1$ sudo gem install hoe newgem rdoc
+   mynode1$ git clone ...
+   mynode1$ cd flare-tools
+   mynode1$ rake install_gem
+
+ == Setting up your index server
+
+ First of all, you should set up an index server to create a flare cluster.
+
+ === STEP 1. Editing the index server's configuration file
+
+ You should edit /etc/flarei.conf to specify your index server's name.
+
+   mynode1$ vi /etc/flarei.conf
+   ...
+   server-name = mynode1
+   server-port = 12120
+   ...
+
+ === STEP 2. Starting an index server
+
+ Now you can start your first flare cluster using flare's init.d script.
+
+   mynode1$ sudo /etc/init.d/flare start-index
+   ...
+
+ Please confirm that your index server is running.
+
+   mynode1$ flare-admin ping mynode1:12120
+   alive
+
+ == Setting up your first node
+
+ === STEP 1. Editing the node server's configuration file
+
+ You should also edit /etc/flared.conf to specify your node's name and the index server.
+
+   mynode1$ vi /etc/flared.conf
+   ...
+   index-server-name = mynode1
+   index-server-port = 12120
+   server-name = mynode1
+   server-port = 12121
+   ...
+
+ === STEP 2. Starting a node
+
+ Now you can start your first node using flare's init.d script.
+
+   mynode1$ sudo /etc/init.d/flare start-node
+   ...
+
+ You can use the list subcommand to show the nodes recognized by the index server.
+
+   mynode1$ flare-admin list
+   node           partition  role   state   balance
+   mynode1:12121  -          proxy  active  0
+
+ If you'd like to confirm that the node is really alive, you can use the ping
+ subcommand to send a ping request.
+
+   mynode1$ flare-admin ping mynode1:12121
+   alive
+
+ === STEP 3. Creating a new partition
+
+ At this moment, there's no partition in this cluster.
+ Let's make a partition with mynode1:12121 using the master subcommand.
+
+   mynode1$ flare-admin master mynode1:12121:1:0
+   making the node master (node=192.168.0.1:12121, role=proxy -> master) (y/n): y
+   0 (role = master, state = active) [ETA: n/a sec (elapsed = 0 sec)]
+   state is ready -> stop waiting
+   node           partition  role    state   balance
+   mynode1:12121  0          master  active  1
+   mynode1$
+
+ The argument of the master subcommand is specified in the form of
+ "hostname:port:balance:partition"; in the example above, mynode1:12121 is given a
+ balance of 1 and becomes the master of the new partition 0.
+
+ hostname:: the node's hostname (specified in flared.conf)
+ port:: the node's port (specified in flared.conf)
+ balance:: the node's balance parameter to be set
+ partition:: the index number of the new partition
+
+ This command performs the actions shown below:
+ * changes the node's role from proxy to master.
+ * waits for the node to become ready by checking the role and state described in the cluster information distributed by the index node.
+
+ === STEP 4. Storing data for testing
+
+ The cluster can now store key-value pairs through its memcached-compatible
+ protocol, so let's store an entry.
+
+ Each node holds no data at the time of creation. Please confirm that with the
+ flare-stats command.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode1:12121  active  master  0          1        0       2     0       -    0     1d      1.0.14
+
+ You can store a key-value entry to the cluster as follows.
+
+   $ printf "set key1 0 0 6\r\nvalue1\r\nquit\r\n" | netcat mynode1 12121
+   STORED
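+
+ Since the protocol is memcached-compatible, you can read the entry back with a
+ plain get command; the reply shown below is the standard memcached text-protocol
+ response format:
+
+   $ printf "get key1\r\nquit\r\n" | netcat mynode1 12121
+   VALUE key1 0 6
+   value1
+   END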
+
+ Now flare-stats reports that the node has one item.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode1:12121  active  master  0          1        1       2     0       -    0     1d      1.0.14
+
+ == Creating a slave node
+
+ In this section, you will find out how to add a node to a partition as a slave.
+
+ === STEP 1. Storing more data to the cluster
+
+ Adding a slave node can be time-consuming.
+ Before doing that, let's add more data to the cluster to make the situation more realistic.
+
+   $ (for i in $(seq 0 99999); do printf "set key%06d 0 0 10000\r\n%010000d\r\n" $i $i; done; echo "quit") | netcat mynode1 12121 2>&1 > /dev/null
+   $ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode1:12121  active  master  0          1        100001  2     0       -    0     1d      1.0.14
+
+ This command stores about 1GB of data (100,000 items of 10,000 bytes each).
+
+   mynode1$ ls -l /tmp/flare.hdb
+   -rw-r--r-- 1 root root 1005328752 Nov 24 10:15 /tmp/flare.hdb
+
+ === STEP 2. Starting the second node
+
+ Next, start the second node on mynode2.
+
+   mynode2$ sudo /etc/init.d/flare start-node
+   ...
+
+ The node mynode2 is now recognized by the index server.
+
+   mynode1$ flare-admin list
+   node           partition  role    state   balance
+   mynode2:12121  -          proxy   active  0
+   mynode1:12121  0          master  active  1
+
+ === STEP 3. Making a slave node
+
+ The slave subcommand takes the same "hostname:port:balance:partition" argument
+ form as master.
+
+   $ flare-admin slave mynode2:12121:1:0
+   making node slave (node=mynode2:12121, role=proxy -> slave) (y/n): y
+   started constructing slave node...
+   0/100001 (role = slave, state = prepare) [ETA: n/a sec (elapsed = 3 sec)]
+   0/100001 (role = slave, state = prepare) [ETA: n/a sec (elapsed = 4 sec)]
+   5/100001 (role = slave, state = prepare) [ETA: 102796 sec (elapsed = 5 sec)]
+   866/100001 (role = slave, state = prepare) [ETA: 705 sec (elapsed = 6 sec)]
+   2230/100001 (role = slave, state = prepare) [ETA: 314 sec (elapsed = 7 sec)]
+   3604/100001 (role = slave, state = prepare) [ETA: 219 sec (elapsed = 8 sec)]
+   4977/100001 (role = slave, state = prepare) [ETA: 175 sec (elapsed = 9 sec)]
+   6608/100001 (role = slave, state = prepare) [ETA: 144 sec (elapsed = 10 sec)]
+   8433/100001 (role = slave, state = prepare) [ETA: 122 sec (elapsed = 11 sec)]
+   10629/100001 (role = slave, state = prepare) [ETA: 103 sec (elapsed = 12 sec)]
+   ...
+   77306/100001 (role = slave, state = prepare) [ETA: 10 sec (elapsed = 36 sec)]
+   80288/100001 (role = slave, state = prepare) [ETA: 9 sec (elapsed = 37 sec)]
+   83298/100001 (role = slave, state = prepare) [ETA: 7 sec (elapsed = 38 sec)]
+   86277/100001 (role = slave, state = prepare) [ETA: 6 sec (elapsed = 39 sec)]
+   89000/100001 (role = slave, state = prepare) [ETA: 5 sec (elapsed = 40 sec)]
+   90753/100001 (role = slave, state = prepare) [ETA: 4 sec (elapsed = 41 sec)]
+   91627/100001 (role = slave, state = prepare) [ETA: 3 sec (elapsed = 42 sec)]
+   93623/100001 (role = slave, state = prepare) [ETA: 2 sec (elapsed = 43 sec)]
+   95840/100001 (role = slave, state = prepare) [ETA: 1 sec (elapsed = 44 sec)]
+   96910/100001 (role = slave, state = prepare) [ETA: 1 sec (elapsed = 45 sec)]
+   98110/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 46 sec)]
+   100001/100001 (role = slave, state = active) [ETA: 0 sec (elapsed = 48 sec)]
+   state is active -> stop waiting
+   node           partition  role   state   balance
+   mynode2:12121  0          slave  active  1
+
+ === STEP 4. Adding more data
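+
+ With the slave active, every entry written to the master is replicated to the
+ slave. A quick check (a sketch; the key name is arbitrary):
+
+   $ printf "set key100001 0 0 6\r\nvalue2\r\nquit\r\n" | netcat mynode1 12121
+   STORED
+
+ Running flare-stats afterwards should show the same, incremented item count on
+ both mynode1:12121 and mynode2:12121.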
+
+ == Reconstructing nodes
+
+ Sometimes the database grows larger than the data it actually holds because of
+ fragmentation or other behavior peculiar to the backend datastore.
+ In that situation, reconstructing the database is helpful, and you might want
+ to do so periodically.
+
+ Reconstructing a node is basically the same as making a slave, but before changing
+ the role, the node is turned down and its data is cleared with the 'flush_all' command.
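+
+ For reference, flush_all is the standard memcached command that drops all stored
+ items; issued by hand it looks like the sketch below (the reconstruct subcommand
+ performs this step for you):
+
+   $ printf "flush_all\r\nquit\r\n" | netcat mynode1 12121
+   OK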
+
+ === Reconstructing a node
+
+ Flare has a failover feature that keeps a cluster alive even when the master is
+ down, so you can also specify the master node of a partition as the target of
+ reconstruction.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode1:12121  active  master  0          1        100001  2     0       -    0     2d      1.0.14
+   mynode2:12121  active  slave   0          1        100001  2     0       -    0     1d      1.0.14
+
+ In this case, mynode1 is the master and mynode2 is a slave.
+ Type the command shown below and check its behavior.
+
+   mynode1$ flare-admin reconstruct mynode1:12121
+   node           partition  role    state   balance
+   mynode1:12121  0          master  active  1
+
+   you are trying to reconstruct mynode1:12121 without redanduncy.
+   reconstructing node (node=mynode1:12121, role=master) (y/n/a/q/h:help): y
+   turning down...
+   waiting for node to be active again...
+   started constructing node...
+   0/100001 (role = slave, state = prepare) [ETA: n/a sec (elapsed = 0 sec)]
+   606/100001 (role = slave, state = prepare) [ETA: 174 sec (elapsed = 1 sec)]
+   1502/100001 (role = slave, state = prepare) [ETA: 136 sec (elapsed = 2 sec)]
+   2379/100001 (role = slave, state = prepare) [ETA: 127 sec (elapsed = 3 sec)]
+   3302/100001 (role = slave, state = prepare) [ETA: 120 sec (elapsed = 4 sec)]
+   4508/100001 (role = slave, state = prepare) [ETA: 108 sec (elapsed = 5 sec)]
+   5879/100001 (role = slave, state = prepare) [ETA: 98 sec (elapsed = 6 sec)]
+   7665/100001 (role = slave, state = prepare) [ETA: 86 sec (elapsed = 7 sec)]
+   9853/100001 (role = slave, state = prepare) [ETA: 75 sec (elapsed = 8 sec)]
+   12711/100001 (role = slave, state = prepare) [ETA: 63 sec (elapsed = 9 sec)]
+   16441/100001 (role = slave, state = prepare) [ETA: 52 sec (elapsed = 10 sec)]
+   21198/100001 (role = slave, state = prepare) [ETA: 41 sec (elapsed = 11 sec)]
+   24516/100001 (role = slave, state = prepare) [ETA: 37 sec (elapsed = 12 sec)]
+   ...
+   78659/100001 (role = slave, state = prepare) [ETA: 7 sec (elapsed = 28 sec)]
+   78669/100001 (role = slave, state = prepare) [ETA: 8 sec (elapsed = 29 sec)]
+   79113/100001 (role = slave, state = prepare) [ETA: 8 sec (elapsed = 30 sec)]
+   80108/100001 (role = slave, state = prepare) [ETA: 7 sec (elapsed = 31 sec)]
+   82138/100001 (role = slave, state = prepare) [ETA: 7 sec (elapsed = 32 sec)]
+   83652/100001 (role = slave, state = prepare) [ETA: 6 sec (elapsed = 33 sec)]
+   86597/100001 (role = slave, state = prepare) [ETA: 5 sec (elapsed = 34 sec)]
+   90671/100001 (role = slave, state = prepare) [ETA: 3 sec (elapsed = 35 sec)]
+   93559/100001 (role = slave, state = prepare) [ETA: 2 sec (elapsed = 36 sec)]
+   93968/100001 (role = slave, state = prepare) [ETA: 2 sec (elapsed = 37 sec)]
+   94964/100001 (role = slave, state = prepare) [ETA: 2 sec (elapsed = 38 sec)]
+   95469/100001 (role = slave, state = prepare) [ETA: 1 sec (elapsed = 39 sec)]
+   97760/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 40 sec)]
+   100001/100001 (role = slave, state = active) [ETA: 0 sec (elapsed = 41 sec)]
+   state is active -> stop waiting
+   done.
+   node           partition  role   state   balance
+   mynode1:12121  0          slave  active  1
+
+ Reconstruction has finished; please check the state of the cluster.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode2:12121  active  master  0          1        100001  2     0       -    0     1d      1.0.14
+   mynode1:12121  active  slave   0          1        100001  2     0       -    0     2d      1.0.14
+
+ The node mynode2 has taken over the role of mynode1.
+
+ === Reconstructing all the nodes
+
+ In many cases, the nodes in a cluster are in a similar database state because
+ they were created at the same time and have processed the same transactions.
+ The reconstruct subcommand has an --all option for that situation.
+
+   mynode1$ flare-admin reconstruct --all
+
+ This reconstructs the nodes one by one.
+ If you don't want to be asked about each node,
+ specify the --force option to skip the confirmation.
+
+   mynode1$ flare-admin reconstruct --all --force
+
+ == Stopping and restoring a node
+
+ === STEP 1. Turning a node down
+
+ If you'd like to stop a node and bring it back to the proxy role, please use the
+ down subcommand.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode2:12121  active  master  0          1        100001  2     0       -    0     1d      1.0.14
+   mynode1:12121  active  slave   0          1        100001  2     0       -    0     2d      1.0.14
+
+   mynode1$ flare-admin down mynode2:12121
+   turning node down (node=192.168.0.2:12121, state=active -> down) (y/n): y
+   node           partition  role   state  balance
+   mynode2:12121  -1         proxy  down   0
+
+ Now the node mynode2 is a proxy and no longer replicates the master's data.
+
+   mynode1$ flare-stats
+   hostname:port  state   role    partition  balance  items   conn  behind  hit  size  uptime  version
+   mynode2:12121  active  proxy   -1         0        100001  2     0       -    0     1d      1.0.14
+   mynode1:12121  active  master  0          1        100001  2     0       -    0     2d      1.0.14
+
+ === STEP 2. Restoring a slave
+
+ Once you turn a node down, it is purged from its partition and its data becomes
+ invalid. A reconstruction step is needed to restore the node.
+
+   $ flare-admin slave mynode2:12121:1:0
+   making node slave (node=192.168.0.2:12121, role=proxy -> slave) (y/n): y
+   started constructing slave node...
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 0 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 1 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 2 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 3 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 4 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 5 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 6 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 7 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 8 sec)]
+   ...
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 49 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 50 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 51 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 52 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 53 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 54 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 55 sec)]
+   100001/100001 (role = slave, state = prepare) [ETA: 0 sec (elapsed = 56 sec)]
+   100001/100001 (role = slave, state = active) [ETA: 0 sec (elapsed = 57 sec)]
+   state is active -> stop waiting
+   node           partition  role   state   balance
+   mynode2:12121  0          slave  active  1
+
+ This time the progress may appear to be 100% from the very beginning of the
+ reconstruction, but the target node merely holds stale data.
+ Wait for a moment while the data copying process runs.
+
+