elasticsearch-manager 0.1.2 → 0.1.3.pre
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/README.md +54 -10
- data/RELEASES.md +27 -0
- data/bin/elasticsearch-manager +14 -4
- data/lib/elasticsearch/manager/cmd.rb +2 -1
- data/lib/elasticsearch/manager/rollingrestart.rb +4 -4
- data/lib/elasticsearch/manager/version.rb +1 -1
- data/spec/cmd_spec.rb +21 -0
- metadata +6 -5
- data/REALEASES.md +0 -0
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6079bcc920e6439d8a9a559475286c95b10bbd01
+  data.tar.gz: fa08640b414124cfe481e31c5e53671766ff24b7
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cf51c83135769abb3be98b2e1ad34a9a6823d3ffed8cbb4f49bb0f2ef0e441affcd1162b719964703c852265104d0f15da6185aa91323681feaeb300f362c1f8
+  data.tar.gz: 3515126e07ad11ad37bc7c8e9e3ae2e0e0573764265bda527877e41baa19d605c39ff0ccd0194a4d4610b62772cf35360acfd11abaa09688b1e31c45a6c2cfb8
```
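These checksums cover the two archives packed inside the published `.gem` file. If you need to verify them by hand, note that a `.gem` is a plain tar archive containing `metadata.gz` and `data.tar.gz`; a small Ruby sketch (the file paths are placeholders for archives you have already extracted):

```ruby
require 'digest'

# Verify the extracted archives of a downloaded .gem against checksums.yaml.
# 'metadata.gz' and 'data.tar.gz' are assumed to have been extracted first,
# e.g. with: tar xf elasticsearch-manager-0.1.3.pre.gem
%w[metadata.gz data.tar.gz].each do |name|
  bytes = File.binread(name)
  puts "#{name} SHA1:   #{Digest::SHA1.hexdigest(bytes)}"
  puts "#{name} SHA512: #{Digest::SHA512.hexdigest(bytes)}"
end
```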
data/Gemfile.lock
CHANGED
data/README.md
CHANGED
````diff
@@ -13,6 +13,26 @@ For a list of all supported actions, please run: `$ elasticsearch-manager --help`
 gem install elasticsearch-manager
 ```
 
+## List IPs for all cluster members
+```
+$ elasticsearch-manager --cluster-hostname elasticsearch.example.com \
+      --port 9200 status \
+      list-nodes
+10.0.0.3 -- master
+10.0.0.1
+10.0.0.2
+```
+
+## Display the count of all shard states for each node in the cluster
+```
+$ elasticsearch-manager --cluster-hostname elasticsearch.example.com \
+      --port 9200 status \
+      shard-state
+10.0.0.1: STARTED: 8 INITIALIZING: 0 RELOCATING: 0
+10.0.0.2: STARTED: 8 INITIALIZING: 0 RELOCATING: 1
+10.0.0.3: STARTED: 7 INITIALIZING: 1 RELOCATING: 0
+```
+
 ## Check the current status of a cluster
 
 ```
@@ -27,29 +47,53 @@ The Elasticsearch cluster is currently: green
 $ elasticsearch-manager --cluster-hostname elasticsearch.example.com \
       --port 9200 status \
       rolling-restart
-
+Continue with rolling restart of cluster? (y/n) y
 
-Restarting Elasticsearch on node:
-Elasticsearch restarted on node:
+Restarting Elasticsearch on node: 10.0.0.1
+Elasticsearch restarted on node: 10.0.0.1
+Waiting for node to become available...
+Node back up!
 Waiting for cluster to stabilize...
 Waiting for cluster to stabilize...
 Cluster stabilized!
-Continue with rolling restart of cluster? (
+Continue with rolling restart of cluster? (y/n) y
 
-Restarting Elasticsearch on node:
-Elasticsearch restarted on node:
+Restarting Elasticsearch on node: 10.0.0.2
+Elasticsearch restarted on node: 10.0.0.2
+Waiting for node to become available...
+Waiting for node to become available...
+Node back up!
 Waiting for cluster to stabilize...
 Waiting for cluster to stabilize...
 Waiting for cluster to stabilize...
 Waiting for cluster to stabilize...
 Cluster stabilized!
-Continue with rolling restart of cluster? (yes/no) yes
 
-Restarting current cluster master, continue? (
+Restarting current cluster master, continue? (y/n) y
 
-Restarting Elasticsearch on node:
-Elasticsearch restarted on node:
+Restarting Elasticsearch on node: 10.0.0.3
+Elasticsearch restarted on node: 10.0.0.3
+Waiting for node to become available...
+Waiting for node to become available...
+Waiting for node to become available...
+Waiting for node to become available...
+Node back up!
 Waiting for cluster to stabilize...
 Cluster stabilized!
 ```
 
+## Disable routing allocation on the cluster
+```
+$ elasticsearch-manager --cluster-hostname elasticsearch.example.com \
+      --port 9200 status \
+      diable-routing
+Disabling shard routing allocation... disabled!
+```
+
+## Enable routing allocation on the cluster
+```
+$ elasticsearch-manager --cluster-hostname elasticsearch.example.com \
+      --port 9200 status \
+      enable-routing
+Enabling shard routing allocation... enabled!
+```
````
data/RELEASES.md
ADDED
```diff
@@ -0,0 +1,27 @@
+# Releases
+
+## Current
+- Adding new options
+  + -y/--y, assume yes to all questions and do not prompt
+  + -s/--sleep, set sleep interval used between checks
+
+## v0.1.2
+- Added new actions
+  + list-nodes
+  + shard-state
+  + disable-routing
+  + enable-routing
+- Begun introducing flag to print verbose messaging
+- Now updating cluster-wide `node_concurrent_recoveries` setting equal
+  to the number of shards present on the node being restarted
+- Began raising Elasitcsearch::Manager::ApiError when API request exceptions occur
+
+## v0.1.1
+- Add required user confirmation between node restarts
+- Add parameterized sleep interval between stabilization checks
+- Guarantee the current master is restarted last
+- Add wait for node availability before re-enabling route allocation
+
+## v0.1.0
+- Initial release of elasticsearch manager
+- Supported rolling-restart of cluster and printing simple cluster status (green/yellow/red)
```
data/bin/elasticsearch-manager
CHANGED
```diff
@@ -17,13 +17,13 @@ options = {}
 opt_parser = OptionParser.new do |opts|
   opts.banner = <<EOC
 elasticsearch-manager [options] <command>
-
+
 Available commands:
     rolling-restart -- Restart elasticsearch across the entire cluster, one node at a time
     list-nodes -- List IPs of nodes in the cluster
     shard-state -- Print current shard states
     status -- Check the current state of the cluster (green/yellow/red)
-
+    disable-routing -- Disable shard routing allocation
     enable-routing -- Enable shard routing allocation
 
 EOC
@@ -39,14 +39,24 @@ EOC
   end
 
   options[:timeout] = 600
-  opts.on('-t TIMEOUT', '--timeout TIMEOUT', 'Timeout for waiting for cluster to stabilize during rolling-restart [default: 600]') do |
-    options[:port] =
+  opts.on('-t TIMEOUT', '--timeout TIMEOUT', 'Timeout for waiting for cluster to stabilize during rolling-restart [default: 600]') do |t|
+    options[:port] = t
+  end
+
+  options[:sleep_interval] = 30
+  opts.on('-s SLEEP', '--sleep SLEEP', 'Sleep interval between cluster stabilize/node availability checks during rolling-restart [default: 30]') do |s|
+    options[:port] = s
   end
 
   options[:verbose] = false
   opts.on('-v', '--verbose', 'Print verbose messaging') do |v|
     options[:verbose] = v
   end
+
+  options[:assume_yes] = false
+  opts.on('-y', '--yes', 'Assume Yes to all queries and do not prompt') do |y|
+    options[:assume_yes] = y
+  end
 end
 
 opt_parser.parse!
```
data/lib/elasticsearch/manager/cmd.rb
CHANGED
```diff
@@ -31,9 +31,10 @@ module Elasticsearch
 
       timeout = opts[:timeout] || 600
       sleep_interval = opts[:sleep_interval] || 30
+      assume_yes = opts[:assume_yes].nil? ? false : opts[:assume_yes]
 
       begin
-        manager.rolling_restart(timeout, sleep_interval)
+        manager.rolling_restart(timeout, sleep_interval, assume_yes)
       rescue Elasticsearch::Manager::ApiError => e
         puts e
         return 3
```
data/lib/elasticsearch/manager/rollingrestart.rb
CHANGED
```diff
@@ -11,17 +11,17 @@ module Elasticsearch
   module Manager
 
     class ESManager
-      def rolling_restart(timeout = 600, sleep_interval = 30)
+      def rolling_restart(timeout = 600, sleep_interval = 30, assume_yes = false)
         highline = HighLine.new
         @members.each do |m|
           unless m == @leader
-            unless highline.agree('Continue with rolling restart of cluster? (y/n) ')
+            unless assume_yes || highline.agree('Continue with rolling restart of cluster? (y/n) ')
               raise UserRequestedStop, "Stopping rolling restart at user request!".colorize(:red)
             end
             restart_node(m, timeout, sleep_interval)
           end
         end
-        unless highline.agree("\nRestarting current cluster master, continue? (y/n) ")
+        unless assume_yes || highline.agree("\nRestarting current cluster master, continue? (y/n) ")
           raise UserRequestedStop, "Stopping rolling restart at user request before restarting master node!".colorize(:red)
         end
         restart_node(@leader, timeout, sleep_interval)
@@ -30,7 +30,7 @@ module Elasticsearch
       def restart_node(node_ip, timeout, sleep_interval)
         puts "\nRestarting Elasticsearch on node: #{node_ip}"
         # Pull the current node's state
-        n = @state.nodes.select { |
+        n = @state.nodes.select { |i| i.ip == node_ip }[0]
 
         raise ClusterSettingsUpdateError, "Could not disable shard routing prior to restarting node: #{node_ip}".colorize(:red) unless disable_routing
 
```
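HighLine does the interactive prompting here; `HighLine#agree` asks a yes/no question and returns a boolean. A standalone sketch of the guard pattern introduced above (not the gem's own code): when `assume_yes` is true, the `||` short-circuits and `agree` is never called, so the restart can run unattended.

```ruby
require 'highline'

# Minimal sketch of the confirmation guard: assume_yes (e.g. set by the new
# -y/--yes flag) skips the HighLine prompt entirely via short-circuit ||.
def confirm?(prompt, assume_yes = false)
  assume_yes || HighLine.new.agree(prompt)
end

puts confirm?('Continue with rolling restart of cluster? (y/n) ', true)  # => true, no prompt shown
puts confirm?('Continue with rolling restart of cluster? (y/n) ')        # prompts on stdin
```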
data/spec/cmd_spec.rb
CHANGED
```diff
@@ -40,6 +40,27 @@ describe 'Elasticsearch::Manager::CMD' '#rolling_restart' do
     expect(exit_code).to eql(0)
   end
 
+  it 'does a clean restart with assume yes' do
+    expect(Net::SSH).to receive(:start).with('10.110.40.133', ENV['USER']).ordered
+    expect(Net::SSH).to receive(:start).with('10.110.33.218', ENV['USER']).ordered
+    expect(Net::SSH).to receive(:start).with('10.110.38.153', ENV['USER']).ordered
+
+    allow(ssh_connection).to receive(:exec) do |arg|
+      expect(arg).to eql('sudo service elasticsearch restart')
+    end
+    expect(ssh_connection).to receive(:exec).exactly(3).times
+
+    @input << ""
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      opts = { :hostname => 'localhost', :port => '9200', :sleep_interval => 1, :assume_yes => true }
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(0)
+  end
+
   it 'throws stabilization timeout' do
     allow(ssh_connection).to receive(:exec) do |arg|
       expect(arg).to eql('sudo service elasticsearch restart')
```
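The added example drives `CMD.rolling_restart` directly with an options hash instead of going through `bin/elasticsearch-manager`. The same entry point can be called from Ruby outside the test suite; in the sketch below the `require` path is an assumption based on the gem's `lib/` layout, while the option keys mirror the spec above and the option parsing in the executable.

```ruby
require 'elasticsearch/manager'  # assumed entry point for the gem's lib/ tree

# Sketch only: run the rolling restart programmatically with the same option
# keys the spec passes to CMD.rolling_restart.
opts = {
  :hostname       => 'elasticsearch.example.com',
  :port           => '9200',
  :timeout        => 600,  # seconds to wait for the cluster to stabilize
  :sleep_interval => 30,   # seconds between stabilization/availability checks
  :assume_yes     => true  # equivalent of the new -y/--yes flag: no prompts
}

# rolling_restart returns an exit code (0 on success, 3 on ApiError).
exit Elasticsearch::Manager::CMD.rolling_restart(opts)
```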
metadata
CHANGED
```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: elasticsearch-manager
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.1.3.pre
 platform: ruby
 authors:
 - Brian Oldfield
@@ -193,7 +193,7 @@ files:
 - Gemfile.lock
 - LICENSE
 - README.md
-- REALEASES.md
+- RELEASES.md
 - bin/elasticsearch-manager
 - elasticsearch-manager.gemspec
 - lib/elasticsearch/client.rb
@@ -241,12 +241,12 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - '>='
+  - - '>'
     - !ruby/object:Gem::Version
-      version: '0'
+      version: 1.3.1
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.2
 signing_key:
 specification_version: 4
 summary: Basic managment utility for Elasticsearch
@@ -265,3 +265,4 @@ test_files:
 - spec/fixtures/state.json
 - spec/manager_spec.rb
 - spec/spec_helper.rb
+has_rdoc:
```
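On the `required_rubygems_version` change: RubyGems treats any version string containing a letter, such as `0.1.3.pre`, as a prerelease, which is why the requirement tightens to `> 1.3.1` here (older RubyGems cannot install prereleases). A quick check in plain Ruby:

```ruby
require 'rubygems'

# RubyGems classifies any version containing a letter as a prerelease.
puts Gem::Version.new('0.1.3.pre').prerelease?  # => true
puts Gem::Version.new('0.1.2').prerelease?      # => false

# Prerelease versions sort before their final release.
puts Gem::Version.new('0.1.3.pre') < Gem::Version.new('0.1.3')  # => true
```

Because prereleases are skipped by default, installing this version requires opting in, for example `gem install elasticsearch-manager --pre` or `gem install elasticsearch-manager -v 0.1.3.pre`.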
data/REALEASES.md
DELETED
File without changes