elasticsearch-manager 0.1.2.pre.2 → 0.1.2
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/lib/elasticsearch/client/base.rb +3 -5
- data/lib/elasticsearch/manager/cmd.rb +19 -5
- data/lib/elasticsearch/manager/errors.rb +6 -0 (see the sketch after this list)
- data/lib/elasticsearch/manager/rollingrestart.rb +4 -3
- data/lib/elasticsearch/manager/version.rb +1 -1
- data/spec/cmd_spec.rb +65 -0
- data/spec/manager_spec.rb +47 -0
- data/spec/spec_helper.rb +65 -9
- metadata +3 -3
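
The errors.rb change (+6 lines) is not expanded on this page, but it introduces the exception classes referenced throughout the hunks below. A hedged sketch of what that file plausibly contains, inferred only from the class names used elsewhere in this changeset (the actual superclasses may differ):

# Hedged reconstruction -- the real file body is not shown in this diff.
module Elasticsearch
  module Manager
    class ApiError < StandardError; end
    class ClusterSettingsUpdateError < StandardError; end
  end
end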
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 663a444a1cd91b9cd92f1e0ac98244f2b7fb21a3
+  data.tar.gz: 9b2d5f1716c05bd61fed68a0ac22086924532eb9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1febf08eeceb2bc8de64f498aca26d5c29802ffad686da422c203da816304d71dccbbe328088b609b411b0e37ad421d1de9cad8ffa68c5b8511b854180cab0a7
+  data.tar.gz: 09d7e9f77f471e77fb7cbc2de7a5d48ec2c44c072c423dc1c6ddde5b71186f7421454702455e201634983230f589a02c6fa529510d279870f48c6cf752bbd6fe
data/lib/elasticsearch/client/base.rb
CHANGED
@@ -1,5 +1,6 @@
 require 'logger'
 require 'rest-client'
+require 'elasticsearch/manager/errors'
 
 module Elasticsearch
   module Client
@@ -35,9 +36,7 @@ module Elasticsearch
       begin
         return RestClient.get url, opts
       rescue Exception => e
-
-        raise e
-        raise IOError.new "Unable to complete get request: #{e}"
+        raise Elasticsearch::Manager::ApiError.new "Unable to complete get request: #{e}"
       end
     end
 
@@ -48,8 +47,7 @@ module Elasticsearch
       begin
         return RestClient.put url, body, opts
       rescue Exception => e
-
-        raise IOError.new "Unable to complete put request: #{e}"
+        raise Elasticsearch::Manager::ApiError.new "Unable to complete put request: #{e}"
      end
     end
 
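The client wrappers above now convert any RestClient failure into Elasticsearch::Manager::ApiError instead of a bare IOError, so callers only need to rescue one domain-specific error. A minimal stand-alone sketch of that pattern (safe_get and the stand-in error module are hypothetical names, not the gem's API):

require 'rest-client'

# Stand-in for the gem's error class, mirroring the require added above.
module Elasticsearch
  module Manager
    class ApiError < StandardError; end
  end
end

# Hypothetical wrapper illustrating the rescue-and-reraise pattern from the hunk.
def safe_get(url, opts = {})
  RestClient.get(url, opts)
rescue Exception => e
  # Collapse transport-level failures into a single domain error for callers.
  raise Elasticsearch::Manager::ApiError.new("Unable to complete get request: #{e}")
end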
data/lib/elasticsearch/manager/cmd.rb
CHANGED
@@ -12,17 +12,31 @@ module Elasticsearch
     def self.rolling_restart(opts)
       manager = _manager(opts)
       # Check that the cluster is stable?
-
-
+      begin
+        unless manager.cluster_stable?
+          print_cluster_status(manager, 'The cluster is currently unstable! Not proceeding with rolling-restart')
+          return 2
+        end
+
+        print "Discovering cluster members..." if opts[:verbose]
+        manager.cluster_members!
+        print "\rDiscovering cluster members... Done!\n" if opts[:verbose]
+      rescue Elasticsearch::Manager::ApiError => e
+        puts e
+        return 3
+      rescue Exception => e
+        puts e
         return 2
       end
-
-      manager.cluster_members!
-      print "\rDiscovering cluster members... Done!\n" if opts[:verbose]
+
       timeout = opts[:timeout] || 600
       sleep_interval = opts[:sleep_interval] || 30
+
       begin
         manager.rolling_restart(timeout, sleep_interval)
+      rescue Elasticsearch::Manager::ApiError => e
+        puts e
+        return 3
       rescue Exception => e
         puts e
         return 2
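With this change, rolling_restart returns 3 for Elasticsearch API errors and 2 for an unstable cluster or any other failure. A sketch of how a caller might act on those codes (the require path is an assumption, and the return value on success is not shown in this diff):

# Hypothetical wrapper script around the command shown above.
require 'elasticsearch/manager'

opts = {:hostname => 'localhost', :port => '9200', :verbose => true}
status = Elasticsearch::Manager::CMD.rolling_restart(opts)

if [2, 3].include?(status)
  # 3 means the Elasticsearch API itself errored; 2 covers instability or anything else.
  warn(status == 3 ? 'Elasticsearch API error -- check the cluster endpoints.' :
                     'Cluster unstable or unexpected failure -- rolling restart aborted.')
  exit status
end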
data/lib/elasticsearch/manager/rollingrestart.rb
CHANGED
@@ -32,7 +32,8 @@ module Elasticsearch
       # Pull the current node's state
       n = @state.nodes.select { |n| n.ip == node_ip }[0]
 
-      raise "Could not disable shard routing prior to restarting node: #{node_ip}".colorize(:red) unless disable_routing
+      raise ClusterSettingsUpdateError, "Could not disable shard routing prior to restarting node: #{node_ip}".colorize(:red) unless disable_routing
+
       Net::SSH.start(node_ip, ENV['USER']) do |ssh|
         ssh.exec 'sudo service elasticsearch restart'
       end
@@ -47,9 +48,9 @@ module Elasticsearch
 
       # Make sure the cluster is willing to concurrently recover as many
       # shards per node as this node happens to have.
-      raise "Could not update node_concurrent_recoveries prior to restarting node: #{node_ip}".colorize(:red) unless set_concurrent_recoveries(n.count_started_shards + 1)
+      raise ClusterSettingsUpdateError, "Could not update node_concurrent_recoveries prior to restarting node: #{node_ip}".colorize(:red) unless set_concurrent_recoveries(n.count_started_shards + 1)
 
-      raise "Could not re-enable shard routing following restart of node: #{node_ip}".colorize(:red) unless enable_routing
+      raise ClusterSettingsUpdateError, "Could not re-enable shard routing following restart of node: #{node_ip}".colorize(:red) unless enable_routing
 
       begin
         wait_for_stable(timeout, sleep_interval)
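The new ClusterSettingsUpdateError raises fire when disable_routing, set_concurrent_recoveries, or enable_routing report failure. Those helpers are not part of this diff, but the spec harness later in the changeset suggests they PUT transient settings to /_cluster/settings and treat the update as failed unless the cluster echoes the requested value back. A rough, self-contained sketch of that verification idea (the method name, settings key, and response shape are assumptions, not the gem's implementation):

require 'rest-client'
require 'json'

# Hypothetical illustration only -- not the gem's code.
def update_allocation(host, port, value)  # value is 'all' or 'none'
  body = {'transient' => {'cluster.routing.allocation.enable' => value}}.to_json
  resp = RestClient.put("http://#{host}:#{port}/_cluster/settings", body, :content_type => :json)
  echoed = JSON.parse(resp.body)['transient']['cluster']['routing']['allocation']['enable'] rescue nil
  # Consider the update successful only if the echoed setting matches what was requested.
  echoed == value
end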
data/spec/cmd_spec.rb
CHANGED
@@ -119,5 +119,70 @@ describe 'Elasticsearch::Manager::CMD' '#rolling_restart' do
     end
     expect(exit_code).to eql(2)
   end
+
+  it 'throws settings update error when disabling routing' do
+    opts = {:hostname => 'localhost-disable-routing-error', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(2)
+  end
+
+  it 'throws settings update error when updating recovery concurrency' do
+    opts = {:hostname => 'localhost-update-concurrent-error', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(2)
+  end
+
+  it 'handles server errors on settings update' do
+    opts = {:hostname => 'localhost-error-settings', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(3)
+  end
+
+  it 'handles server errors on state request' do
+    opts = {:hostname => 'localhost-error-state', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(3)
+  end
+
+  it 'handles server errors on health request' do
+    opts = {:hostname => 'localhost-error-health', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    exit_code = -1
+    output = capture_stdout do
+      exit_code = CMD.rolling_restart(opts)
+    end
+    expect(exit_code).to eql(3)
+  end
 end
 end
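These examples wrap the command in a capture_stdout block so its console output does not pollute the spec run; the helper itself lives in the spec support code rather than in this diff. A minimal sketch of such a helper, for reference (hypothetical, not the gem's exact implementation):

require 'stringio'

def capture_stdout
  original = $stdout
  $stdout = StringIO.new
  yield
  $stdout.string        # return whatever the block printed
ensure
  $stdout = original    # always restore the real stdout
end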
data/spec/manager_spec.rb
CHANGED
@@ -230,5 +230,52 @@ describe 'Elasticsearch::Manager::ESManager' 'routing' do
     expect { manager.rolling_restart(2, 1) }.to raise_error(Elasticsearch::Manager::UserRequestedStop)
   end
 end
+
+  it 'throws settings update error when disabling routing' do
+    manager = ESManager.new('localhost-disable-routing-error', 9200)
+    manager.cluster_members!
+    opts = {:hostname => 'localhost', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    output = capture_stdout do
+      expect { manager.rolling_restart(2, 1) }.to raise_error(Elasticsearch::Manager::ClusterSettingsUpdateError)
+    end
+  end
+
+  it 'throws settings update error when updating recovery concurrency' do
+    manager = ESManager.new('localhost-update-concurrent-error', 9200)
+    manager.cluster_members!
+    opts = {:hostname => 'localhost', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    output = capture_stdout do
+      expect { manager.rolling_restart(2, 1) }.to raise_error(Elasticsearch::Manager::ClusterSettingsUpdateError)
+    end
+  end
+
+  it 'handles server errors on settings update' do
+    manager = ESManager.new('localhost-error-settings', 9200)
+    manager.cluster_members!
+    opts = {:hostname => 'localhost', :port => '9200'}
+
+    @input << "y\n"
+    @input.rewind
+
+    output = capture_stdout do
+      expect { manager.rolling_restart(2, 1) }.to raise_error(Elasticsearch::Manager::ApiError)
+    end
+  end
+
+  it 'handles server errors on state request' do
+    manager = ESManager.new('localhost-error-state', 9200)
+
+    output = capture_stdout do
+      expect { manager.cluster_members! }.to raise_error(Elasticsearch::Manager::ApiError)
+    end
+  end
 end
 end
data/spec/spec_helper.rb
CHANGED
@@ -22,12 +22,14 @@ require 'rack'
 WebMock.disable_net_connect!(allow_localhost: false)
 DIR = File.expand_path(File.dirname(__FILE__))
 
-class RestartTimeoutRack
-  def initialize(state_success_count = 10)
+class EsApiRack
+  def initialize(state_success_count = 10, concurrency_update_fail = false, routing_update_faile = false)
     @health_call_count = 0
     @state_call_count = 0
 
     @state_success_count = state_success_count
+    @concurrency_update_fail = concurrency_update_fail
+    @routing_update_faile = routing_update_faile
   end
 
   def call(env)
@@ -44,6 +46,17 @@ class RestartTimeoutRack
       inp = env['rack.input'].read
       at = inp[/\.([\w_]+)":/,1]
       val = inp[/allocation.*":["]?([^"]+)["]?}}/,1]
+      if @routing_update_faile
+        case val
+        when 'all'
+          val = 'none'
+        when 'none'
+          val = 'all'
+        end
+      elsif @concurrency_update_fail
+        val = '2'
+      end
+
       ret = "{\"transient\":{\"cluster\":{\"routing\":{\"allocation\":{\"#{at}\":\"#{val}\"}}}}}"
     when '/_cluster/state'
       if @state_call_count < @state_success_count
@@ -57,6 +70,34 @@ class RestartTimeoutRack
   end
 end
 
+class EsApiErrorRack < EsApiRack
+  def initialize(err_health = false, err_settings = false, err_state = false)
+    super(10, false, false)
+    @err_settings = err_settings
+    @err_health = err_health
+    @err_state = err_state
+  end
+
+  def call(env)
+    ret = super(env)
+    case env['PATH_INFO']
+    when '/_cluster/health'
+      if @err_health
+        ret = [500, ret[1], ret[2]]
+      end
+    when '/_cluster/settings'
+      if @err_settings
+        ret = [500, ret[1], ret[2]]
+      end
+    when '/_cluster/state'
+      if @err_state
+        ret = [500, ret[1], ret[2]]
+      end
+    end
+    ret
+  end
+end
+
 RSpec.configure do |config|
   # rspec-expectations config goes here. You can use an alternate
   # assertion/expectation library such as wrong or the stdlib/minitest
@@ -190,22 +231,37 @@ RSpec.configure do |config|
              headers: {'Content-Type' => 'application/json'})
 
    stub_request(:put, /localhost:9200\/_cluster\/settings/).
-      to_rack(
+      to_rack(EsApiRack.new(15))
 
    stub_request(:any, /localhost-restart-timeout:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new)
    stub_request(:any, /localhost-cmd-restart-timeout:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new)
 
    stub_request(:any, /localhost-restart-stabilization:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new)
    stub_request(:any, /localhost-cmd-restart-stabilization:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new)
 
    stub_request(:any, /localhost-restart-not-available:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new(2))
    stub_request(:any, /localhost-cmd-restart-not-available:9200\//).
-      to_rack(
+      to_rack(EsApiRack.new(1))
+
+    stub_request(:any, /localhost-disable-routing-error:9200\//).
+      to_rack(EsApiRack.new(2, false, true))
+
+    stub_request(:any, /localhost-update-concurrent-error:9200\//).
+      to_rack(EsApiRack.new(2, true, false))
+
+    stub_request(:any, /localhost-error-health:9200\//).
+      to_rack(EsApiErrorRack.new(true, false, false))
+
+    stub_request(:any, /localhost-error-settings:9200\//).
+      to_rack(EsApiErrorRack.new(false, true, false))
+
+    stub_request(:any, /localhost-error-state:9200\//).
+      to_rack(EsApiErrorRack.new(false, false, true))
   end
 end
 
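The spec_helper stubs above hand every request for a fake hostname to a Rack app via WebMock's to_rack, which is what lets EsApiRack and EsApiErrorRack simulate failed settings updates and 500 responses. A small self-contained example of the same wiring (the FakeHealthRack class and fake-es host are placeholders, not part of the gem):

require 'webmock/rspec'
require 'json'

# Placeholder Rack app that always reports a green cluster.
class FakeHealthRack
  def call(env)
    [200, {'Content-Type' => 'application/json'}, [{'status' => 'green'}.to_json]]
  end
end

RSpec.configure do |config|
  config.before(:each) do
    # Any HTTP call to this made-up host is served by the Rack app above.
    stub_request(:any, /fake-es:9200\//).to_rack(FakeHealthRack.new)
  end
end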
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: elasticsearch-manager
 version: !ruby/object:Gem::Version
-  version: 0.1.2.pre.2
+  version: 0.1.2
 platform: ruby
 authors:
 - Brian Oldfield
@@ -241,9 +241,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
     version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - '
+  - - '>='
  - !ruby/object:Gem::Version
-    version:
+    version: '0'
 requirements: []
 rubyforge_project:
 rubygems_version: 2.4.6