elasticsearch-extensions 0.0.18 → 0.0.19
- checksums.yaml +4 -4
- data/README.md +19 -17
- data/lib/elasticsearch/extensions/test/cluster.rb +72 -34
- data/lib/elasticsearch/extensions/version.rb +1 -1
- metadata +2 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8b6250368c4ae4b601347a41c4af300c9c539b71
+  data.tar.gz: 809f7bc9ed9ddf45c3dffda512a5c9fe4a2c8a0f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ecb428c94a77ad8578141d5b2d8a4806942e900958539999b9d3bae21549eba985296f1869a7652a6a4ca9293a27b06c4f20810d1568e97a3f2e1314aa3f31b5
+  data.tar.gz: 1ea390d043992f5ee8346d9bb1a78b1f4a527e54a7f0d076cffc22d7161005128c959384fee8126ce32d4b5a676e7d4475de5c47394fa494911ceac2a6b3ab23
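The new digests cover the `metadata.gz` and `data.tar.gz` archives packaged inside the `.gem` file. As a minimal, hypothetical verification sketch (not part of the gem), assuming the `.gem` archive has already been unpacked with `tar -xf elasticsearch-extensions-0.0.19.gem` so both files sit in the current directory:

    # check_gem_checksums.rb -- illustration only; expected digests copied from the diff above
    require 'digest'

    expected = {
      'metadata.gz' => 'ecb428c94a77ad8578141d5b2d8a4806942e900958539999b9d3bae21549eba985296f1869a7652a6a4ca9293a27b06c4f20810d1568e97a3f2e1314aa3f31b5',
      'data.tar.gz' => '1ea390d043992f5ee8346d9bb1a78b1f4a527e54a7f0d076cffc22d7161005128c959384fee8126ce32d4b5a676e7d4475de5c47394fa494911ceac2a6b3ab23'
    }

    expected.each do |file, sha512|
      actual = Digest::SHA512.file(file).hexdigest
      puts "#{file}: #{actual == sha512 ? 'OK' : 'MISMATCH'}"
    end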
data/README.md CHANGED
@@ -3,6 +3,24 @@
 This library provides a set of extensions to the
 [`elasticsearch`](https://github.com/elasticsearch/elasticsearch-ruby) Rubygem.
 
+## Installation
+
+Install the package from [Rubygems](https://rubygems.org):
+
+    gem install elasticsearch-extensions
+
+To use an unreleased version, either add it to your `Gemfile` for [Bundler](http://gembundler.com):
+
+    gem 'elasticsearch-extensions', git: 'git://github.com/elasticsearch/elasticsearch-ruby.git'
+
+or install it from a source code checkout:
+
+    git clone https://github.com/elasticsearch/elasticsearch-ruby.git
+    cd elasticsearch-ruby/elasticsearch-extensions
+    bundle install
+    rake install
+
+
 ## Extensions
 
 ### ANSI
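For the released gem, the dependency can also be declared in a `Gemfile` instead of installed by hand. This is a generic Bundler sketch, not taken from the README above:

    # Gemfile -- hypothetical example pulling the released gem from Rubygems
    source 'https://rubygems.org'

    gem 'elasticsearch-extensions', '~> 0.0.19'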
@@ -69,6 +87,7 @@ You can control the cluster configuration with environment variables as well:
     TEST_CLUSTER_COMMAND=/usr/local/Cellar/elasticsearch/0.90.10/bin/elasticsearch \
     TEST_CLUSTER_PORT=9350 \
     TEST_CLUSTER_NODES=3 \
+    TEST_CLUSTER_NAME=my_testing_cluster \
     ruby -r elasticsearch -e "require 'elasticsearch/extensions/test/cluster'; Elasticsearch::Extensions::Test::Cluster.start"
 
 [Full documentation](http://rubydoc.info/gems/elasticsearch-extensions/Elasticsearch/Extensions/Test/Cluster)
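The same cluster can be driven from Ruby by passing the options directly to `Cluster.start`. A minimal sketch mirroring the environment-variable command above; the option names follow the `@option` documentation added in `test/cluster.rb` further down in this diff, and the values are the example ones from the README:

    require 'elasticsearch/extensions/test/cluster'

    # Equivalent of the TEST_CLUSTER_* variables above, passed as arguments
    Elasticsearch::Extensions::Test::Cluster.start \
      command:      '/usr/local/Cellar/elasticsearch/0.90.10/bin/elasticsearch',
      port:         9350,
      nodes:        3,
      cluster_name: 'my_testing_cluster'

    # ... run the test suite against port 9350 ...

    Elasticsearch::Extensions::Test::Cluster.stop port: 9350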
@@ -144,23 +163,6 @@ When omitted, the full code profile by [RubyProf](https://github.com/ruby-prof/r
 [Example in the Elasticsearch gem](https://github.com/elasticsearch/elasticsearch-ruby/blob/master/elasticsearch-transport/test/profile/client_benchmark_test.rb)
 
 
-## Installation
-
-Install the package from [Rubygems](https://rubygems.org):
-
-    gem install elasticsearch-extensions
-
-To use an unreleased version, either add it to your `Gemfile` for [Bundler](http://gembundler.com):
-
-    gem 'elasticsearch-extensions', git: 'git://github.com/elasticsearch/elasticsearch-ruby.git'
-
-or install it from a source code checkout:
-
-    git clone https://github.com/elasticsearch/elasticsearch-ruby.git
-    cd elasticsearch-ruby/elasticsearch-extensions
-    bundle install
-    rake install
-
 ## Development
 
 To work on the code, clone and bootstrap the main repository first --
data/lib/elasticsearch/extensions/test/cluster.rb CHANGED
@@ -1,6 +1,7 @@
 require 'timeout'
 require 'net/http'
 require 'fileutils'
+require 'socket'
 require 'uri'
 require 'json'
 require 'ansi'
@@ -31,7 +32,9 @@ module Elasticsearch
       # @see Cluster#stop Cluster.stop
       #
       module Cluster
+        @@network_host = ENV.fetch('TEST_CLUSTER_NETWORK_HOST', 'localhost')
         @@number_of_nodes = (ENV['TEST_CLUSTER_NODES'] || 2).to_i
+        @@default_cluster_name = "elasticsearch-test-#{Socket.gethostname.downcase}"
 
         # Starts a cluster
         #
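The new `@@default_cluster_name` embeds the local hostname, which helps keep clusters started on different machines from accidentally joining each other while multicast discovery is enabled. Purely as an illustration, with a made-up hostname:

    require 'socket'

    Socket.gethostname.downcase                          # => "build-01" (example value)
    "elasticsearch-test-#{Socket.gethostname.downcase}"  # => "elasticsearch-test-build-01"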
@@ -40,11 +43,18 @@ module Elasticsearch
         #
         # Use the {Cluster#stop Cluster.stop} command with the same arguments to stop this cluster.
         #
-        # @option arguments [String] :
-        # @option arguments [Integer] :nodes Number of desired nodes (default: 2)
-        # @option arguments [String] :
-        # @option arguments [String] :port Starting port number; will be auto-incremented (default: 9250)
-        # @option arguments [
+        # @option arguments [String] :cluster_name Cluster name (default: `elasticsearch_test`)
+        # @option arguments [Integer] :nodes Number of desired nodes (default: 2)
+        # @option arguments [String] :command Elasticsearch command (default: `elasticsearch`)
+        # @option arguments [String] :port Starting port number; will be auto-incremented (default: 9250)
+        # @option arguments [String] :node_name The node name (will be appended with a number)
+        # @option arguments [String] :path_data Path to the directory to store data in
+        # @option arguments [String] :path_work Path to the directory with auxiliary files
+        # @option arguments [String] :path_logs Path to the directory with log files
+        # @option arguments [Boolean] :multicast_enabled Whether multicast is enabled (default: true)
+        # @option arguments [Integer] :timeout Timeout when starting the cluster (default: 30)
+        # @option arguments [String] :network_host The host that nodes will bind on and publish to
+        # @option arguments [Boolean] :clear Wipe out cluster content on startup (default: true)
         #
         # You can also use environment variables to set these options.
         #
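Several of the newly documented options are exercised in the following sketch; the paths and names are placeholders, not defaults from the gem:

    require 'elasticsearch/extensions/test/cluster'

    Elasticsearch::Extensions::Test::Cluster.start \
      nodes:     1,
      node_name: 'my_project',
      path_data: '/tmp/my_project_test_data',
      path_logs: '/tmp/my_project_test_logs',
      timeout:   60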
@@ -66,16 +76,20 @@ module Elasticsearch
         # @see Cluster#stop Cluster.stop
         #
         def start(arguments={})
-          @@number_of_nodes = (ENV
-
-          arguments[:command]
-          arguments[:port]
-          arguments[:cluster_name]
-          arguments[:
-          arguments[:
-          arguments[:path_work]
-          arguments[:
-          arguments[:
+          @@number_of_nodes = ( ENV.fetch('TEST_CLUSTER_NODES', arguments[:nodes] || 2) ).to_i
+
+          arguments[:command]           ||= ENV.fetch('TEST_CLUSTER_COMMAND', 'elasticsearch')
+          arguments[:port]              ||= (ENV.fetch('TEST_CLUSTER_PORT', 9250).to_i)
+          arguments[:cluster_name]      ||= (ENV.fetch('TEST_CLUSTER_NAME', @@default_cluster_name).chomp)
+          arguments[:node_name]         ||= ENV.fetch('TEST_CLUSTER_NODE_NAME', 'node')
+          arguments[:path_data]         ||= ENV.fetch('TEST_CLUSTER_DATA', '/tmp/elasticsearch_test')
+          arguments[:path_work]         ||= ENV.fetch('TEST_CLUSTER_TMP', '/tmp')
+          arguments[:path_logs]         ||= ENV.fetch('TEST_CLUSTER_LOGS', '/var/log/elasticsearch')
+          arguments[:es_params]         ||= ENV.fetch('TEST_CLUSTER_PARAMS', '')
+          arguments[:multicast_enabled] ||= ENV.fetch('TEST_CLUSTER_MULTICAST', 'true')
+          arguments[:timeout]           ||= (ENV.fetch('TEST_CLUSTER_TIMEOUT', 30).to_i)
+          arguments[:network_host]      ||= @@network_host
+          arguments[:clear]             ||= true
 
           # Make sure `cluster_name` is not dangerous
           if arguments[:cluster_name] =~ /^[\/\\]?$/
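One subtlety in the defaults above: for most options an explicit argument takes precedence over the environment variable (because of `||=`), but the node count is read from `TEST_CLUSTER_NODES` first and only falls back to `arguments[:nodes]`. A small illustration, with made-up values:

    ENV['TEST_CLUSTER_NODES'] = '3'

    Elasticsearch::Extensions::Test::Cluster.start port: 9400, nodes: 5
    # Starts 3 nodes (TEST_CLUSTER_NODES wins over the :nodes argument),
    # on ports 9400, 9401 and 9402 (the :port argument wins over TEST_CLUSTER_PORT)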
@@ -88,8 +102,8 @@ module Elasticsearch
             return false
           end
 
-          # Wipe out data for this cluster name
-          FileUtils.rm_rf "#{arguments[:path_data]}/#{arguments[:cluster_name]}"
+          # Wipe out data for this cluster name if requested
+          FileUtils.rm_rf "#{arguments[:path_data]}/#{arguments[:cluster_name]}" if arguments[:clear]
 
           print "Starting ".ansi(:faint) +
                 @@number_of_nodes.to_s.ansi(:bold, :faint) +
@@ -99,7 +113,7 @@ module Elasticsearch
 
           @@number_of_nodes.times do |n|
             n += 1
-
+            command = <<-COMMAND
               #{arguments[:command]} \
                 -D es.foreground=yes \
                 -D es.cluster.name=#{arguments[:cluster_name]} \
@@ -107,16 +121,24 @@ module Elasticsearch
                 -D es.http.port=#{arguments[:port].to_i + (n-1)} \
                 -D es.path.data=#{arguments[:path_data]} \
                 -D es.path.work=#{arguments[:path_work]} \
+                -D es.path.logs=#{arguments[:path_logs]} \
                 -D es.cluster.routing.allocation.disk.threshold_enabled=false \
-                -D es.network.host
-                -D es.discovery.zen.ping.multicast.enabled
-                -D es.script.
+                -D es.network.host=#{@@network_host} \
+                -D es.discovery.zen.ping.multicast.enabled=#{arguments[:multicast_enabled]} \
+                -D es.script.inline=on \
+                -D es.script.indexed=on \
                 -D es.node.test=true \
+                -D es.node.testattr=test \
                 -D es.node.bench=true \
+                -D es.path.repo=/tmp \
+                -D es.repositories.url.allowed_urls=http://snapshot.test* \
                 -D es.logger.level=DEBUG \
                 #{arguments[:es_params]} \
                 > /dev/null
             COMMAND
+            STDERR.puts command.gsub(/ {1,}/, ' ') if ENV['DEBUG']
+
+            pid = Process.spawn(command)
             Process.detach pid
             pids << pid
           end
@@ -146,9 +168,10 @@ module Elasticsearch
         #
         def stop(arguments={})
           arguments[:port] ||= (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
+          arguments[:network_host] ||= ENV.fetch('TEST_CLUSTER_NETWORK_HOST', @@network_host)
 
           nodes = begin
-            JSON.parse(Net::HTTP.get(URI("http
+            JSON.parse(Net::HTTP.get(URI("http://#{arguments[:network_host]}:#{arguments[:port]}/_nodes/?process")))
           rescue Exception => e
             STDERR.puts "[!] Exception raised when stopping the cluster: #{e.inspect}".ansi(:red)
             nil
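With the new `:network_host` argument, a cluster that publishes on a non-default address can be shut down explicitly; a short hypothetical call:

    Elasticsearch::Extensions::Test::Cluster.stop port: 9350, network_host: '127.0.0.1'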
@@ -161,10 +184,25 @@ module Elasticsearch
           unless pids.empty?
             print "\nStopping Elasticsearch nodes... ".ansi(:faint)
             pids.each_with_index do |pid, i|
-
-
-
-
+              ['INT','KILL'].each do |signal|
+                begin
+                  Process.kill signal, pid
+                rescue Exception => e
+                  print "[#{e.class}] PID #{pid} not found. ".ansi(:red)
+                end
+
+                # Give the system some breathing space to finish...
+                sleep 1
+
+                # Check that pid really is dead
+                begin
+                  Process.getpgid( pid )
+                  # `getpgid` will raise error if pid is dead, so if we get here, try next signal.
+                  next
+                rescue Errno::ESRCH
+                  print "stopped PID #{pid} with #{signal} signal. ".ansi(:green)
+                  break # pid is dead
+                end
               end
             end
             puts
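The shutdown loop above escalates from `INT` to `KILL` and uses `Process.getpgid` as a liveness probe. A standalone sketch of that probe (independent of the gem), using a throwaway `sleep` process:

    pid = Process.spawn('sleep 60')
    Process.detach pid          # reap the child once it exits, as Cluster.start does

    Process.kill 'INT', pid
    sleep 1

    begin
      Process.getpgid(pid)      # raises Errno::ESRCH once the process is gone
      puts "PID #{pid} is still running"
    rescue Errno::ESRCH
      puts "PID #{pid} was stopped"
    end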
@@ -184,7 +222,7 @@ module Elasticsearch
         #
         def running?(arguments={})
           port         = arguments[:on] || (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
-          cluster_name = arguments[:as] ||
+          cluster_name = arguments[:as] || (ENV.fetch('TEST_CLUSTER_NAME', @@default_cluster_name).chomp)
 
           if cluster_health = Timeout::timeout(0.25) { __get_cluster_health(port) } rescue nil
             return cluster_health['cluster_name'] == cluster_name && \
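The predicate can be used in test setup code to avoid starting a second cluster; a short usage sketch reusing the example name and port from the README section above:

    unless Elasticsearch::Extensions::Test::Cluster.running? on: 9350, as: 'my_testing_cluster'
      Elasticsearch::Extensions::Test::Cluster.start port: 9350, cluster_name: 'my_testing_cluster'
    end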
@@ -219,18 +257,18 @@ module Elasticsearch
         # @return Boolean
         #
         def __wait_for_status(status='green', port=9250, timeout=30)
-          uri = URI("http
+          uri = URI("http://#{@@network_host}:#{port}/_cluster/health?wait_for_status=#{status}")
 
           Timeout::timeout(timeout) do
             loop do
               response = begin
                 JSON.parse(Net::HTTP.get(uri))
               rescue Exception => e
-                puts e.inspect if ENV['DEBUG']
+                STDERR.puts e.inspect if ENV['DEBUG']
                 nil
               end
 
-              puts response.inspect if ENV['DEBUG']
+              STDERR.puts response.inspect if response && ENV['DEBUG']
 
               if response && response['status'] == status && ( @@number_of_nodes.nil? || @@number_of_nodes == response['number_of_nodes'].to_i )
                 __print_cluster_info(port) and break
@@ -249,9 +287,9 @@ module Elasticsearch
         # @api private
         #
         def __print_cluster_info(port)
-          health = JSON.parse(Net::HTTP.get(URI("http
-          nodes  = JSON.parse(Net::HTTP.get(URI("http
-          master = JSON.parse(Net::HTTP.get(URI("http
+          health = JSON.parse(Net::HTTP.get(URI("http://#{@@network_host}:#{port}/_cluster/health")))
+          nodes  = JSON.parse(Net::HTTP.get(URI("http://#{@@network_host}:#{port}/_nodes/process,http")))
+          master = JSON.parse(Net::HTTP.get(URI("http://#{@@network_host}:#{port}/_cluster/state")))['master_node']
 
           puts "\n",
                ('-'*80).ansi(:faint),
@@ -275,7 +313,7 @@ module Elasticsearch
         # @api private
         #
         def __get_cluster_health(port=9250)
-          uri = URI("http
+          uri = URI("http://#{@@network_host}:#{port}/_cluster/health")
           if response = Net::HTTP.get(uri) rescue nil
             return JSON.parse(response)
           end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: elasticsearch-extensions
 version: !ruby/object:Gem::Version
-  version: 0.0.18
+  version: 0.0.19
 platform: ruby
 authors:
 - Karel Minarik
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-
+date: 2015-10-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ansi