elasticsearch-transport 0.4.1 → 0.4.2

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
data/README.md CHANGED
@@ -82,11 +82,11 @@ Instead of Strings, you can pass host information as an array of Hashes:
 
 Scheme, HTTP authentication credentials and URL prefixes are handled automatically:
 
-    Elasticsearch::Client.new url: 'https://myserver:4430/search'
+    Elasticsearch::Client.new url: 'https://username:password@api.server.org:4430/search'
 
 ### Logging
 
-To log requests and responses to standard output with the default logger (an instance of Ruby's {::Logger}) class):
+To log requests and responses to standard output with the default logger (an instance of Ruby's {::Logger} class):
 
     Elasticsearch::Client.new log: true
 
@@ -169,7 +169,7 @@ and only when these are not be available, will use the rest:
       include Elasticsearch::Transport::Transport::Connections::Selector::Base
 
       def select(options={})
-        connections.reject do |c|
+        connections.select do |c|
           # Try selecting the nodes with a `rack_id:x1` attribute first
           c.host[:attributes] && c.host[:attributes][:rack_id] == 'x1'
         end.sample || connections.to_a.sample
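
The corrected `select` block is the substance of this hunk. For context, a sketch of how such a selector is typically hooked up to a client; the wrapper class name and the `selector_class:` option are assumptions based on the gem's custom-selector support, not part of this diff:

    require 'elasticsearch-transport'

    # Hypothetical example: prefer nodes tagged with `rack_id: 'x1'`, fall back to any node
    class RackIdSelector
      include Elasticsearch::Transport::Transport::Connections::Selector::Base

      def select(options={})
        connections.select do |c|
          c.host[:attributes] && c.host[:attributes][:rack_id] == 'x1'
        end.sample || connections.to_a.sample
      end
    end

    # `selector_class:` is assumed here; check the client documentation for the exact option name
    Elasticsearch::Client.new hosts: ['localhost:9200'], selector_class: RackIdSelector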
@@ -207,7 +207,9 @@ You can also use a [_Curb_](https://rubygems.org/gems/curb) based transport impl
 
 It's possible to customize the _Curb_ instance by passing a block to the constructor as well:
 
-    configuration = lambda { |c| c.verbose = true }
+    configuration = lambda do |c|
+      c.verbose = true
+    end
 
     transport = Elasticsearch::Transport::Transport::HTTP::Curb.new \
       hosts: [ { host: 'localhost', port: '9200' } ],
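
The hunk ends mid-example, so for orientation, here is a sketch of the complete pattern with the reformatted lambda passed as the constructor block; the final `transport:` keyword is an assumption (the next hunk only confirms that a transport can be handed to the constructor):

    require 'elasticsearch-transport'
    require 'elasticsearch/transport/transport/http/curb'   # needs the `curb` gem

    configuration = lambda do |c|
      c.verbose = true
    end

    transport = Elasticsearch::Transport::Transport::HTTP::Curb.new \
      hosts: [ { host: 'localhost', port: '9200' } ],
      &configuration

    client = Elasticsearch::Client.new transport: transport   # `transport:` keyword assumed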
@@ -217,14 +219,14 @@ It's possible to customize the _Curb_ instance by passing a block to the constru
 
 Instead of passing the transport to the constructor, you can inject it at run time:
 
-    faraday_client = Elasticsearch::Transport::Transport::HTTP::Faraday.new \
-      hosts: [ { host: '33.33.33.10', port: '443', user: 'USERNAME', password: 'PASSWORD', scheme: 'https' } ],
-      & lambda { |f| f.instance_variable_set :@ssl, { verify: false }
-                     f.options[:ssl] = { verify: false }
-                     f.adapter :excon }
+    faraday_client = Elasticsearch::Transport::Transport::HTTP::Faraday.new \
+      hosts: [ { host: '33.33.33.10', port: '443', user: 'USERNAME', password: 'PASSWORD', scheme: 'https' } ],
+      & lambda { |f| f.instance_variable_set :@ssl, { verify: false }
+                     f.options[:ssl] = { verify: false }
+                     f.adapter :excon }
 
-    client = Elasticsearch::Client.new
-    client.transport = faraday_client
+    client = Elasticsearch::Client.new
+    client.transport = faraday_client
 
 You can write your own transport implementation easily, by including the
 {Elasticsearch::Transport::Transport::Base} module, implementing the required contract,
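
The "required contract" is not spelled out anywhere in this diff. As a rough, hypothetical sketch of what including the Base module usually looks like: `perform_request` mirrors the client calls in the test hunks below and `transport_class:` appears in the profiling tests; everything else here is illustrative only:

    require 'elasticsearch-transport'

    # Hypothetical custom transport skeleton, not the gem's reference implementation
    class MyTransport
      include Elasticsearch::Transport::Transport::Base

      def perform_request(method, path, params={}, body=nil)
        super do |connection, url|
          # Issue the request with your HTTP library of choice here and
          # wrap the result in a response object the Base module understands.
        end
      end
    end

    # client = Elasticsearch::Client.new transport_class: MyTransport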
@@ -245,9 +247,9 @@ and passing it to the client as the `serializer_class` or `serializer` parameter
 For local development, clone the repository and run `bundle install`. See `rake -T` for a list of
 available Rake tasks for running tests, generating documentation, starting a testing cluster, etc.
 
-Bug fixes and features must be accompanying by unit tests. Integration tests are written in Ruby 1.9 syntax.
+Bug fixes and features must be covered by unit tests. Integration tests are written in Ruby 1.9 syntax.
 
-Github's pull requests and issues are used to send code contributions and bug reports.
+Github's pull requests and issues are used to communicate, send bug reports and code contributions.
 
 ## The Architecture
 
data/Rakefile CHANGED
@@ -49,17 +49,15 @@ namespace :test do
     desc "Start Elasticsearch nodes for tests"
     task :start do
       $LOAD_PATH << File.expand_path('../lib', __FILE__) << File.expand_path('../test', __FILE__)
-      require 'elasticsearch/transport'
-      require 'elasticsearch/transport/extensions/test_cluster'
-      Elasticsearch::TestCluster.start
+      require 'elasticsearch/extensions/test/cluster'
+      Elasticsearch::Extensions::Test::Cluster.start
     end
 
     desc "Stop Elasticsearch nodes for tests"
     task :stop do
       $LOAD_PATH << File.expand_path('../lib', __FILE__) << File.expand_path('../test', __FILE__)
-      require 'elasticsearch/transport'
-      require 'elasticsearch/transport/extensions/test_cluster'
-      Elasticsearch::TestCluster.stop
+      require 'elasticsearch/extensions/test/cluster'
+      Elasticsearch::Extensions::Test::Cluster.stop
     end
   end
 end
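
The same calls work outside Rake as well. A sketch of driving the test cluster from the elasticsearch-extensions gem directly, assuming it honours the same TEST_CLUSTER_* environment variables as the removed Elasticsearch::TestCluster helper (see the deleted file at the bottom of this diff):

    require 'elasticsearch/extensions/test/cluster'

    ENV['TEST_CLUSTER_PORT'] ||= '9250'   # the updated test suite below reads this too

    Elasticsearch::Extensions::Test::Cluster.start
    # ... run the integration tests with SERVER=true ...
    Elasticsearch::Extensions::Test::Cluster.stop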
@@ -29,6 +29,8 @@ Gem::Specification.new do |s|
   s.add_development_dependency "bundler", "> 1"
   s.add_development_dependency "rake"
 
+  s.add_development_dependency "elasticsearch-extensions"
+
   s.add_development_dependency "ansi"
   s.add_development_dependency "shoulda-context"
   s.add_development_dependency "mocha"
@@ -40,7 +42,7 @@ Gem::Specification.new do |s|
 
   # Gems for testing integrations
   s.add_development_dependency "curb"
-  s.add_development_dependency "typhoeus"
+  s.add_development_dependency "typhoeus", '~> 0.6'
 
   # Prevent unit test failures on Ruby 1.8
   if defined?(RUBY_VERSION) && RUBY_VERSION < '1.9'
@@ -1,5 +1,5 @@
 module Elasticsearch
   module Transport
-    VERSION = "0.4.1"
+    VERSION = "0.4.2"
   end
 end
@@ -2,12 +2,13 @@ require 'test_helper'
 
 class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::IntegrationTestCase
   startup do
-    Elasticsearch::TestCluster.start if ENV['SERVER'] and not Elasticsearch::TestCluster.running?
+    Elasticsearch::Extensions::Test::Cluster.start if ENV['SERVER'] and not Elasticsearch::Extensions::Test::Cluster.running?
   end
 
   context "Elasticsearch client" do
     setup do
-      system "curl -X DELETE http://localhost:9250/_all > /dev/null 2>&1"
+      @port = (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
+      system "curl -X DELETE http://localhost:#{@port}/_all > /dev/null 2>&1"
 
       @logger = Logger.new(STDERR)
       @logger.formatter = proc do |severity, datetime, progname, msg|
@@ -20,7 +21,7 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
         ANSI.ansi(severity[0] + ' ', color, :faint) + ANSI.ansi(msg, :white, :faint) + "\n"
       end
 
-      @client = Elasticsearch::Client.new host: 'localhost:9250'
+      @client = Elasticsearch::Client.new host: "localhost:#{@port}"
     end
 
     should "connect to the cluster" do
@@ -32,6 +33,8 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
 
     should "handle paths and URL parameters" do
       @client.perform_request 'PUT', 'myindex/mydoc/1', {routing: 'XYZ'}, {foo: 'bar'}
+      @client.perform_request 'GET', '_cluster/health?wait_for_status=green', {}
+
       response = @client.perform_request 'GET', 'myindex/mydoc/1?routing=XYZ'
       assert_equal true, response.body['exists']
       assert_equal 'bar', response.body['_source']['foo']
@@ -45,7 +48,7 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
     context "with round robin selector" do
       setup do
         @client = Elasticsearch::Client.new \
-                    hosts: %w| localhost:9250 localhost:9251 |,
+                    hosts: ["localhost:#{@port}", "localhost:#{@port+1}" ],
                     logger: @logger
       end
 
@@ -66,8 +69,9 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
 
     context "with a sick node and retry on failure" do
       setup do
+        @port = (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
         @client = Elasticsearch::Client.new \
-                    hosts: %w| localhost:9250 foobar1 |,
+                    hosts: ["localhost:#{@port}", "foobar1"],
                     logger: @logger,
                     retry_on_failure: true
       end
@@ -80,7 +84,7 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
 
       should "raise exception when it cannot get any healthy server" do
         @client = Elasticsearch::Client.new \
-                    hosts: %w| localhost:9250 foobar1 foobar2 foobar3 |,
+                    hosts: ["localhost:#{@port}", "foobar1", "foobar2", "foobar3"],
                     logger: @logger,
                     retry_on_failure: 1
 
@@ -99,7 +103,7 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
     context "with a sick node and reloading on failure" do
       setup do
         @client = Elasticsearch::Client.new \
-                    hosts: %w| localhost:9250 foobar1 foobar2 |,
+                    hosts: ["localhost:#{@port}", "foobar1", "foobar2"],
                     logger: @logger,
                     reload_on_failure: true
       end
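
Taken together, these integration-test hunks exercise the client's resilience options. A compact sketch of the combinations being tested, with placeholder hosts and the option semantics summarised from the test names:

    require 'elasticsearch-transport'
    require 'logger'

    # Retry a failed request on another node, either unconditionally or up to N times
    Elasticsearch::Client.new \
      hosts: ['localhost:9250', 'foobar1'],
      logger: Logger.new(STDERR),
      retry_on_failure: true            # or an Integer cap, e.g. retry_on_failure: 1

    # Reload the host list when a node fails
    Elasticsearch::Client.new \
      hosts: ['localhost:9250', 'foobar1', 'foobar2'],
      logger: Logger.new(STDERR),
      reload_on_failure: true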
@@ -2,16 +2,20 @@ require 'test_helper'
 
 class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::IntegrationTestCase
   startup do
-    Elasticsearch::TestCluster.start if ENV['SERVER'] and not Elasticsearch::TestCluster.running?
+    Elasticsearch::Extensions::Test::Cluster.start if ENV['SERVER'] and not Elasticsearch::Extensions::Test::Cluster.running?
   end
 
   context "Transport" do
+    setup do
+      @port = (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
+    end
+
     should "allow to customize the Faraday adapter" do
       require 'typhoeus'
       require 'typhoeus/adapters/faraday'
 
       transport = Elasticsearch::Transport::Transport::HTTP::Faraday.new \
-        :hosts => [ { :host => 'localhost', :port => '9250' } ] do |f|
+        :hosts => [ { :host => 'localhost', :port => @port } ] do |f|
           f.response :logger
           f.adapter :typhoeus
         end
@@ -25,7 +29,7 @@ class Elasticsearch::Transport::ClientIntegrationTest < Elasticsearch::Test::Int
       require 'elasticsearch/transport/transport/http/curb'
 
       transport = Elasticsearch::Transport::Transport::HTTP::Curb.new \
-        :hosts => [ { :host => 'localhost', :port => '9250' } ] do |curl|
+        :hosts => [ { :host => 'localhost', :port => @port } ] do |curl|
           curl.verbose = true
         end
 
@@ -2,26 +2,27 @@ require 'test_helper'
 
 class Elasticsearch::Transport::ClientProfilingTest < Elasticsearch::Test::ProfilingTest
   startup do
-    Elasticsearch::TestCluster.start if ENV['SERVER'] and not Elasticsearch::TestCluster.running?
+    Elasticsearch::Extensions::Test::Cluster.start if ENV['SERVER'] and not Elasticsearch::Extensions::Test::Cluster.running?
   end
 
   context "Elasticsearch client benchmark" do
     setup do
-      client = Elasticsearch::Client.new host: 'localhost:9250'
+      @port = (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
+      client = Elasticsearch::Client.new host: "localhost:#{@port}"
       client.perform_request 'DELETE', '/ruby_test_benchmark/' rescue nil
       client.perform_request 'POST', '/ruby_test_benchmark/', {index: {number_of_shards: 1, number_of_replicas: 0}}
       100.times do client.perform_request 'POST', '/ruby_test_benchmark_search/test/', {}, {foo: 'bar'}; end
       client.perform_request 'POST', '/ruby_test_benchmark_search/_refresh'
     end
     teardown do
-      client = Elasticsearch::Client.new host: 'localhost:9250'
+      client = Elasticsearch::Client.new host: "localhost:#{@port}"
       client.perform_request 'DELETE', '/ruby_test_benchmark/'
       client.perform_request 'DELETE', '/ruby_test_benchmark_search/'
     end
 
     context "with a single-node cluster" do
       setup do
-        @client = Elasticsearch::Client.new hosts: 'localhost:9250'
+        @client = Elasticsearch::Client.new hosts: "localhost:#{@port}"
       end
 
       measure "get the cluster info", count: 1_000 do
@@ -39,7 +40,7 @@ class Elasticsearch::Transport::ClientProfilingTest < Elasticsearch::Test::Profi
 
     context "with a two-node cluster" do
       setup do
-        @client = Elasticsearch::Client.new hosts: ['localhost:9250', 'localhost:9251']
+        @client = Elasticsearch::Client.new hosts: ["localhost:#{@port}", "localhost:#{@port+1}"]
       end
 
       measure "get the cluster info", count: 1_000 do
@@ -59,7 +60,7 @@ class Elasticsearch::Transport::ClientProfilingTest < Elasticsearch::Test::Profi
       setup do
         require 'curb'
         require 'elasticsearch/transport/transport/http/curb'
-        @client = Elasticsearch::Client.new host: 'localhost:9250',
+        @client = Elasticsearch::Client.new host: "localhost:#{@port}",
                                             transport_class: Elasticsearch::Transport::Transport::HTTP::Curb
       end
 
@@ -82,7 +83,7 @@ class Elasticsearch::Transport::ClientProfilingTest < Elasticsearch::Test::Profi
         require 'typhoeus/adapters/faraday'
 
         transport = Elasticsearch::Transport::Transport::HTTP::Faraday.new \
-          :hosts => [ { :host => 'localhost', :port => '9250' } ] do |f|
+          :hosts => [ { :host => 'localhost', :port => @port } ] do |f|
             f.adapter :typhoeus
           end
 
@@ -19,15 +19,16 @@ require 'mocha/setup'
 require 'ansi/code'
 require 'turn' unless ENV["TM_FILEPATH"] || ENV["NOTURN"] || RUBY_1_8
 
-require File.expand_path('../test_extensions', __FILE__)
-
 require 'require-prof' if ENV["REQUIRE_PROF"]
 require 'elasticsearch-transport'
-require 'elasticsearch/transport/extensions/test_cluster'
 require 'logger'
 
 RequireProf.print_timing_infos if ENV["REQUIRE_PROF"]
 
+require 'elasticsearch/extensions/test/cluster'
+require 'elasticsearch/extensions/test/startup_shutdown'
+require 'elasticsearch/extensions/test/profiling'
+
 class Test::Unit::TestCase
   def setup
   end
@@ -39,19 +40,19 @@ end
 module Elasticsearch
   module Test
     class IntegrationTestCase < ::Test::Unit::TestCase
-      extend IntegrationTestStartupShutdown
+      extend Elasticsearch::Extensions::Test::StartupShutdown
 
-      shutdown { Elasticsearch::TestCluster.stop if ENV['SERVER'] && started? }
+      shutdown { Elasticsearch::Extensions::Test::Cluster.stop if ENV['SERVER'] && started? }
       context "IntegrationTest" do; should "noop on Ruby 1.8" do; end; end if RUBY_1_8
     end
   end
 
   module Test
     class ProfilingTest < ::Test::Unit::TestCase
-      extend IntegrationTestStartupShutdown
-      extend ProfilingTestSupport
+      extend Elasticsearch::Extensions::Test::StartupShutdown
+      extend Elasticsearch::Extensions::Test::Profiling
 
-      shutdown { Elasticsearch::TestCluster.stop if ENV['SERVER'] && started? }
+      shutdown { Elasticsearch::Extensions::Test::Cluster.stop if ENV['SERVER'] && started? }
       context "IntegrationTest" do; should "noop on Ruby 1.8" do; end; end if RUBY_1_8
     end
   end
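
For orientation, a sketch of what the extracted StartupShutdown helper provides to these base classes, adapted from the usage example documented in the removed test/test_extensions.rb at the end of this diff (the class name is illustrative):

    require 'test/unit'
    require 'elasticsearch/extensions/test/startup_shutdown'

    class MyIntegrationTest < ::Test::Unit::TestCase
      extend Elasticsearch::Extensions::Test::StartupShutdown

      startup  { puts "Suite starting up..." }
      shutdown { puts "Suite shutting down..." }
    end

    # Per the removed helper's notes, shutdown hooks must be registered before
    # 'test/unit' is required, e.g.: at_exit { MyIntegrationTest.__run_at_exit_hooks }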
@@ -153,7 +153,10 @@ class Elasticsearch::Transport::ClientTest < Test::Unit::TestCase
 
     should "randomize hosts" do
       hosts = [ {:host => 'host1'}, {:host => 'host2'}, {:host => 'host3'}, {:host => 'host4'}, {:host => 'host5'}]
-      assert_not_equal hosts, @client.__extract_hosts(hosts, :randomize_hosts => true)
+
+      Array.any_instance.expects(:shuffle!).twice
+
+      @client.__extract_hosts(hosts, :randomize_hosts => true)
       assert_same_elements hosts, @client.__extract_hosts(hosts, :randomize_hosts => true)
     end
   end
@@ -135,10 +135,9 @@ class Elasticsearch::Transport::Transport::SnifferTest < Test::Unit::TestCase
         }
       JSON
 
-      hosts = @sniffer.hosts
+      Array.any_instance.expects(:shuffle!)
 
-      assert_not_equal ['Node 1', 'Node 2', 'Node 3', 'Node 4', 'Node 5'], hosts.map { |h| h['name'] }
-      assert_same_elements ['Node 1', 'Node 2', 'Node 3', 'Node 4', 'Node 5'], hosts.map { |h| h['name'] }
+      hosts = @sniffer.hosts
     end
 
   end
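
Both shuffle-related changes in the unit tests (here and in the client test above) follow the same reasoning: a shuffle can legitimately return the elements in their original order, so asserting inequality on the shuffled result fails intermittently. Setting a mocha expectation on the call itself is deterministic:

    # Inside a test case with mocha loaded: verify that shuffling happens,
    # rather than asserting on its (occasionally unchanged) outcome
    Array.any_instance.expects(:shuffle!)
    hosts = @sniffer.hosts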
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: elasticsearch-transport
 version: !ruby/object:Gem::Version
-  version: 0.4.1
+  version: 0.4.2
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-10-23 00:00:00.000000000 Z
+date: 2013-12-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: multi_json
@@ -75,6 +75,22 @@ dependencies:
     - - ! '>='
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: elasticsearch-extensions
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: ansi
   requirement: !ruby/object:Gem::Requirement
@@ -224,17 +240,17 @@ dependencies:
   requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
-    - - ! '>='
+    - - ~>
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '0.6'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     none: false
     requirements:
-    - - ! '>='
+    - - ~>
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '0.6'
 - !ruby/object:Gem::Dependency
   name: simplecov
   requirement: !ruby/object:Gem::Requirement
@@ -320,7 +336,6 @@ files:
 - lib/elasticsearch-transport.rb
 - lib/elasticsearch/transport.rb
 - lib/elasticsearch/transport/client.rb
-- lib/elasticsearch/transport/extensions/test_cluster.rb
 - lib/elasticsearch/transport/transport/base.rb
 - lib/elasticsearch/transport/transport/connections/collection.rb
 - lib/elasticsearch/transport/transport/connections/connection.rb
@@ -335,7 +350,6 @@ files:
 - test/integration/client_test.rb
 - test/integration/transport_test.rb
 - test/profile/client_benchmark_test.rb
-- test/test_extensions.rb
 - test/test_helper.rb
 - test/unit/client_test.rb
 - test/unit/connection_collection_test.rb
@@ -376,7 +390,6 @@ test_files:
 - test/integration/client_test.rb
 - test/integration/transport_test.rb
 - test/profile/client_benchmark_test.rb
-- test/test_extensions.rb
 - test/test_helper.rb
 - test/unit/client_test.rb
 - test/unit/connection_collection_test.rb
@@ -1,158 +0,0 @@
-require 'ansi/code'
-
-module Elasticsearch
-
-  # A convenience Ruby class for starting and stopping a separate testing cluster,
-  # to not depend on -- and not mess up -- <localhost:9200>.
-  #
-  module TestCluster
-    require 'timeout'
-    require 'net/http'
-    require 'uri'
-
-    @@number_of_nodes = 2
-    @@pids            = []
-
-    # Start a cluster
-    #
-    # Starts the desired number of nodes in test-suitable configuration (memory store, no persistence, etc).
-    #
-    # @option arguments [String]  :command      Elasticsearch command (default: `elasticsearch`).
-    # @option arguments [Integer] :count        Number of desired nodes (default: 2).
-    # @option arguments [String]  :cluster_name Cluster name (default: `elasticsearch-ruby-test`).
-    # @option arguments [String]  :port         Starting port number; will be auto-incremented (default: 9250).
-    #
-    # You can also use environment variables to set these options.
-    #
-    def start(arguments={})
-      arguments[:command] = ENV['TEST_CLUSTER_COMMAND'] || 'elasticsearch'
-
-      @@number_of_nodes = arguments[:count] if arguments[:count]
-
-      arguments[:port]         = (ENV['TEST_CLUSTER_PORT'] || 9250).to_i
-      arguments[:cluster_name] = ENV['TEST_CLUSTER_NAME'] || 'elasticsearch-ruby-test'
-      arguments[:node_name]    = 'node'
-
-      if running? :on => arguments[:port], :as => arguments[:cluster_name]
-        print ANSI.red("Elasticsearch cluster already running")
-        __wait_for_green(arguments[:port])
-        exit(0)
-      end
-
-      print ANSI.faint("Starting ") + ANSI.ansi(@@number_of_nodes.to_s, :bold, :faint) + ANSI.faint(" Elasticsearch nodes")
-
-      @@number_of_nodes.times do |n|
-        n += 1
-        pidfile = File.expand_path("tmp/elasticsearch-#{n}.pid", Dir.pwd)
-        pid = Process.spawn <<-COMMAND
-          #{arguments[:command]} \
-            -D es.foreground=yes \
-            -D es.cluster.name=#{arguments[:cluster_name]} \
-            -D es.node.name=#{arguments[:node_name]}-#{n} \
-            -D es.http.port=#{arguments[:port].to_i + (n-1)} \
-            -D es.gateway.type=none \
-            -D es.index.store.type=memory \
-            -D es.network.host=0.0.0.0 \
-            -D es.discovery.zen.ping.multicast.enabled=true \
-            -D es.pidfile=#{pidfile} \
-            > /dev/null 2>&1
-        COMMAND
-        Process.detach pid
-      end
-
-      __wait_for_green(arguments[:port])
-    end
-
-    # Stop the cluster.
-    #
-    # Gets the PID numbers from pidfiles in `$CWD/tmp` and stops any matching nodes.
-    #
-    def stop
-      pids     = __get_pids
-      pidfiles = __get_pidfiles
-
-      unless pids.empty?
-        print "Stopping Elasticsearch nodes... "
-        pids.each_with_index do |pid, i|
-          begin
-            print ANSI.green("stopped PID #{pid}. ") if Process.kill 'KILL', pid
-          rescue Exception => e
-            print ANSI.red("[#{e.class}] PID #{pid} not found. ")
-          end
-          File.delete pidfiles[i] if pidfiles[i] && File.exists?(pidfiles[i])
-        end
-        puts
-      end
-    end
-
-    # Returns true when a specific test node is running.
-    #
-    # @option arguments [Integer] :on The port on which the node is running.
-    # @option arguments [String]  :as The cluster name.
-    #
-    def running?(arguments={})
-      port         = arguments[:on] || 9250
-      cluster_name = arguments[:as] || 'elasticsearch-ruby-test'
-
-      if cluster_health = Timeout::timeout(0.25) { __get_cluster_health(port) } rescue nil
-        return cluster_health['cluster_name'] == cluster_name && \
-               cluster_health['number_of_nodes'] == @@number_of_nodes
-      end
-      return false
-    end
-
-    # Blocks the process and waits for the cluster to be in a "green" state.
-    # Prints information about the cluster on STDOUT.
-    #
-    def __wait_for_green(port=9250)
-      uri = URI("http://localhost:#{port}/_cluster/health")
-
-      Timeout::timeout(30) do
-        loop do
-          response = Net::HTTP.get(uri) rescue nil
-          if response
-            pids = __get_pids
-
-            json = MultiJson.load(response)
-            if json['status'] == 'green' && json['number_of_nodes'].to_i == @@number_of_nodes
-              puts '',
-                   ANSI.faint('-'*80),
-                   ANSI.faint(
-                     'Cluster: '.ljust(20)         + json['cluster_name'].to_s + "\n" +
-                     'Status: '.ljust(20)          + json['status'].to_s + "\n" +
-                     'Number of nodes: '.ljust(20) + json['number_of_nodes'].to_s + "\n" +
-                     'PIDs'.ljust(20)              + pids.inspect
-                   ),
-                   ANSI.faint('-'*80)
-              break
-            end
-          end
-          print ANSI.faint('.')
-          sleep 1
-        end
-      end
-    end
-
-    # Tries to load cluster health information
-    #
-    def __get_cluster_health(port=9250)
-      uri = URI("http://localhost:#{port}/_cluster/health")
-      if response = Net::HTTP.get(uri) rescue nil
-        return MultiJson.load(response)
-      end
-    end
-
-    # Returns a collection of PID numbers from pidfiles.
-    def __get_pids
-      __get_pidfiles.map { |pidfile| File.read(pidfile).to_i }.uniq
-    end
-
-    # Returns a collection of files with PID information.
-    #
-    def __get_pidfiles
-      Dir[File.expand_path('tmp/elasticsearch-*.pid', Dir.pwd)]
-    end
-
-    extend self
-  end
-end
@@ -1,139 +0,0 @@
-require 'benchmark'
-require 'ruby-prof'
-require 'ansi/code'
-require 'ansi/terminal'
-
-module Elasticsearch
-  module Test
-
-    # Startup/shutdown support for test suites
-    #
-    # Example:
-    #
-    #     class MyTest < Test::Unit::TestCase
-    #       extend IntegrationTestStartupShutdown
-    #
-    #       startup  { puts "Suite starting up..." }
-    #       shutdown { puts "Suite shutting down..." }
-    #     end
-    #
-    # *** IMPORTANT NOTE: **********************************************************
-    #
-    # You have to register the handler for shutdown before requiring 'test/unit':
-    #
-    #     # File: test_helper.rb
-    #     at_exit { MyTest.__run_at_exit_hooks }
-    #     require 'test/unit'
-    #
-    # The API follows Test::Unit 2.0
-    # <https://github.com/test-unit/test-unit/blob/master/lib/test/unit/testcase.rb>
-    #
-    module IntegrationTestStartupShutdown
-      @@started           = false
-      @@shutdown_blocks ||= []
-
-      def startup &block
-        return if started?
-        @@started = true
-        yield block if block_given?
-      end
-
-      def shutdown &block
-        @@shutdown_blocks << block if block_given?
-      end
-
-      def started?
-        !! @@started
-      end
-
-      def __run_at_exit_hooks
-        return unless started?
-        STDERR.puts ANSI.faint("Running at_exit hooks...")
-        puts ANSI.faint('-'*80)
-        @@shutdown_blocks.each { |b| b.call }
-        puts ANSI.faint('-'*80)
-      end
-    end
-
-    # Profiling support for tests with [ruby-prof](https://github.com/ruby-prof/ruby-prof)
-    #
-    # Example:
-    #
-    #     measure "divide numbers", count: 10_000 do
-    #       assert_nothing_raised { 1/2 }
-    #     end
-    #
-    # Will print out something like this along your test output:
-    #
-    #     ---------------------------------------------------------------------
-    #     Context: My benchmark should divide numbers (10000x)
-    #     mean: 0.01ms | avg: 0.01ms | max: 6.19ms
-    #     ---------------------------------------------------------------------
-    #     ...
-    #     Total: 0.313283
-    #
-    #     %self   total   self   wait   child   calls  name
-    #     25.38   0.313   0.079  0.000  0.234   1      <Object::MyTets>#__bind_1368638677_723101
-    #     14.42   0.118   0.045  0.000  0.073   20000  <Class::Time>#now
-    #      7.57   0.088   0.033  0.000  0.055   10000  Time#-
-    #     ...
-    #
-    #     PASS (0:00:00.322) test: My benchmark should divide numbers (10000x).
-    #
-    #
-    module ProfilingTestSupport
-
-      # Profiles the passed block of code.
-      #
-      #     measure "divide numbers", count: 10_000 do
-      #       assert_nothing_raised { 1/2 }
-      #     end
-      #
-      # @todo Try to make progress bar not interfere with tests
-      #
-      def measure(name, options={}, &block)
-        # require 'pry'; binding.pry
-        ___          = '-'*ANSI::Terminal.terminal_width
-        test_name    = self.name.split('::').last
-        context_name = self.context(nil) {}.first.parent.name
-        count        = Integer(ENV['COUNT'] || options[:count] || 1_000)
-        ticks        = []
-        # progress   = ANSI::Progressbar.new("#{name} (#{count}x)", count)
-
-        should "#{name} (#{count}x)" do
-          RubyProf.start
-
-          count.times do
-            ticks << Benchmark.realtime { self.instance_eval(&block) }
-            # RubyProf.pause
-            # progress.inc
-            # RubyProf.resume
-          end
-
-          result = RubyProf.stop
-          # progress.finish
-
-          total = result.threads.reduce(0) { |total,info| total += info.total_time; total }
-          mean  = (ticks.sort[(ticks.size/2).round-1])*1000
-          avg   = (ticks.inject {|sum,el| sum += el; sum}.to_f/ticks.size)*1000
-          max   = ticks.max*1000
-
-
-          result.eliminate_methods!([/Integer#times|Benchmark.realtime|ANSI::Code#.*|ANSI::ProgressBar#.*/])
-          printer = RubyProf::FlatPrinter.new(result)
-          # printer = RubyProf::GraphPrinter.new(result)
-
-          puts "\n",
-               ___,
-               'Context: ' + ANSI.bold(context_name) + ' should ' + ANSI.bold(name) + " (#{count}x)",
-               "mean: #{sprintf('%.2f', mean)}ms | " +
-               "avg: #{sprintf('%.2f', avg)}ms | " +
-               "max: #{sprintf('%.2f', max)}ms",
-               ___
-          printer.print(STDOUT, {}) unless ENV['QUIET'] || options[:quiet]
-        end
-      end
-    end
-
-  end
-end