logstash-output-elasticsearch 1.1.0-java → 2.0.0.beta4-java

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/CHANGELOG.md +10 -3
  4. data/README.md +4 -4
  5. data/lib/logstash/outputs/elasticsearch/http_client.rb +144 -0
  6. data/lib/logstash/outputs/elasticsearch.rb +93 -319
  7. data/logstash-output-elasticsearch.gemspec +1 -3
  8. data/spec/es_spec_helper.rb +38 -34
  9. data/spec/integration/outputs/create_spec.rb +56 -0
  10. data/spec/integration/outputs/index_spec.rb +5 -7
  11. data/spec/integration/outputs/retry_spec.rb +118 -126
  12. data/spec/integration/outputs/routing_spec.rb +5 -33
  13. data/spec/integration/outputs/secure_spec.rb +4 -9
  14. data/spec/integration/outputs/templates_spec.rb +85 -91
  15. data/spec/integration/outputs/update_spec.rb +41 -46
  16. data/spec/unit/outputs/elasticsearch/protocol_spec.rb +45 -36
  17. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +3 -4
  18. data/spec/unit/outputs/elasticsearch_spec.rb +2 -151
  19. data/spec/unit/outputs/elasticsearch_ssl_spec.rb +38 -63
  20. metadata +67 -101
  21. data/lib/logstash/outputs/elasticsearch/protocol.rb +0 -333
  22. data/lib/logstash-output-elasticsearch_jars.rb +0 -5
  23. data/spec/integration/outputs/elasticsearch/node_spec.rb +0 -36
  24. data/spec/integration/outputs/transport_create_spec.rb +0 -94
  25. data/vendor/jar-dependencies/runtime-jars/antlr-runtime-3.5.jar +0 -0
  26. data/vendor/jar-dependencies/runtime-jars/asm-4.1.jar +0 -0
  27. data/vendor/jar-dependencies/runtime-jars/asm-commons-4.1.jar +0 -0
  28. data/vendor/jar-dependencies/runtime-jars/elasticsearch-1.7.0.jar +0 -0
  29. data/vendor/jar-dependencies/runtime-jars/lucene-analyzers-common-4.10.4.jar +0 -0
  30. data/vendor/jar-dependencies/runtime-jars/lucene-core-4.10.4.jar +0 -0
  31. data/vendor/jar-dependencies/runtime-jars/lucene-grouping-4.10.4.jar +0 -0
  32. data/vendor/jar-dependencies/runtime-jars/lucene-highlighter-4.10.4.jar +0 -0
  33. data/vendor/jar-dependencies/runtime-jars/lucene-join-4.10.4.jar +0 -0
  34. data/vendor/jar-dependencies/runtime-jars/lucene-memory-4.10.4.jar +0 -0
  35. data/vendor/jar-dependencies/runtime-jars/lucene-misc-4.10.4.jar +0 -0
  36. data/vendor/jar-dependencies/runtime-jars/lucene-queries-4.10.4.jar +0 -0
  37. data/vendor/jar-dependencies/runtime-jars/lucene-queryparser-4.10.4.jar +0 -0
  38. data/vendor/jar-dependencies/runtime-jars/lucene-sandbox-4.10.4.jar +0 -0
  39. data/vendor/jar-dependencies/runtime-jars/lucene-spatial-4.10.4.jar +0 -0
  40. data/vendor/jar-dependencies/runtime-jars/lucene-suggest-4.10.4.jar +0 -0
  41. data/vendor/jar-dependencies/runtime-jars/spatial4j-0.4.1.jar +0 -0
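
Read together, the file list above points to the headline change in 2.0.0.beta4: the node/transport protocols and their vendored Elasticsearch/Lucene jars are dropped (protocol.rb and the runtime jars are removed, http_client.rb is added), and the spec diffs below show the matching config rename from `host`/`protocol` to `hosts`. As a rough before/after sketch of an output block (host and index values are illustrative, not taken from this diff):

    # 1.1.0-style output: protocol selectable, single `host`
    output {
      elasticsearch {
        host => "127.0.0.1"
        port => 9200
        protocol => "http"
        index => "logstash-%{+YYYY.MM.dd}"
      }
    }

    # 2.0.0.beta4-style output: HTTP only, `host` renamed to `hosts`
    output {
      elasticsearch {
        hosts => "127.0.0.1"
        port => 9200
        index => "logstash-%{+YYYY.MM.dd}"
      }
    }
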
data/spec/es_spec_helper.rb
@@ -9,57 +9,61 @@ CONTAINER_NAME = "logstash-output-elasticsearch-#{rand(999).to_s}"
  CONTAINER_IMAGE = "elasticsearch"
  CONTAINER_TAG = "1.6"

- module ESHelper
+ DOCKER_INTEGRATION = ENV["DOCKER_INTEGRATION"]

+ module ESHelper
  def get_host
- Longshoreman.new.get_host_ip
+ DOCKER_INTEGRATION ? Longshoreman.new.get_host_ip : "127.0.0.1"
  end

- def get_port(protocol)
+ def get_port
+ return 9200 unless DOCKER_INTEGRATION
+
  container = Longshoreman::Container.new
  container.get(CONTAINER_NAME)
- case protocol
- when "http"
- container.rport(9200)
- when "transport", "node"
- container.rport(9300)
- end
+ container.rport(9200)
  end

  def get_client
- Elasticsearch::Client.new(:host => "#{get_host}:#{get_port('http')}")
+ Elasticsearch::Client.new(:host => "#{get_host}:#{get_port}")
  end
  end

+
  RSpec.configure do |config|
  config.include ESHelper

- # this :all hook gets run before every describe block that is tagged with :integration => true.
- config.before(:all, :integration => true) do
- # check if container exists already before creating new one.
- begin
- ls = Longshoreman::new
- ls.container.get(CONTAINER_NAME)
- rescue Docker::Error::NotFoundError
- Longshoreman.new("#{CONTAINER_IMAGE}:#{CONTAINER_TAG}", CONTAINER_NAME)
- # TODO(talevy): verify ES is running instead of static timeout
- sleep 10
+
+ if DOCKER_INTEGRATION
+ # this :all hook gets run before every describe block that is tagged with :integration => true.
+ config.before(:all, :integration => true) do
+
+
+ # check if container exists already before creating new one.
+ begin
+ ls = Longshoreman::new
+ ls.container.get(CONTAINER_NAME)
+ rescue Docker::Error::NotFoundError
+ Longshoreman.new("#{CONTAINER_IMAGE}:#{CONTAINER_TAG}", CONTAINER_NAME)
+ # TODO(talevy): verify ES is running instead of static timeout
+ sleep 10
+ end
  end
- end

- # we want to do a final cleanup after all :integration runs,
- # but we don't want to clean up before the last block.
- # This is a final blind check to see if the ES docker container is running and
- # needs to be cleaned up. If no container can be found and/or docker is not
- # running on the system, we do nothing.
- config.after(:suite) do
- # only cleanup docker container if system has docker and the container is running
- begin
- ls = Longshoreman::new
- ls.container.get(CONTAINER_NAME)
- ls.cleanup
- rescue Docker::Error::NotFoundError, Excon::Errors::SocketError
- # do nothing
+ # we want to do a final cleanup after all :integration runs,
+ # but we don't want to clean up before the last block.
+ # This is a final blind check to see if the ES docker container is running and
+ # needs to be cleaned up. If no container can be found and/or docker is not
+ # running on the system, we do nothing.
+ config.after(:suite) do
+ # only cleanup docker container if system has docker and the container is running
+ begin
+ ls = Longshoreman::new
+ ls.container.get(CONTAINER_NAME)
+ ls.cleanup
+ rescue Docker::Error::NotFoundError, Excon::Errors::SocketError
+ # do nothing
+ end
  end
  end
  end
data/spec/integration/outputs/create_spec.rb (new file)
@@ -0,0 +1,56 @@
+ require_relative "../../../spec/es_spec_helper"
+
+ describe "client create actions", :integration => true do
+ require "logstash/outputs/elasticsearch"
+ require "elasticsearch"
+
+ def get_es_output(action, id = nil)
+ settings = {
+ "manage_template" => true,
+ "index" => "logstash-create",
+ "template_overwrite" => true,
+ "hosts" => get_host(),
+ "port" => get_port(),
+ "action" => action
+ }
+ settings['document_id'] = id unless id.nil?
+ LogStash::Outputs::ElasticSearch.new(settings)
+ end
+
+ before :each do
+ @es = get_client
+ # Delete all templates first.
+ # Clean ES of data before we start.
+ @es.indices.delete_template(:name => "*")
+ # This can fail if there are no indexes, ignore failure.
+ @es.indices.delete(:index => "*") rescue nil
+ end
+
+ context "when action => create" do
+ it "should create new documents with or without id" do
+ subject = get_es_output("create", "id123")
+ subject.register
+ subject.receive(LogStash::Event.new("message" => "sample message here"))
+ subject.buffer_flush(:final => true)
+ @es.indices.refresh
+ # Wait or fail until everything's indexed.
+ Stud::try(3.times) do
+ r = @es.search
+ insist { r["hits"]["total"] } == 1
+ end
+ end
+
+ it "should create new documents without id" do
+ subject = get_es_output("create")
+ subject.register
+ subject.receive(LogStash::Event.new("message" => "sample message here"))
+ subject.buffer_flush(:final => true)
+ @es.indices.refresh
+ # Wait or fail until everything's indexed.
+ Stud::try(3.times) do
+ r = @es.search
+ insist { r["hits"]["total"] } == 1
+ end
+ end
+ end
+ end
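
The new create_spec drives the output through its Ruby API; expressed as a pipeline config, the same settings hash would look roughly like this (host is illustrative, the other values mirror the spec; per Elasticsearch semantics, `create` fails if a document with the given id already exists):

    output {
      elasticsearch {
        hosts => "127.0.0.1"          # illustrative; the spec uses the test helper's host
        port => 9200
        index => "logstash-create"
        action => "create"
        document_id => "id123"        # optional; omit to let Elasticsearch assign an id
        manage_template => true
        template_overwrite => true
      }
    }
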
data/spec/integration/outputs/index_spec.rb
@@ -13,7 +13,7 @@ shared_examples "an indexer" do
  pipeline = LogStash::Pipeline.new(config)
  pipeline.run

- index_url = "http://#{get_host}:#{get_port('http')}/#{index}"
+ index_url = "http://#{get_host}:#{get_port}/#{index}"

  ftw = FTW::Agent.new
  ftw.post!("#{index_url}/_refresh")
@@ -52,9 +52,8 @@ describe "an indexer with custom index_type", :integration => true do
  }
  output {
  elasticsearch {
- host => "#{get_host()}"
- port => "#{get_port('http')}"
- protocol => "http"
+ hosts => "#{get_host()}"
+ port => "#{get_port}"
  index => "#{index}"
  flush_size => #{flush_size}
  }
@@ -77,9 +76,8 @@ describe "an indexer with no type value set (default to logs)", :integration =>
  }
  output {
  elasticsearch {
- host => "#{get_host()}"
- port => "#{get_port('http')}"
- protocol => "http"
+ hosts => "#{get_host()}"
+ port => "#{get_port}"
  index => "#{index}"
  flush_size => #{flush_size}
  }
data/spec/integration/outputs/retry_spec.rb
@@ -11,146 +11,138 @@ describe "failures in bulk class expected behavior", :integration => true do
  let(:max_retries) { 3 }

  def mock_actions_with_response(*resp)
- LogStash::Outputs::Elasticsearch::Protocols::HTTPClient
- .any_instance.stub(:bulk).and_return(*resp)
- LogStash::Outputs::Elasticsearch::Protocols::NodeClient
+ LogStash::Outputs::Elasticsearch::HttpClient
  .any_instance.stub(:bulk).and_return(*resp)
  end

- ["transport", "http"].each do |protocol|
- context "with protocol => #{protocol}" do
- subject! do
- settings = {
- "manage_template" => true,
- "index" => "logstash-2014.11.17",
- "template_overwrite" => true,
- "protocol" => protocol,
- "host" => get_host(),
- "port" => get_port(protocol),
- "retry_max_items" => 10,
- "retry_max_interval" => 1,
- "max_retries" => max_retries
- }
- next LogStash::Outputs::ElasticSearch.new(settings)
- end
+ subject! do
+ settings = {
+ "manage_template" => true,
+ "index" => "logstash-2014.11.17",
+ "template_overwrite" => true,
+ "hosts" => get_host(),
+ "port" => get_port(),
+ "retry_max_items" => 10,
+ "retry_max_interval" => 1,
+ "max_retries" => max_retries
+ }
+ next LogStash::Outputs::ElasticSearch.new(settings)
+ end

- before :each do
- # Delete all templates first.
- require "elasticsearch"
+ before :each do
+ # Delete all templates first.
+ require "elasticsearch"

- # Clean ES of data before we start.
- @es = get_client
- @es.indices.delete_template(:name => "*")
- @es.indices.delete(:index => "*")
- @es.indices.refresh
- end
+ # Clean ES of data before we start.
+ @es = get_client
+ @es.indices.delete_template(:name => "*")
+ @es.indices.delete(:index => "*")
+ @es.indices.refresh
+ end

- it "should return no errors if all bulk actions are successful" do
- mock_actions_with_response({"errors" => false})
- expect(subject).to receive(:submit).with([action1, action2]).once.and_call_original
- subject.register
- subject.receive(event1)
- subject.receive(event2)
- subject.buffer_flush(:final => true)
- sleep(2)
- end
+ it "should return no errors if all bulk actions are successful" do
+ mock_actions_with_response({"errors" => false})
+ expect(subject).to receive(:submit).with([action1, action2]).once.and_call_original
+ subject.register
+ subject.receive(event1)
+ subject.receive(event2)
+ subject.buffer_flush(:final => true)
+ sleep(2)
+ end

- it "should raise exception and be retried by stud::buffer" do
- call_count = 0
- expect(subject).to receive(:submit).with([action1]).exactly(3).times do
- if (call_count += 1) <= 2
- raise "error first two times"
- else
- {"errors" => false}
- end
- end
- subject.register
- subject.receive(event1)
- subject.teardown
+ it "should raise exception and be retried by stud::buffer" do
+ call_count = 0
+ expect(subject).to receive(:submit).with([action1]).exactly(3).times do
+ if (call_count += 1) <= 2
+ raise "error first two times"
+ else
+ {"errors" => false}
  end
+ end
+ subject.register
+ subject.receive(event1)
+ subject.teardown
+ end

- it "should retry actions with response status of 503" do
- mock_actions_with_response({"errors" => true, "statuses" => [200, 200, 503, 503]},
- {"errors" => true, "statuses" => [200, 503]},
- {"errors" => false})
- expect(subject).to receive(:submit).with([action1, action1, action1, action2]).ordered.once.and_call_original
- expect(subject).to receive(:submit).with([action1, action2]).ordered.once.and_call_original
- expect(subject).to receive(:submit).with([action2]).ordered.once.and_call_original
-
- subject.register
- subject.receive(event1)
- subject.receive(event1)
- subject.receive(event1)
- subject.receive(event2)
- subject.buffer_flush(:final => true)
- sleep(3)
- end
+ it "should retry actions with response status of 503" do
+ mock_actions_with_response({"errors" => true, "statuses" => [200, 200, 503, 503]},
+ {"errors" => true, "statuses" => [200, 503]},
+ {"errors" => false})
+ expect(subject).to receive(:submit).with([action1, action1, action1, action2]).ordered.once.and_call_original
+ expect(subject).to receive(:submit).with([action1, action2]).ordered.once.and_call_original
+ expect(subject).to receive(:submit).with([action2]).ordered.once.and_call_original
+
+ subject.register
+ subject.receive(event1)
+ subject.receive(event1)
+ subject.receive(event1)
+ subject.receive(event2)
+ subject.buffer_flush(:final => true)
+ sleep(3)
+ end

- it "should retry actions with response status of 429" do
- mock_actions_with_response({"errors" => true, "statuses" => [429]},
- {"errors" => false})
- expect(subject).to receive(:submit).with([action1]).twice.and_call_original
- subject.register
- subject.receive(event1)
- subject.buffer_flush(:final => true)
- sleep(3)
- end
+ it "should retry actions with response status of 429" do
+ mock_actions_with_response({"errors" => true, "statuses" => [429]},
+ {"errors" => false})
+ expect(subject).to receive(:submit).with([action1]).twice.and_call_original
+ subject.register
+ subject.receive(event1)
+ subject.buffer_flush(:final => true)
+ sleep(3)
+ end

- it "should retry an event until max_retries reached" do
- mock_actions_with_response({"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [429]})
- expect(subject).to receive(:submit).with([action1]).exactly(max_retries).times.and_call_original
- subject.register
- subject.receive(event1)
- subject.buffer_flush(:final => true)
- sleep(3)
- end
+ it "should retry an event until max_retries reached" do
+ mock_actions_with_response({"errors" => true, "statuses" => [429]},
+ {"errors" => true, "statuses" => [429]},
+ {"errors" => true, "statuses" => [429]},
+ {"errors" => true, "statuses" => [429]},
+ {"errors" => true, "statuses" => [429]},
+ {"errors" => true, "statuses" => [429]})
+ expect(subject).to receive(:submit).with([action1]).exactly(max_retries).times.and_call_original
+ subject.register
+ subject.receive(event1)
+ subject.buffer_flush(:final => true)
+ sleep(3)
+ end

- it "non-retryable errors like mapping errors (400) should be dropped and not be retried (unfortunetly)" do
- subject.register
- subject.receive(invalid_event)
- expect(subject).not_to receive(:retry_push)
- subject.teardown
-
- @es.indices.refresh
- sleep(5)
- Stud::try(10.times) do
- r = @es.search
- insist { r["hits"]["total"] } == 0
- end
- end
+ it "non-retryable errors like mapping errors (400) should be dropped and not be retried (unfortunately)" do
+ subject.register
+ subject.receive(invalid_event)
+ expect(subject).not_to receive(:retry_push)
+ subject.teardown
+
+ @es.indices.refresh
+ sleep(5)
+ Stud::try(10.times) do
+ r = @es.search
+ insist { r["hits"]["total"] } == 0
+ end
+ end

- it "successful requests should not be appended to retry queue" do
- subject.register
- subject.receive(event1)
- expect(subject).not_to receive(:retry_push)
- subject.teardown
-
- @es.indices.refresh
- sleep(5)
- Stud::try(10.times) do
- r = @es.search
- insist { r["hits"]["total"] } == 1
- end
- end
+ it "successful requests should not be appended to retry queue" do
+ subject.register
+ subject.receive(event1)
+ expect(subject).not_to receive(:retry_push)
+ subject.teardown
+ @es.indices.refresh
+ sleep(5)
+ Stud::try(10.times) do
+ r = @es.search
+ insist { r["hits"]["total"] } == 1
+ end
+ end

- it "should only index proper events" do
- subject.register
- subject.receive(invalid_event)
- subject.receive(event1)
- subject.teardown
-
- @es.indices.refresh
- sleep(5)
- Stud::try(10.times) do
- r = @es.search
- insist { r["hits"]["total"] } == 1
- end
- end
+ it "should only index proper events" do
+ subject.register
+ subject.receive(invalid_event)
+ subject.receive(event1)
+ subject.teardown
+
+ @es.indices.refresh
+ sleep(5)
+ Stud::try(10.times) do
+ r = @es.search
+ insist { r["hits"]["total"] } == 1
  end
  end
  end
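
As the expectations above indicate, bulk responses carrying 429/503 statuses are re-queued and retried up to `max_retries`, while 400-class mapping errors are dropped. The retry knobs the spec passes in map to output config along these lines (host is illustrative; the other values are the ones from the spec's settings hash, not recommended defaults):

    output {
      elasticsearch {
        hosts => "127.0.0.1"          # illustrative
        port => 9200
        index => "logstash-2014.11.17"
        retry_max_items => 10
        retry_max_interval => 1
        max_retries => 3
      }
    }
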
data/spec/integration/outputs/routing_spec.rb
@@ -15,7 +15,7 @@ shared_examples "a routing indexer" do
  pipeline = LogStash::Pipeline.new(config)
  pipeline.run

- index_url = "http://#{get_host()}:#{get_port('http')}/#{index}"
+ index_url = "http://#{get_host()}:#{get_port()}/#{index}"

  ftw = FTW::Agent.new
  ftw.post!("#{index_url}/_refresh")
@@ -46,9 +46,8 @@ describe "(http protocol) index events with static routing", :integration => tru
  }
  output {
  elasticsearch {
- host => "#{get_host()}"
- port => "#{get_port('http')}"
- protocol => "http"
+ hosts => "#{get_host()}"
+ port => "#{get_port()}"
  index => "#{index}"
  flush_size => #{flush_size}
  routing => "#{routing}"
@@ -73,9 +72,8 @@ describe "(http_protocol) index events with fieldref in routing value", :integra
  }
  output {
  elasticsearch {
- host => "#{get_host()}"
- port => "#{get_port('http')}"
- protocol => "http"
+ hosts => "#{get_host()}"
+ port => "#{get_port()}"
  index => "#{index}"
  flush_size => #{flush_size}
  routing => "%{message}"
@@ -86,29 +84,3 @@ describe "(http_protocol) index events with fieldref in routing value", :integra
  end
  end

- describe "(transport protocol) index events with fieldref in routing value", :integration => true do
- it_behaves_like 'a routing indexer' do
- let(:routing) { "test" }
- let(:config) {
- <<-CONFIG
- input {
- generator {
- message => "#{routing}"
- count => #{event_count}
- type => "#{type}"
- }
- }
- output {
- elasticsearch {
- host => "#{get_host()}"
- port => "#{get_port('transport')}"
- protocol => "transport"
- index => "#{index}"
- flush_size => #{flush_size}
- routing => "%{message}"
- }
- }
- CONFIG
- }
- end
- end
data/spec/integration/outputs/secure_spec.rb
@@ -4,10 +4,9 @@ describe "send messages to ElasticSearch using HTTPS", :elasticsearch_secure =>
  subject do
  require "logstash/outputs/elasticsearch"
  settings = {
- "protocol" => "http",
  "node_name" => "logstash",
  "cluster" => "elasticsearch",
- "host" => "node01",
+ "hosts" => "node01",
  "user" => "user",
  "password" => "changeme",
  "ssl" => true,
@@ -35,9 +34,8 @@ describe "connect using HTTP Authentication", :elasticsearch_secure => true do
  subject do
  require "logstash/outputs/elasticsearch"
  settings = {
- "protocol" => "http",
  "cluster" => "elasticsearch",
- "host" => "node01",
+ "hosts" => "node01",
  "user" => "user",
  "password" => "changeme",
  }
@@ -60,10 +58,9 @@ describe "send messages to ElasticSearch using HTTPS", :elasticsearch_secure =>
  subject do
  require "logstash/outputs/elasticsearch"
  settings = {
- "protocol" => "http",
  "node_name" => "logstash",
  "cluster" => "elasticsearch",
- "host" => "node01",
+ "hosts" => "node01",
  "user" => "user",
  "password" => "changeme",
  "ssl" => true,
@@ -91,9 +88,7 @@ describe "connect using HTTP Authentication", :elasticsearch_secure => true do
  subject do
  require "logstash/outputs/elasticsearch"
  settings = {
- "protocol" => "http",
- "cluster" => "elasticsearch",
- "host" => "node01",
+ "hosts" => "node01",
  "user" => "user",
  "password" => "changeme",
  }
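
The secure specs get the same treatment: `protocol` is dropped everywhere, `host` becomes `hosts`, and the final HTTP-auth example also drops `cluster`. Using only the keys visible above (the SSL certificate options are cut off in these hunks), the equivalent 2.0 output config would look roughly like:

    output {
      elasticsearch {
        hosts => "node01"
        user => "user"
        password => "changeme"
        ssl => true
      }
    }
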