logstash-output-elasticsearch-test 10.3.0-x86_64-linux
- checksums.yaml +7 -0
- data/CHANGELOG.md +397 -0
- data/CONTRIBUTORS +33 -0
- data/Gemfile +15 -0
- data/LICENSE +13 -0
- data/NOTICE.TXT +5 -0
- data/README.md +106 -0
- data/docs/index.asciidoc +899 -0
- data/lib/logstash/outputs/elasticsearch/common.rb +441 -0
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +167 -0
- data/lib/logstash/outputs/elasticsearch/default-ilm-policy.json +14 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template-es2x.json +95 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template-es5x.json +46 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template-es6x.json +45 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template-es7x.json +44 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template-es8x.json +44 -0
- data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb +131 -0
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +495 -0
- data/lib/logstash/outputs/elasticsearch/http_client.rb +432 -0
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +159 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +113 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +61 -0
- data/lib/logstash/outputs/elasticsearch.rb +263 -0
- data/logstash-output-elasticsearch.gemspec +33 -0
- data/spec/es_spec_helper.rb +189 -0
- data/spec/fixtures/_nodes/2x_1x.json +27 -0
- data/spec/fixtures/_nodes/5x_6x.json +81 -0
- data/spec/fixtures/_nodes/7x.json +92 -0
- data/spec/fixtures/htpasswd +2 -0
- data/spec/fixtures/nginx_reverse_proxy.conf +22 -0
- data/spec/fixtures/scripts/groovy/scripted_update.groovy +2 -0
- data/spec/fixtures/scripts/groovy/scripted_update_nested.groovy +2 -0
- data/spec/fixtures/scripts/groovy/scripted_upsert.groovy +2 -0
- data/spec/fixtures/scripts/painless/scripted_update.painless +2 -0
- data/spec/fixtures/scripts/painless/scripted_update_nested.painless +1 -0
- data/spec/fixtures/scripts/painless/scripted_upsert.painless +1 -0
- data/spec/fixtures/template-with-policy-es6x.json +48 -0
- data/spec/fixtures/template-with-policy-es7x.json +45 -0
- data/spec/fixtures/test_certs/ca/ca.crt +32 -0
- data/spec/fixtures/test_certs/ca/ca.key +51 -0
- data/spec/fixtures/test_certs/test.crt +36 -0
- data/spec/fixtures/test_certs/test.key +51 -0
- data/spec/integration/outputs/compressed_indexing_spec.rb +69 -0
- data/spec/integration/outputs/create_spec.rb +67 -0
- data/spec/integration/outputs/delete_spec.rb +65 -0
- data/spec/integration/outputs/groovy_update_spec.rb +150 -0
- data/spec/integration/outputs/ilm_spec.rb +531 -0
- data/spec/integration/outputs/index_spec.rb +178 -0
- data/spec/integration/outputs/index_version_spec.rb +102 -0
- data/spec/integration/outputs/ingest_pipeline_spec.rb +74 -0
- data/spec/integration/outputs/metrics_spec.rb +70 -0
- data/spec/integration/outputs/no_es_on_startup_spec.rb +58 -0
- data/spec/integration/outputs/painless_update_spec.rb +189 -0
- data/spec/integration/outputs/parent_spec.rb +102 -0
- data/spec/integration/outputs/retry_spec.rb +169 -0
- data/spec/integration/outputs/routing_spec.rb +61 -0
- data/spec/integration/outputs/sniffer_spec.rb +133 -0
- data/spec/integration/outputs/templates_5x_spec.rb +98 -0
- data/spec/integration/outputs/templates_spec.rb +98 -0
- data/spec/integration/outputs/update_spec.rb +116 -0
- data/spec/support/elasticsearch/api/actions/delete_ilm_policy.rb +19 -0
- data/spec/support/elasticsearch/api/actions/get_alias.rb +18 -0
- data/spec/support/elasticsearch/api/actions/get_ilm_policy.rb +18 -0
- data/spec/support/elasticsearch/api/actions/put_alias.rb +24 -0
- data/spec/support/elasticsearch/api/actions/put_ilm_policy.rb +25 -0
- data/spec/unit/http_client_builder_spec.rb +185 -0
- data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +149 -0
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +274 -0
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +250 -0
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +25 -0
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +72 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +675 -0
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +82 -0
- data/spec/unit/outputs/error_whitelist_spec.rb +54 -0
- metadata +300 -0
data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb
@@ -0,0 +1,274 @@
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/elasticsearch/http_client"
require "json"

describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
  let(:logger) { Cabin::Channel.get }
  let(:adapter) { LogStash::Outputs::ElasticSearch::HttpClient::ManticoreAdapter.new(logger) }
  let(:initial_urls) { [::LogStash::Util::SafeURI.new("http://localhost:9200")] }
  let(:options) { {:resurrect_delay => 2, :url_normalizer => proc {|u| u}} } # Shorten the delay a bit to speed up tests
  let(:es_node_versions) { [ "0.0.0" ] }
  let(:oss) { true }
  let(:valid_license) { true }

  subject { described_class.new(logger, adapter, initial_urls, options) }

  let(:manticore_double) { double("manticore a") }

  before(:each) do
    allow(::LogStash::Outputs::ElasticSearch).to receive(:oss?).and_return(oss)

    response_double = double("manticore response").as_null_object
    # Allow healthchecks
    allow(manticore_double).to receive(:head).with(any_args).and_return(response_double)
    allow(manticore_double).to receive(:get).with(any_args).and_return(response_double)
    allow(manticore_double).to receive(:close)

    allow(::Manticore::Client).to receive(:new).and_return(manticore_double)

    allow(subject).to receive(:get_es_version).with(any_args).and_return(*es_node_versions)
    allow(subject).to receive(:oss?).and_return(oss)
    allow(subject).to receive(:valid_es_license?).and_return(valid_license)
  end

  after do
    subject.close
  end

  describe "initialization" do
    it "should be successful" do
      expect { subject }.not_to raise_error
      subject.start
    end
  end

  describe "the resurrectionist" do
    before(:each) { subject.start }

    it "should start the resurrectionist when created" do
      expect(subject.resurrectionist_alive?).to eql(true)
    end

    it "should attempt to resurrect connections after the resurrect delay" do
      expect(subject).to receive(:healthcheck!).once
      sleep(subject.resurrect_delay + 1)
    end

    describe "healthcheck url handling" do
      let(:initial_urls) { [::LogStash::Util::SafeURI.new("http://localhost:9200")] }

      context "and not setting healthcheck_path" do
        it "performs the healthcheck to the root" do
          expect(adapter).to receive(:perform_request) do |url, method, req_path, _, _|
            expect(method).to eq(:head)
            expect(url.path).to be_empty
            expect(req_path).to eq("/")
          end
          subject.healthcheck!
        end
      end

      context "and setting healthcheck_path" do
        let(:healthcheck_path) { "/my/health" }
        let(:options) { super.merge(:healthcheck_path => healthcheck_path) }

        it "performs the healthcheck to the healthcheck_path" do
          expect(adapter).to receive(:perform_request) do |url, method, req_path, _, _|
            expect(method).to eq(:head)
            expect(url.path).to be_empty
            expect(req_path).to eq(healthcheck_path)
          end
          subject.healthcheck!
        end
      end
    end
  end

  describe 'resolving the address from Elasticsearch node info' do
    let(:host) { "node.elastic.co" }
    let(:ip_address) { "192.168.1.0" }
    let(:port) { 9200 }

    context 'in Elasticsearch 1.x format' do
      context 'with host and ip address' do
        let(:publish_address) { "inet[#{host}/#{ip_address}:#{port}]" }
        it 'should correctly extract the host' do
          expect(subject.address_str_to_uri(publish_address)).to eq(LogStash::Util::SafeURI.new("#{host}:#{port}"))
        end
      end
      context 'with ip address' do
        let(:publish_address) { "inet[/#{ip_address}:#{port}]" }
        it 'should correctly extract the ip address' do
          expect(subject.address_str_to_uri(publish_address)).to eq(LogStash::Util::SafeURI.new("#{ip_address}:#{port}"))
        end
      end
    end

    context 'in Elasticsearch 2.x-6.x format' do
      let(:publish_address) { "#{ip_address}:#{port}" }
      it 'should correctly extract the ip address' do
        expect(subject.address_str_to_uri(publish_address)).to eq(LogStash::Util::SafeURI.new("//#{ip_address}:#{port}"))
      end
    end

    context 'in Elasticsearch 7.x' do
      context 'with host and ip address' do
        let(:publish_address) { "#{host}/#{ip_address}:#{port}" }
        it 'should correctly extract the host' do
          expect(subject.address_str_to_uri(publish_address)).to eq(LogStash::Util::SafeURI.new("#{host}:#{port}"))
        end
      end
      context 'with ip address' do
        let(:publish_address) { "#{ip_address}:#{port}" }
        it 'should correctly extract the ip address' do
          expect(subject.address_str_to_uri(publish_address)).to eq(LogStash::Util::SafeURI.new("#{ip_address}:#{port}"))
        end
      end
    end
  end

  describe "the sniffer" do
    before(:each) { subject.start }

    it "should not start the sniffer by default" do
      expect(subject.sniffer_alive?).to eql(nil)
    end

    context "when enabled" do
      let(:options) { super.merge(:sniffing => true) }

      it "should start the sniffer" do
        expect(subject.sniffer_alive?).to eql(true)
      end
    end
  end

  describe "closing" do
    before do
      subject.start
      # Simulate a single in-use connection on the first check
      allow(adapter).to receive(:close).and_call_original
      allow(subject).to receive(:wait_for_in_use_connections).and_call_original
      allow(subject).to receive(:in_use_connections).and_return([subject.empty_url_meta()], [])
      allow(subject).to receive(:start)
      subject.close
    end

    it "should close the adapter" do
      expect(adapter).to have_received(:close)
    end

    it "should stop the resurrectionist" do
      expect(subject.resurrectionist_alive?).to eql(false)
    end

    it "should stop the sniffer" do
      # If there is no sniffer (the default) this returns nil
      expect(subject.sniffer_alive?).to be_falsey
    end

    it "should wait for in use connections to terminate" do
      expect(subject).to have_received(:wait_for_in_use_connections).once
      expect(subject).to have_received(:in_use_connections).twice
    end
  end

  describe "connection management" do
    before(:each) { subject.start }

    context "with only one URL in the list" do
      it "should use the only URL in 'with_connection'" do
        subject.with_connection do |c|
          expect(c).to eq(initial_urls.first)
        end
      end
    end

    context "with multiple URLs in the list" do
      before :each do
        allow(adapter).to receive(:perform_request).with(anything, :head, subject.healthcheck_path, {}, nil)
      end

      let(:initial_urls) { [ ::LogStash::Util::SafeURI.new("http://localhost:9200"), ::LogStash::Util::SafeURI.new("http://localhost:9201"), ::LogStash::Util::SafeURI.new("http://localhost:9202") ] }

      it "should minimize the number of connections to a single URL" do
        connected_urls = []

        # If we make twice as many requests as we have URLs we should
        # connect to each URL exactly 2 times
        (initial_urls.size * 2).times do
          u, meta = subject.get_connection
          connected_urls << u
        end

        connected_urls.each {|u| subject.return_connection(u) }
        initial_urls.each do |url|
          conn_count = connected_urls.select {|u| u == url}.size
          expect(conn_count).to eql(2)
        end
      end

      it "should correctly resurrect the dead" do
        u, m = subject.get_connection

        # The resurrectionist will call this to check on the backend
        response = double("response")
        expect(adapter).to receive(:perform_request).with(u, :head, subject.healthcheck_path, {}, nil).and_return(response)

        subject.return_connection(u)
        subject.mark_dead(u, Exception.new)

        expect(subject.url_meta(u)[:state]).to eql(:dead)
        sleep subject.resurrect_delay + 1
        expect(subject.url_meta(u)[:state]).to eql(:alive)
      end
    end
  end

  describe "version tracking" do
    let(:initial_urls) { [
      ::LogStash::Util::SafeURI.new("http://somehost:9200"),
      ::LogStash::Util::SafeURI.new("http://otherhost:9201")
    ] }

    before(:each) do
      allow(subject).to receive(:perform_request_to_url).and_return(nil)
      subject.start
    end

    it "picks the largest major version" do
      expect(subject.maximum_seen_major_version).to eq(0)
    end

    context "if there are nodes with multiple major versions" do
      let(:es_node_versions) { [ "0.0.0", "6.0.0" ] }
      it "picks the largest major version" do
        expect(subject.maximum_seen_major_version).to eq(6)
      end
    end
  end

  describe "license checking" do
    before(:each) do
      allow(subject).to receive(:health_check_request)
    end

    context "when using default logstash distribution" do
      let(:oss) { false }

      context "if ES doesn't return a valid license" do
        let(:valid_license) { false }

        it "marks the url as active" do
          subject.update_initial_urls
          expect(subject.alive_urls_count).to eq(1)
        end

        it "logs a warning" do
          expect(subject).to receive(:log_license_deprecation_warn).once
          subject.update_initial_urls
        end
      end

      context "if ES returns a valid license" do
        let(:valid_license) { true }

        it "marks the url as active" do
          subject.update_initial_urls
          expect(subject.alive_urls_count).to eq(1)
        end

        it "does not log a warning" do
          expect(subject).to_not receive(:log_license_deprecation_warn)
          subject.update_initial_urls
        end
      end
    end
  end
end
data/spec/unit/outputs/elasticsearch/http_client_spec.rb
@@ -0,0 +1,250 @@
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/elasticsearch/http_client"
require "java"

describe LogStash::Outputs::ElasticSearch::HttpClient do
  let(:ssl) { nil }
  let(:base_options) do
    opts = {
      :hosts => [::LogStash::Util::SafeURI.new("127.0.0.1")],
      :logger => Cabin::Channel.get,
      :metric => ::LogStash::Instrument::NullMetric.new(:dummy).namespace(:alsodummy)
    }

    if !ssl.nil? # Shortcut to set this
      opts[:client_settings] = {:ssl => {:enabled => ssl}}
    end

    opts
  end

  describe "Host/URL Parsing" do
    subject { described_class.new(base_options) }

    let(:true_hostname) { "my-dash.hostname" }
    let(:ipv6_hostname) { "[::1]" }
    let(:ipv4_hostname) { "127.0.0.1" }
    let(:port) { 9202 }
    let(:hostname_port) { "#{hostname}:#{port}" }
    let(:hostname_port_uri) { ::LogStash::Util::SafeURI.new("//#{hostname_port}") }
    let(:http_hostname_port) { ::LogStash::Util::SafeURI.new("http://#{hostname_port}") }
    let(:https_hostname_port) { ::LogStash::Util::SafeURI.new("https://#{hostname_port}") }
    let(:http_hostname_port_path) { ::LogStash::Util::SafeURI.new("http://#{hostname_port}/path") }

    shared_examples("proper host handling") do
      it "should properly transform a host:port string to a URL" do
        expect(subject.host_to_url(hostname_port_uri).to_s).to eq(http_hostname_port.to_s + "/")
      end

      it "should not raise an error with a / for a path" do
        expect(subject.host_to_url(::LogStash::Util::SafeURI.new("#{http_hostname_port}/"))).to eq(LogStash::Util::SafeURI.new("#{http_hostname_port}/"))
      end

      it "should parse full URLs correctly" do
        expect(subject.host_to_url(http_hostname_port).to_s).to eq(http_hostname_port.to_s + "/")
      end

      describe "ssl" do
        context "when SSL is true" do
          let(:ssl) { true }
          let(:base_options) { super.merge(:hosts => [http_hostname_port]) }

          it "should refuse to handle an http url" do
            expect {
              subject.host_to_url(http_hostname_port)
            }.to raise_error(LogStash::ConfigurationError)
          end
        end

        context "when SSL is false" do
          let(:ssl) { false }
          let(:base_options) { super.merge(:hosts => [https_hostname_port]) }

          it "should refuse to handle an https url" do
            expect {
              subject.host_to_url(https_hostname_port)
            }.to raise_error(LogStash::ConfigurationError)
          end
        end

        describe "ssl is nil" do
          let(:base_options) { super.merge(:hosts => [https_hostname_port]) }

          it "should handle an ssl url correctly when SSL is nil" do
            subject
            expect(subject.host_to_url(https_hostname_port).to_s).to eq(https_hostname_port.to_s + "/")
          end
        end
      end

      describe "path" do
        let(:url) { http_hostname_port_path }
        let(:base_options) { super.merge(:hosts => [url]) }

        it "should allow paths in a url" do
          expect(subject.host_to_url(url)).to eq(url)
        end

        context "with the path option set" do
          let(:base_options) { super.merge(:client_settings => {:path => "/otherpath"}) }

          it "should not allow paths in two places" do
            expect {
              subject.host_to_url(url)
            }.to raise_error(LogStash::ConfigurationError)
          end
        end

        context "with a path missing a leading /" do
          let(:url) { http_hostname_port }
          let(:base_options) { super.merge(:client_settings => {:path => "otherpath"}) }

          it "should automatically insert a / in front of path overlays" do
            expected = url.clone
            expected.path = url.path + "/otherpath"
            expect(subject.host_to_url(url)).to eq(expected)
          end
        end
      end
    end

    describe "a regular hostname" do
      let(:hostname) { true_hostname }
      include_examples("proper host handling")
    end

    describe "an ipv4 host" do
      let(:hostname) { ipv4_hostname }
      include_examples("proper host handling")
    end

    describe "an ipv6 host" do
      let(:hostname) { ipv6_hostname }
      include_examples("proper host handling")
    end
  end

  describe "get" do
    subject { described_class.new(base_options) }
    let(:body) { "foobar" }
    let(:path) { "/hello-id" }
    let(:get_response) {
      double("response", :body => LogStash::Json::dump({ "body" => body }))
    }

    it "returns the hash response" do
      expect(subject.pool).to receive(:get).with(path, nil).and_return(get_response)
      expect(subject.get(path)["body"]).to eq(body)
    end
  end

  describe "join_bulk_responses" do
    subject { described_class.new(base_options) }

    context "when items key is available" do
      require "json"
      let(:bulk_response) {
        LogStash::Json.load('[{
          "items": [{
            "delete": {
              "_index": "website",
              "_type": "blog",
              "_id": "123",
              "_version": 2,
              "status": 200,
              "found": true
            }
          }],
          "errors": false
        }]')
      }

      it "should be handled properly" do
        s = subject.send(:join_bulk_responses, bulk_response)
        expect(s["errors"]).to be false
        expect(s["items"].size).to be 1
      end
    end

    context "when items key is not available" do
      require "json"
      let(:bulk_response) {
        JSON.parse('[{
          "took": 4,
          "errors": false
        }]')
      }

      it "should be handled properly" do
        s = subject.send(:join_bulk_responses, bulk_response)
        expect(s["errors"]).to be false
        expect(s["items"].size).to be 0
      end
    end
  end

  describe "#bulk" do
    subject { described_class.new(base_options) }

    require "json"
    let(:message) { "hey" }
    let(:actions) { [
      ["index", {:_id=>nil, :_index=>"logstash"}, {"message"=> message}],
    ]}

    context "if a message is over TARGET_BULK_BYTES" do
      let(:target_bulk_bytes) { LogStash::Outputs::ElasticSearch::TARGET_BULK_BYTES }
      let(:message) { "a" * (target_bulk_bytes + 1) }

      it "should be handled properly" do
        allow(subject).to receive(:join_bulk_responses)
        expect(subject).to receive(:bulk_send).once do |data|
          expect(data.size).to be > target_bulk_bytes
        end
        s = subject.send(:bulk, actions)
      end
    end

    context "with two messages" do
      let(:message1) { "hey" }
      let(:message2) { "you" }
      let(:actions) { [
        ["index", {:_id=>nil, :_index=>"logstash"}, {"message"=> message1}],
        ["index", {:_id=>nil, :_index=>"logstash"}, {"message"=> message2}],
      ]}

      it "executes one bulk_send operation" do
        allow(subject).to receive(:join_bulk_responses)
        expect(subject).to receive(:bulk_send).once
        s = subject.send(:bulk, actions)
      end

      context "if one exceeds TARGET_BULK_BYTES" do
        let(:target_bulk_bytes) { LogStash::Outputs::ElasticSearch::TARGET_BULK_BYTES }
        let(:message1) { "a" * (target_bulk_bytes + 1) }

        it "executes two bulk_send operations" do
          allow(subject).to receive(:join_bulk_responses)
          expect(subject).to receive(:bulk_send).twice
          s = subject.send(:bulk, actions)
        end
      end
    end
  end

  describe "sniffing" do
    let(:client) { LogStash::Outputs::ElasticSearch::HttpClient.new(base_options.merge(client_opts)) }

    context "with sniffing enabled" do
      let(:client_opts) { {:sniffing => true, :sniffing_delay => 1 } }

      it "should start the sniffer" do
        expect(client.pool.sniffing).to be_truthy
      end
    end

    context "with sniffing disabled" do
      let(:client_opts) { {:sniffing => false} }

      it "should not start the sniffer" do
        expect(client.pool.sniffing).to be_falsey
      end
    end
  end
end
data/spec/unit/outputs/elasticsearch/template_manager_spec.rb
@@ -0,0 +1,25 @@
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/elasticsearch/http_client"
require "java"
require "json"

describe LogStash::Outputs::ElasticSearch::TemplateManager do

  describe ".default_template_path" do
    context "elasticsearch 1.x" do
      it "chooses the 2x template" do
        expect(described_class.default_template_path(1)).to match(/elasticsearch-template-es2x.json/)
      end
    end
    context "elasticsearch 2.x" do
      it "chooses the 2x template" do
        expect(described_class.default_template_path(2)).to match(/elasticsearch-template-es2x.json/)
      end
    end
    context "elasticsearch 5.x" do
      it "chooses the 5x template" do
        expect(described_class.default_template_path(5)).to match(/elasticsearch-template-es5x.json/)
      end
    end
  end
end
data/spec/unit/outputs/elasticsearch_proxy_spec.rb
@@ -0,0 +1,72 @@
require_relative "../../../spec/es_spec_helper"
require 'stud/temporary'
require "logstash/outputs/elasticsearch"
require 'manticore/client'

describe "Proxy option" do
  let(:settings) { { "hosts" => "node01" } }
  subject {
    LogStash::Outputs::ElasticSearch.new(settings)
  }

  before do
    allow(::Manticore::Client).to receive(:new).with(any_args).and_call_original
  end

  describe "valid configs" do
    before do
      subject.register
    end

    after do
      subject.close
    end

    context "when specified as a URI" do
      shared_examples("hash conversion") do |hash|
        let(:settings) { super.merge("proxy" => proxy) }

        it "should set the proxy to the correct hash value" do
          expect(::Manticore::Client).to have_received(:new) do |options|
            expect(options[:proxy]).to eq(hash)
          end
        end
      end

      describe "simple proxy" do
        let(:proxy) { LogStash::Util::SafeURI.new("http://127.0.0.1:1234") }

        include_examples("hash conversion",
          {
            :host => "127.0.0.1",
            :scheme => "http",
            :port => 1234
          }
        )
      end

      describe "a secure authed proxy" do
        let(:proxy) { LogStash::Util::SafeURI.new("https://myuser:mypass@127.0.0.1:1234") }

        include_examples("hash conversion",
          {
            :host => "127.0.0.1",
            :scheme => "https",
            :user => "myuser",
            :password => "mypass",
            :port => 1234
          }
        )
      end
    end

    context "when not specified" do
      it "should not send the proxy option to manticore" do
        expect(::Manticore::Client).to have_received(:new) do |options|
          expect(options).not_to include(:proxy)
        end
      end
    end
  end
end