logstash-output-elasticsearch-test 11.16.0-x86_64-linux
- checksums.yaml +7 -0
- data/CHANGELOG.md +649 -0
- data/CONTRIBUTORS +34 -0
- data/Gemfile +16 -0
- data/LICENSE +202 -0
- data/NOTICE.TXT +5 -0
- data/README.md +106 -0
- data/docs/index.asciidoc +1369 -0
- data/lib/logstash/outputs/elasticsearch/data_stream_support.rb +282 -0
- data/lib/logstash/outputs/elasticsearch/default-ilm-policy.json +14 -0
- data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb +155 -0
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +534 -0
- data/lib/logstash/outputs/elasticsearch/http_client.rb +497 -0
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +201 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +92 -0
- data/lib/logstash/outputs/elasticsearch/license_checker.rb +52 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +131 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-6x.json +45 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-7x.json +44 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-8x.json +50 -0
- data/lib/logstash/outputs/elasticsearch.rb +699 -0
- data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +237 -0
- data/lib/logstash/plugin_mixins/elasticsearch/common.rb +409 -0
- data/lib/logstash/plugin_mixins/elasticsearch/noop_license_checker.rb +9 -0
- data/logstash-output-elasticsearch.gemspec +40 -0
- data/spec/es_spec_helper.rb +225 -0
- data/spec/fixtures/_nodes/6x.json +81 -0
- data/spec/fixtures/_nodes/7x.json +92 -0
- data/spec/fixtures/htpasswd +2 -0
- data/spec/fixtures/license_check/active.json +16 -0
- data/spec/fixtures/license_check/inactive.json +5 -0
- data/spec/fixtures/nginx_reverse_proxy.conf +22 -0
- data/spec/fixtures/scripts/painless/scripted_update.painless +2 -0
- data/spec/fixtures/scripts/painless/scripted_update_nested.painless +1 -0
- data/spec/fixtures/scripts/painless/scripted_upsert.painless +1 -0
- data/spec/fixtures/template-with-policy-es6x.json +48 -0
- data/spec/fixtures/template-with-policy-es7x.json +45 -0
- data/spec/fixtures/template-with-policy-es8x.json +50 -0
- data/spec/fixtures/test_certs/ca.crt +29 -0
- data/spec/fixtures/test_certs/ca.der.sha256 +1 -0
- data/spec/fixtures/test_certs/ca.key +51 -0
- data/spec/fixtures/test_certs/renew.sh +13 -0
- data/spec/fixtures/test_certs/test.crt +30 -0
- data/spec/fixtures/test_certs/test.der.sha256 +1 -0
- data/spec/fixtures/test_certs/test.key +51 -0
- data/spec/fixtures/test_certs/test.p12 +0 -0
- data/spec/fixtures/test_certs/test_invalid.crt +36 -0
- data/spec/fixtures/test_certs/test_invalid.key +51 -0
- data/spec/fixtures/test_certs/test_invalid.p12 +0 -0
- data/spec/fixtures/test_certs/test_self_signed.crt +32 -0
- data/spec/fixtures/test_certs/test_self_signed.key +54 -0
- data/spec/fixtures/test_certs/test_self_signed.p12 +0 -0
- data/spec/integration/outputs/compressed_indexing_spec.rb +70 -0
- data/spec/integration/outputs/create_spec.rb +67 -0
- data/spec/integration/outputs/data_stream_spec.rb +68 -0
- data/spec/integration/outputs/delete_spec.rb +63 -0
- data/spec/integration/outputs/ilm_spec.rb +534 -0
- data/spec/integration/outputs/index_spec.rb +421 -0
- data/spec/integration/outputs/index_version_spec.rb +98 -0
- data/spec/integration/outputs/ingest_pipeline_spec.rb +75 -0
- data/spec/integration/outputs/metrics_spec.rb +66 -0
- data/spec/integration/outputs/no_es_on_startup_spec.rb +78 -0
- data/spec/integration/outputs/painless_update_spec.rb +99 -0
- data/spec/integration/outputs/parent_spec.rb +94 -0
- data/spec/integration/outputs/retry_spec.rb +182 -0
- data/spec/integration/outputs/routing_spec.rb +61 -0
- data/spec/integration/outputs/sniffer_spec.rb +94 -0
- data/spec/integration/outputs/templates_spec.rb +133 -0
- data/spec/integration/outputs/unsupported_actions_spec.rb +75 -0
- data/spec/integration/outputs/update_spec.rb +114 -0
- data/spec/spec_helper.rb +10 -0
- data/spec/support/elasticsearch/api/actions/delete_ilm_policy.rb +19 -0
- data/spec/support/elasticsearch/api/actions/get_alias.rb +18 -0
- data/spec/support/elasticsearch/api/actions/get_ilm_policy.rb +18 -0
- data/spec/support/elasticsearch/api/actions/put_alias.rb +24 -0
- data/spec/support/elasticsearch/api/actions/put_ilm_policy.rb +25 -0
- data/spec/unit/http_client_builder_spec.rb +185 -0
- data/spec/unit/outputs/elasticsearch/data_stream_support_spec.rb +612 -0
- data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +151 -0
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +501 -0
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +339 -0
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +189 -0
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +103 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +1573 -0
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +197 -0
- data/spec/unit/outputs/error_whitelist_spec.rb +56 -0
- data/spec/unit/outputs/license_check_spec.rb +57 -0
- metadata +423 -0
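
Each diff hunk below adds one of the integration spec files from the listing above. They all drive the plugin through the same lifecycle: build a LogStash::Outputs::ElasticSearch instance, register it, hand it a batch of events via multi_receive, and close it. A minimal sketch of that flow, assuming the get_host_port helper provided by data/spec/es_spec_helper.rb (the index name here is made up for illustration):

require "logstash/outputs/elasticsearch"
require_relative "../../../spec/es_spec_helper"   # provides get_host_port, get_client and ESHelper

output = LogStash::Outputs::ElasticSearch.new(
  "hosts" => get_host_port(),                 # host:port of the test Elasticsearch instance
  "index" => "logstash-integration-sketch"    # hypothetical index name, not from the specs
)
output.register                                # resolves the ES version, sets up templates, etc.
output.multi_receive([LogStash::Event.new("message" => "hello")])
output.close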
data/spec/integration/outputs/no_es_on_startup_spec.rb
@@ -0,0 +1,78 @@

require "logstash/outputs/elasticsearch"
require_relative "../../../spec/es_spec_helper"

describe "elasticsearch is down on startup", :integration => true do
  let(:event1) { LogStash::Event.new("somevalue" => 100, "@timestamp" => "2014-11-17T20:37:17.223Z", "@metadata" => {"retry_count" => 0}) }
  let(:event2) { LogStash::Event.new("message" => "a") }

  subject {
    LogStash::Outputs::ElasticSearch.new({
      "manage_template" => true,
      "index" => "logstash-2014.11.17",
      "template_overwrite" => true,
      "hosts" => get_host_port(),
      "retry_max_interval" => 64,
      "retry_initial_interval" => 2,
      'ecs_compatibility' => 'disabled'
    })
  }

  before :each do
    # Delete all templates first.
    allow(Stud).to receive(:stoppable_sleep)

    # Clean ES of data before we start.
    @es = get_client
    @es.indices.delete_template(:name => "*")
    @es.indices.delete(:index => "*")
    @es.indices.refresh
  end

  after :each do
    subject.close
  end

  it 'should ingest events when Elasticsearch recovers before documents are sent' do
    allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_es_version).and_raise(
      ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError.new StandardError.new("TEST: before docs are sent"), 'http://test.es/'
    )
    subject.register
    allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_es_version).and_return(ESHelper.es_version)
    subject.multi_receive([event1, event2])
    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(2)
  end

  it 'should ingest events when Elasticsearch recovers after documents are sent' do
    allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_es_version).and_raise(
      ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError.new StandardError.new("TEST: after docs are sent"), 'http://test.es/'
    )
    subject.register
    Thread.new do
      sleep 4
      allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_es_version).and_return(ESHelper.es_version)
    end
    subject.multi_receive([event1, event2])
    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(2)
  end

  it 'should get cluster_uuid when Elasticsearch recovers from license check failure' do
    allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_license).and_raise(
      ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError.new StandardError.new("TEST: docs are sent"), 'http://test.es/_license'
    )
    subject.register
    Thread.new do
      sleep 4
      allow_any_instance_of(LogStash::Outputs::ElasticSearch::HttpClient::Pool).to receive(:get_license).and_call_original
    end
    subject.multi_receive([event1, event2])
    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(2)
    expect(subject.plugin_metadata.get(:cluster_uuid)).not_to be_empty
    expect(subject.plugin_metadata.get(:cluster_uuid)).not_to eq("_na_")
  end if ESHelper.es_version_satisfies?(">=7")
end
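
For context on the stubbing in this spec: the connection pool raises HostUnreachableError (which wraps the underlying exception plus the URL that failed) when a node cannot be reached, and Stud.stoppable_sleep is stubbed so the retry loop does not actually wait. Condensed into one place, the outage-then-recovery pattern used above looks roughly like this (illustrative sketch only, reusing the spec's own names):

pool = LogStash::Outputs::ElasticSearch::HttpClient::Pool

# Keep retries fast: no-op the backoff sleep the output performs between attempts.
allow(Stud).to receive(:stoppable_sleep)

# Simulate the outage: every version probe fails as if the host were down.
allow_any_instance_of(pool).to receive(:get_es_version).and_raise(
  pool::HostUnreachableError.new(StandardError.new("simulated outage"), "http://test.es/")
)

# "Recovery": a later stub replaces the earlier one, so the next probe succeeds.
allow_any_instance_of(pool).to receive(:get_es_version).and_return(ESHelper.es_version)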
data/spec/integration/outputs/painless_update_spec.rb
@@ -0,0 +1,99 @@

require_relative "../../../spec/es_spec_helper"

describe "Update actions using painless scripts", :integration => true, :update_tests => 'painless' do
  require "logstash/outputs/elasticsearch"

  def get_es_output( options={} )
    settings = {
      "manage_template" => true,
      "index" => "logstash-update",
      "template_overwrite" => true,
      "hosts" => get_host_port(),
      "action" => "update"
    }
    LogStash::Outputs::ElasticSearch.new(settings.merge!(options))
  end

  before :each do
    @es = get_client
    # Delete all templates first.
    # Clean ES of data before we start.
    @es.indices.delete_template(:name => "*")
    # This can fail if there are no indexes, ignore failure.
    @es.indices.delete(:index => "*") rescue nil
    @es.index(
      :index => 'logstash-update',
      :type => doc_type,
      :id => "123",
      :body => { :message => 'Test', :counter => 1 }
    )
    @es.indices.refresh
  end

  context "scripted updates" do
    context 'with an indexed script' do
      it "should increment a counter with event/doc 'count' variable with indexed script" do
        @es.perform_request(:put, "_scripts/indexed_update", {}, {"script" => {"source" => "ctx._source.counter += params.event.count", "lang" => "painless"}})

        plugin_parameters = {
          'document_id' => "123",
          'script' => 'indexed_update',
          'script_type' => 'indexed'
        }

        plugin_parameters.merge!('script_lang' => '')

        subject = get_es_output(plugin_parameters)
        subject.register
        subject.multi_receive([LogStash::Event.new("count" => 4 )])
        r = @es.get(:index => 'logstash-update', :type => doc_type, :id => "123", :refresh => true)
        expect(r["_source"]["counter"]).to eq(5)
      end
    end
  end

  context "when update with upsert" do
    it "should create new documents with provided upsert" do
      subject = get_es_output({ 'document_id' => "456", 'upsert' => '{"message": "upsert message"}' })
      subject.register
      subject.multi_receive([LogStash::Event.new("message" => "sample message here")])
      r = @es.get(:index => 'logstash-update', :type => doc_type, :id => "456", :refresh => true)
      expect(r["_source"]["message"]).to eq('upsert message')
    end

    it "should create new documents with event/doc as upsert" do
      subject = get_es_output({ 'document_id' => "456", 'doc_as_upsert' => true })
      subject.register
      subject.multi_receive([LogStash::Event.new("message" => "sample message here")])
      r = @es.get(:index => 'logstash-update', :type => doc_type, :id => "456", :refresh => true)
      expect(r["_source"]["message"]).to eq('sample message here')
    end

    it "should fail on documents with event/doc as upsert at external version" do
      subject = get_es_output({ 'document_id' => "456", 'doc_as_upsert' => true, 'version' => 999, "version_type" => "external" })
      expect { subject.register }.to raise_error(LogStash::ConfigurationError)
    end
  end

  context "updates with scripted upsert" do
    context 'with an inline script' do
      it "should create new documents with upsert content" do
        subject = get_es_output({ 'document_id' => "456", 'script' => 'ctx._source.counter = params.event.counter', 'upsert' => '{"message": "upsert message"}', 'script_type' => 'inline' })
        subject.register

        subject.multi_receive([LogStash::Event.new("message" => "sample message here")])
        r = @es.get(:index => 'logstash-update', :type => doc_type, :id => "456", :refresh => true)
        expect(r["_source"]["message"]).to eq('upsert message')
      end

      it "should create new documents with event/doc as script params" do
        subject = get_es_output({ 'document_id' => "456", 'script' => 'ctx._source.counter = params.event.counter', 'scripted_upsert' => true, 'script_type' => 'inline' })
        subject.register
        subject.multi_receive([LogStash::Event.new("counter" => 1)])
        @es.indices.refresh
        r = @es.get(:index => 'logstash-update', :type => doc_type, :id => "456", :refresh => true)
        expect(r["_source"]["counter"]).to eq(1)
      end
    end
  end
end
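
The three upsert flavors exercised above correspond to different shapes of the Elasticsearch update request body. Roughly, and only as a hand-written sketch of standard update-API semantics (not the plugin's exact serialization):

# 'upsert' => '{"message": "upsert message"}'  -- explicit upsert document used when the id does not exist yet
{ "doc" => { "message" => "sample message here" },
  "upsert" => { "message" => "upsert message" } }

# 'doc_as_upsert' => true  -- the event itself becomes the new document on insert
{ "doc" => { "message" => "sample message here" },
  "doc_as_upsert" => true }

# 'scripted_upsert' => true with an inline script  -- the script runs even on insert,
# with the event exposed to Painless as params.event
{ "script" => { "source" => "ctx._source.counter = params.event.counter",
                "params" => { "event" => { "counter" => 1 } } },
  "scripted_upsert" => true,
  "upsert" => {} }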
data/spec/integration/outputs/parent_spec.rb
@@ -0,0 +1,94 @@

require_relative "../../../spec/es_spec_helper"
require "logstash/outputs/elasticsearch"

describe "join type field", :integration => true do

  shared_examples "a join field based parent indexer" do
    let(:index) { 10.times.collect { rand(10).to_s }.join("") }

    let(:type) { ESHelper.es_version_satisfies?("< 7") ? "doc" : "_doc" }

    let(:event_count) { 10000 + rand(500) }
    let(:parent) { "not_implemented" }
    let(:config) { "not_implemented" }
    let(:parent_id) { "test" }
    let(:join_field) { "join_field" }
    let(:parent_relation) { "parent_type" }
    let(:child_relation) { "child_type" }
    let(:default_headers) {
      {"Content-Type" => "application/json"}
    }
    subject { LogStash::Outputs::ElasticSearch.new(config) }

    before do
      # Add mapping and a parent document
      index_url = "http://#{get_host_port()}/#{index}"

      properties = {
        "properties" => {
          join_field => {
            "type" => "join",
            "relations" => { parent_relation => child_relation }
          }
        }
      }

      mapping = ESHelper.es_version_satisfies?('<7') ? { "mappings" => { type => properties } }
                                                     : { "mappings" => properties }

      Manticore.put("#{index_url}", {:body => mapping.to_json, :headers => default_headers}).call
      pdoc = { "message" => "ohayo", join_field => parent_relation }
      Manticore.put("#{index_url}/#{type}/#{parent_id}", {:body => pdoc.to_json, :headers => default_headers}).call

      subject.register
      subject.multi_receive(event_count.times.map { LogStash::Event.new("link_to" => parent_id, "message" => "Hello World!", join_field => child_relation) })
    end


    it "ships events" do
      index_url = "http://#{get_host_port()}/#{index}"

      Manticore.post("#{index_url}/_refresh").call

      # Wait until all events are available.
      Stud::try(10.times) do
        query = { "query" => { "has_parent" => { "parent_type" => parent_relation, "query" => { "match_all" => { } } } } }
        response = Manticore.post("#{index_url}/_count", {:body => query.to_json, :headers => default_headers})
        data = response.body
        result = LogStash::Json.load(data)
        cur_count = result["count"]
        expect(cur_count).to eq(event_count)
      end
    end
  end

  describe "(http protocol) index events with static parent" do
    it_behaves_like 'a join field based parent indexer' do
      let(:config) {
        {
          "hosts" => get_host_port,
          "index" => index,
          "parent" => parent_id,
          "document_type" => type,
          "join_field" => join_field,
          "manage_template" => false
        }
      }
    end
  end

  describe "(http_protocol) index events with fieldref in parent value" do
    it_behaves_like 'a join field based parent indexer' do
      let(:config) {
        {
          "hosts" => get_host_port,
          "index" => index,
          "parent" => "%{link_to}",
          "document_type" => type,
          "join_field" => join_field,
          "manage_template" => false
        }
      }
    end
  end
end
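
For readers unfamiliar with the join type, here is a sketch of the documents involved, based on standard Elasticsearch join-field semantics rather than on anything in this diff. A child document names its relation and its parent id and must be routed to the parent's shard, which is why the configs above set "parent" either statically or via the %{link_to} field reference:

# Parent document, as indexed in the before block:
{ "message" => "ohayo", "join_field" => "parent_type" }

# Child document as Elasticsearch ultimately stores it; the parent id doubles
# as the routing value so parent and child land on the same shard:
{ "link_to" => "test",
  "message" => "Hello World!",
  "join_field" => { "name" => "child_type", "parent" => "test" } }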
data/spec/integration/outputs/retry_spec.rb
@@ -0,0 +1,182 @@

require "logstash/outputs/elasticsearch"
require_relative "../../../spec/es_spec_helper"

describe "failures in bulk class expected behavior", :integration => true do
  let(:template) { '{"template" : "not important, will be updated by :index"}' }
  let(:event1) { LogStash::Event.new("somevalue" => 100, "@timestamp" => "2014-11-17T20:37:17.223Z", "@metadata" => {"retry_count" => 0}) }
  let(:action1) do
    if ESHelper.es_version_satisfies?("< 7")
      ESHelper.action_for_version(["index", {:_id=>nil, routing_field_name =>nil, :_index=>"logstash-2014.11.17", :_type=> doc_type }, event1.to_hash])
    else
      ESHelper.action_for_version(["index", {:_id=>nil, routing_field_name =>nil, :_index=>"logstash-2014.11.17" }, event1.to_hash])
    end
  end
  let(:event2) { LogStash::Event.new("geoip" => { "location" => [ 0.0, 0.0] }, "@timestamp" => "2014-11-17T20:37:17.223Z", "@metadata" => {"retry_count" => 0}) }
  let(:action2) do
    if ESHelper.es_version_satisfies?("< 7")
      ESHelper.action_for_version(["index", {:_id=>nil, routing_field_name =>nil, :_index=>"logstash-2014.11.17", :_type=> doc_type }, event2.to_hash])
    else
      ESHelper.action_for_version(["index", {:_id=>nil, routing_field_name =>nil, :_index=>"logstash-2014.11.17" }, event2.to_hash])
    end
  end
  let(:invalid_event) { LogStash::Event.new("geoip" => { "location" => "notlatlon" }, "@timestamp" => "2014-11-17T20:37:17.223Z") }

  def mock_actions_with_response(*resp)
    raise ArgumentError, "Cannot mock actions until subject is registered and has a client!" unless subject.client

    expanded_responses = resp.map do |resp|
      items = resp["statuses"] && resp["statuses"].map do |status|
        {"create" => {"status" => status, "error" => "Error for #{status}"}}
      end

      {
        "errors" => resp["errors"],
        "items" => items
      }
    end

    allow(subject.client).to receive(:bulk).and_return(*expanded_responses)
  end

  subject! do
    settings = {
      "manage_template" => true,
      "index" => "logstash-2014.11.17",
      "template_overwrite" => true,
      "hosts" => get_host_port(),
      "retry_max_interval" => 64,
      "retry_initial_interval" => 2,
      "ecs_compatibility" => "disabled", # specs are tightly tied to non-ECS defaults
    }
    next LogStash::Outputs::ElasticSearch.new(settings)
  end

  before :each do
    # Delete all templates first.
    require "elasticsearch"
    allow(Stud).to receive(:stoppable_sleep)

    # Clean ES of data before we start.
    @es = get_client
    @es.indices.delete_template(:name => "*")
    @es.indices.delete(:index => "*")
    @es.indices.refresh
  end

  after :each do
    subject.close
  end

  it "should retry exactly once if all bulk actions are successful" do
    expect(subject).to receive(:submit).with([action1, action2]).once.and_call_original
    subject.register
    mock_actions_with_response({"errors" => false})
    subject.multi_receive([event1, event2])
  end

  it "retry exceptions within the submit body" do
    call_count = 0
    subject.register

    expect(subject.client).to receive(:bulk).with(anything).exactly(3).times do
      if (call_count += 1) <= 2
        raise "error first two times"
      else
        {"errors" => false}
      end
    end

    subject.multi_receive([event1])
  end

  it "should retry actions with response status of 503" do
    expect(subject).to receive(:submit).with([action1, action1, action1, action2]).ordered.once.and_call_original
    expect(subject).to receive(:submit).with([action1, action2]).ordered.once.and_call_original
    expect(subject).to receive(:submit).with([action2]).ordered.once.and_call_original

    subject.register
    mock_actions_with_response({"errors" => true, "statuses" => [200, 200, 503, 503]},
                               {"errors" => true, "statuses" => [200, 503]},
                               {"errors" => false})

    subject.multi_receive([event1, event1, event1, event2])
  end

  retryable_codes = [429, 502, 503]

  retryable_codes.each do |code|
    it "should retry actions with response status of #{code}" do
      subject.register

      mock_actions_with_response({"errors" => true, "statuses" => [code]},
                                 {"errors" => false})
      expect(subject).to receive(:submit).with([action1]).twice.and_call_original

      subject.multi_receive([event1])
    end
  end

  it "should retry an event infinitely until a non retryable status occurs" do
    expect(subject).to receive(:submit).with([action1]).exactly(6).times.and_call_original
    subject.register

    mock_actions_with_response({"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [400]})

    subject.multi_receive([event1])
  end

  it "should sleep for an exponentially increasing amount of time on each retry, capped by the max" do
    [2, 4, 8, 16, 32, 64, 64].each_with_index do |interval,i|
      expect(Stud).to receive(:stoppable_sleep).with(interval).ordered
    end

    subject.register

    mock_actions_with_response({"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [429]},
                               {"errors" => true, "statuses" => [400]})

    subject.multi_receive([event1])
  end

  it "non-retryable errors like mapping errors (400) should be dropped and not be retried (unfortunately)" do
    subject.register
    expect(subject).to receive(:submit).once.and_call_original
    subject.multi_receive([invalid_event])
    subject.close

    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(0)
  end

  it "successful requests should not be appended to retry queue" do
    expect(subject).to receive(:submit).once.and_call_original

    subject.register
    subject.multi_receive([event1])
    subject.close
    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(1)
  end

  it "should only index proper events" do
    subject.register
    subject.multi_receive([invalid_event, event1])
    subject.close

    @es.indices.refresh
    r = @es.search(index: 'logstash-*')
    expect(r).to have_hits(1)
  end
end
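
The sleep intervals asserted in the backoff spec follow directly from the settings on the subject: retry_initial_interval => 2 doubles on every retry until it is capped at retry_max_interval => 64. A small sketch of that arithmetic (not the plugin's actual implementation):

retry_initial_interval = 2
retry_max_interval     = 64

interval = retry_initial_interval
intervals = 7.times.map do
  current = interval
  interval = [interval * 2, retry_max_interval].min  # double, but never exceed the cap
  current
end
intervals # => [2, 4, 8, 16, 32, 64, 64] -- the values the spec expects Stud.stoppable_sleep to receive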
data/spec/integration/outputs/routing_spec.rb
@@ -0,0 +1,61 @@

require_relative "../../../spec/es_spec_helper"

shared_examples "a routing indexer" do
  let(:index) { 10.times.collect { rand(10).to_s }.join("") }
  let(:type) { 10.times.collect { rand(10).to_s }.join("") }
  let(:event_count) { 10000 + rand(500) }
  let(:routing) { "not_implemented" }
  let(:config) { "not_implemented" }
  subject { LogStash::Outputs::ElasticSearch.new(config) }

  before do
    subject.register
    event_count.times do
      subject.multi_receive([LogStash::Event.new("message" => "test", "type" => type)])
    end
  end


  it "ships events" do
    index_url = "http://#{get_host_port()}/#{index}"

    client = Manticore::Client.new
    client.post("#{index_url}/_refresh").call

    # Wait until all events are available.
    Stud::try(10.times) do
      data = ""

      response = client.get("#{index_url}/_count?q=*&routing=#{routing}").call
      result = LogStash::Json.load(response.body)
      cur_count = result["count"]
      expect(cur_count).to eq(event_count)
    end
  end
end

describe "(http protocol) index events with static routing", :integration => true do
  it_behaves_like 'a routing indexer' do
    let(:routing) { "test" }
    let(:config) {
      {
        "hosts" => get_host_port,
        "index" => index,
        "routing" => routing
      }
    }
  end
end

describe "(http_protocol) index events with fieldref in routing value", :integration => true do
  it_behaves_like 'a routing indexer' do
    let(:routing) { "test" }
    let(:config) {
      {
        "hosts" => get_host_port,
        "index" => index,
        "routing" => "%{message}"
      }
    }
  end
end
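
Both shared-example runs above can query with routing=test because "routing" => "%{message}" is a field reference: the plugin sprintf-expands it per event, and every event in this spec carries "message" => "test". A tiny illustration with the Logstash event API:

event = LogStash::Event.new("message" => "test")
event.sprintf("%{message}")  # => "test", the value sent as the routing parameter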
data/spec/integration/outputs/sniffer_spec.rb
@@ -0,0 +1,94 @@

require_relative "../../../spec/es_spec_helper"
require "logstash/outputs/elasticsearch/http_client"
require "json"
require "socket"

describe "pool sniffer", :integration => true do
  let(:logger) { Cabin::Channel.get }
  let(:adapter) { LogStash::Outputs::ElasticSearch::HttpClient::ManticoreAdapter.new(logger, {}) }
  let(:es_host) { get_host_port.split(":").first }
  let(:es_port) { get_host_port.split(":").last }
  let(:es_ip) { IPSocket.getaddress(es_host) }
  let(:initial_urls) { [::LogStash::Util::SafeURI.new("http://#{get_host_port}")] }
  let(:options) do
    {
      :resurrect_delay => 2, # Shorten the delay a bit to speed up tests
      :url_normalizer => proc {|u| u},
      :metric => ::LogStash::Instrument::NullMetric.new(:dummy).namespace(:alsodummy)
    }
  end

  subject { LogStash::Outputs::ElasticSearch::HttpClient::Pool.new(logger, adapter, initial_urls, options) }

  describe("Simple sniff parsing") do
    before(:each) { subject.start }

    context "with single node" do
      it "should execute a sniff without error" do
        expect { subject.check_sniff }.not_to raise_error
      end

      it "should return single sniff URL" do
        uris = subject.check_sniff

        expect(uris.size).to eq(1)
      end

      it "should return the correct sniff URL" do
        if ESHelper.es_version_satisfies?("<7")
          # We do a more thorough check on these versions because we can more reliably guess the ip
          uris = subject.check_sniff

          expect(uris).to include(::LogStash::Util::SafeURI.new("//#{es_ip}:#{es_port}"))
        else
          # ES 1.x (and ES 7.x) returned the public hostname by default. This is hard to approximate
          # so for ES1.x and 7.x we don't check the *exact* hostname
          skip
        end
      end
    end
  end

  if ESHelper.es_version_satisfies?(">= 7")
    describe("Complex sniff parsing ES 7x") do
      before(:each) do
        response_double = double("_nodes/http", body: File.read("spec/fixtures/_nodes/7x.json"))
        allow(subject).to receive(:perform_request).and_return([nil, { version: "7.0" }, response_double])
        subject.start
      end

      context "with mixed master-only, data-only, and data + master nodes" do
        it "should execute a sniff without error" do
          expect { subject.check_sniff }.not_to raise_error
        end

        it "should return the correct sniff URLs" do
          # ie. with the master-only node, and with the node name correctly set.
          uris = subject.check_sniff

          expect(uris).to include(::LogStash::Util::SafeURI.new("//dev-masterdata:9201"), ::LogStash::Util::SafeURI.new("//dev-data:9202"))
        end
      end
    end
  end

  describe("Complex sniff parsing ES") do
    before(:each) do
      response_double = double("_nodes/http", body: File.read("spec/fixtures/_nodes/6x.json"))
      allow(subject).to receive(:perform_request).and_return([nil, { version: "6.8" }, response_double])
      subject.start
    end

    context "with mixed master-only, data-only, and data + master nodes" do
      it "should execute a sniff without error" do
        expect { subject.check_sniff }.not_to raise_error
      end

      it "should return the correct sniff URLs" do
        # ie. without the master-only node
        uris = subject.check_sniff

        expect(uris).to include(::LogStash::Util::SafeURI.new("//127.0.0.1:9201"), ::LogStash::Util::SafeURI.new("//127.0.0.1:9202"), ::LogStash::Util::SafeURI.new("//127.0.0.1:9203"))
      end
    end
  end
end
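
The 6x/7x fixtures stubbed into perform_request above are captured _nodes/http responses. A minimal sketch of the shape the sniffer parses (illustrative only; not the actual contents of spec/fixtures/_nodes/6x.json): each node advertises its roles and an http.publish_address, and the expectations above show that master-only nodes are skipped while data-bearing nodes are kept:

{
  "nodes" => {
    "node1" => { "roles" => ["master"],         "http" => { "publish_address" => "127.0.0.1:9200" } },  # master-only: excluded
    "node2" => { "roles" => ["master", "data"], "http" => { "publish_address" => "127.0.0.1:9201" } },
    "node3" => { "roles" => ["data"],           "http" => { "publish_address" => "127.0.0.1:9202" } },
    "node4" => { "roles" => ["data"],           "http" => { "publish_address" => "127.0.0.1:9203" } }
  }
}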