logstash-output-elasticsearch 9.0.0-java → 9.0.2-java
- checksums.yaml +5 -5
- data/CHANGELOG.md +6 -0
- data/docs/index.asciidoc +7 -5
- data/lib/logstash/outputs/elasticsearch/common.rb +11 -1
- data/lib/logstash/outputs/elasticsearch/http_client.rb +2 -2
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +17 -2
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +1 -7
- data/logstash-output-elasticsearch.gemspec +2 -2
- data/spec/integration/outputs/compressed_indexing_spec.rb +5 -1
- data/spec/integration/outputs/index_spec.rb +5 -1
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +31 -5
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +0 -20
- data/spec/unit/outputs/elasticsearch_spec.rb +84 -85
- data/spec/unit/outputs/error_whitelist_spec.rb +1 -0
- metadata +7 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-
-metadata.gz:
-data.tar.gz:
+SHA256:
+  metadata.gz: 910353088f57a02bea121f9f2c92e27d27664372dbf6b934e256103178c093cd
+  data.tar.gz: a5ba64f0511129dbc9fa8890457f25317e7541aecaf4e694f4f1b835143ce999
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 03a87ed4822764503c6d0d5269ac5c19df9b0d5566659f0ca0df6ced8abd4f6aebd1e384cd6f2fa829787cd864730b32a90235378b88954eba25ebceff240228
+  data.tar.gz: 357011f25a5e41559e995bf35cbb25de6d0ef4c5db0524994dcb24bd2d8300e36068793a403b40e3fc5face5495a9e9963546146c89870c0d219c269864f8afb
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,9 @@
+## 9.0.2
+- Ignore event's type field for the purpose of setting document `_type` if cluster is es 6.x or above
+
+## 9.0.1
+- Update gemspec summary
+
 ### 9.0.0
 - Change default document type to 'doc' from 'logs' to align with beats and reflect the generic nature of logstash.
 - Deprecate 'document_type' option
data/docs/index.asciidoc
CHANGED
@@ -67,8 +67,8 @@ HTTP requests to the bulk API are expected to return a 200 response code. All ot
 
 The following document errors are handled as follows:
 
-* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
-* 409 errors (conflict) are logged as a warning and dropped.
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
 
 Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
 It is more performant for Elasticsearch to retry these exceptions than this plugin.
@@ -242,8 +242,10 @@ Note: This option is deprecated due to the https://www.elastic.co/guide/en/elast
 It will be removed in the next major version of Logstash.
 This sets the document type to write events to. Generally you should try to write only
 similar events to the same 'type'. String expansion `%{foo}` works here.
-
-
+If you don't set a value for this option:
+
+- for elasticsearch clusters 6.x and above: the value of 'doc' will be used;
+- for elasticsearch clusters 5.x and below: the event's 'type' field will be used, if the field is not present the value of 'doc' will be used.
 
 [id="plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
 ===== `failure_type_logging_whitelist`
@@ -681,4 +683,4 @@ See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-in
 
 
 [id="plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
+include::{include_path}/{type}.asciidoc[]
data/lib/logstash/outputs/elasticsearch/common.rb
CHANGED
@@ -22,6 +22,7 @@ module LogStash; module Outputs; class ElasticSearch;
 
     setup_hosts # properly sets @hosts
     build_client
+
     install_template
     check_action_validity
 
@@ -48,6 +49,10 @@ module LogStash; module Outputs; class ElasticSearch;
     end
   end
 
+  def maximum_seen_major_version
+    client.maximum_seen_major_version
+  end
+
   def install_template
     TemplateManager.install_template(self)
   end
@@ -199,12 +204,17 @@ module LogStash; module Outputs; class ElasticSearch;
   end
 
   # Determine the correct value for the 'type' field for the given event
+  DEFAULT_EVENT_TYPE="doc".freeze
   def get_event_type(event)
     # Set the 'type' value for the index.
     type = if @document_type
       event.sprintf(@document_type)
     else
-
+      if client.maximum_seen_major_version < 6
+        event.get("type") || DEFAULT_EVENT_TYPE
+      else
+        DEFAULT_EVENT_TYPE
+      end
     end
 
     if !(type.is_a?(String) || type.is_a?(Numeric))
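In effect, when `document_type` is not set, the resolution order above can be summarized by this standalone sketch (the helper name `resolve_type` and its arguments are illustrative only, not part of the plugin's API):

    # Standalone illustration of the type resolution order introduced in 9.0.2.
    DEFAULT_EVENT_TYPE = "doc".freeze

    def resolve_type(document_type, event_type_field, es_major_version)
      return document_type if document_type                # explicit document_type always wins
      return DEFAULT_EVENT_TYPE if es_major_version >= 6   # 6.x and above: ignore the event's type field
      event_type_field || DEFAULT_EVENT_TYPE               # 5.x and below: event field, falling back to 'doc'
    end

    resolve_type(nil, "syslog", 6)  #=> "doc"
    resolve_type(nil, "syslog", 5)  #=> "syslog"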
data/lib/logstash/outputs/elasticsearch/http_client.rb
CHANGED
@@ -82,8 +82,8 @@ module LogStash; module Outputs; class ElasticSearch;
     template_put(name, template)
   end
 
-  def
-    @pool.
+  def maximum_seen_major_version
+    @pool.maximum_seen_major_version
   end
 
   def bulk(actions)
data/lib/logstash/outputs/elasticsearch/http_client/pool.rb
CHANGED
@@ -107,9 +107,9 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
     @state_mutex.synchronize { @url_info }
   end
 
-  def
+  def maximum_seen_major_version
     @state_mutex.synchronize do
-      @
+      @maximum_seen_major_version
     end
   end
 
@@ -245,6 +245,14 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       es_version = get_es_version(url)
       @state_mutex.synchronize do
         meta[:version] = es_version
+        major = major_version(es_version)
+        if !@maximum_seen_major_version
+          @logger.info("ES Output version determined", :es_version => @maximum_seen_major_version)
+          set_new_major_version(major)
+        elsif major > @maximum_seen_major_version
+          @logger.warn("Detected a node with a higher major version than previously observed. This could be the result of an elasticsearch cluster upgrade.", :previous_major => @maximum_seen_major_version, :new_major => major, :node_url => url)
+          set_new_major_version(major)
+        end
         meta[:state] = :alive
       end
     rescue HostUnreachableError, BadResponseCodeError => e
@@ -434,5 +442,12 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
     request = perform_request_to_url(url, :get, ROOT_URI_PATH)
     LogStash::Json.load(request.body)["version"]["number"]
   end
+
+  def set_new_major_version(version)
+    @maximum_seen_major_version = version
+    if @maximum_seen_major_version >= 6
+      @logger.warn("Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type", :es_version => @maximum_seen_major_version)
+    end
+  end
 end
 end; end; end; end;
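The `major_version` helper called in the healthcheck hunk above is not shown in this diff. Assuming it reduces a version string the same way the TemplateManager helper removed below does, it would look roughly like this:

    # Assumed behaviour: keep only the major component of an ES version string.
    def major_version(version_string)
      version_string.split(".").first.to_i
    end

    major_version("6.0.0")  #=> 6
    major_version("5.6.3")  #=> 5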
data/lib/logstash/outputs/elasticsearch/template_manager.rb
CHANGED
@@ -4,7 +4,7 @@ module LogStash; module Outputs; class ElasticSearch
   def self.install_template(plugin)
     return unless plugin.manage_template
     plugin.logger.info("Using mapping template from", :path => plugin.template)
-    template = get_template(plugin.template,
+    template = get_template(plugin.template, plugin.maximum_seen_major_version)
     plugin.logger.info("Attempting to install template", :manage_template => template)
     install(plugin.client, plugin.template_name, template, plugin.template_overwrite)
   rescue => e
@@ -12,12 +12,6 @@ module LogStash; module Outputs; class ElasticSearch
   end
 
   private
-  def self.get_es_major_version(client)
-    # get the elasticsearch version of each node in the pool and
-    # pick the biggest major version
-    client.connected_es_versions.uniq.map {|version| version.split(".").first.to_i}.max
-  end
-
   def self.get_template(path, es_major_version)
     template_path = path || default_template_path(es_major_version)
     read_template_file(template_path)
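`default_template_path`, referenced in the unchanged `get_template` line above, is not part of this diff. Judging from the spec further down (Elasticsearch 1.x is expected to pick the 2x template), it presumably maps the major version onto one of the bundled template files, roughly like this sketch (file names and lookup are assumptions, not taken from this diff):

    # Sketch only; the real file names and lookup live in the plugin source tree.
    def self.default_template_path(es_major_version)
      template_version = es_major_version == 1 ? 2 : es_major_version
      ::File.expand_path("elasticsearch-template-es#{template_version}x.json", ::File.dirname(__FILE__))
    end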
data/logstash-output-elasticsearch.gemspec
CHANGED
@@ -1,8 +1,8 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-output-elasticsearch'
-  s.version = '9.0.0'
+  s.version = '9.0.2'
   s.licenses = ['apache-2.0']
-  s.summary = "
+  s.summary = "Stores logs in Elasticsearch"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
   s.authors = ["Elastic"]
   s.email = 'info@elastic.co'
data/spec/integration/outputs/compressed_indexing_spec.rb
CHANGED
@@ -50,7 +50,11 @@ if ESHelper.es_version_satisfies?(">= 5")
       response = http_client.get("#{index_url}/_search?q=*&size=1000")
       result = LogStash::Json.load(response.body)
       result["hits"]["hits"].each do |doc|
-        expect(doc["_type"]).to eq(type)
+        if ESHelper.es_version_satisfies?(">= 6")
+          expect(doc["_type"]).to eq("doc")
+        else
+          expect(doc["_type"]).to eq(type)
+        end
         expect(doc["_index"]).to eq(index)
       end
     end
data/spec/integration/outputs/index_spec.rb
CHANGED
@@ -78,7 +78,11 @@ describe "indexing" do
       response = http_client.get("#{index_url}/_search?q=*&size=1000")
       result = LogStash::Json.load(response.body)
       result["hits"]["hits"].each do |doc|
-        expect(doc["_type"]).to eq(type)
+        if ESHelper.es_version_satisfies?(">= 6")
+          expect(doc["_type"]).to eq("doc")
+        else
+          expect(doc["_type"]).to eq(type)
+        end
         expect(doc["_index"]).to eq(index)
       end
     end
data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb
CHANGED
@@ -7,11 +7,12 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
   let(:adapter) { LogStash::Outputs::ElasticSearch::HttpClient::ManticoreAdapter.new(logger) }
   let(:initial_urls) { [::LogStash::Util::SafeURI.new("http://localhost:9200")] }
   let(:options) { {:resurrect_delay => 2, :url_normalizer => proc {|u| u}} } # Shorten the delay a bit to speed up tests
+  let(:es_node_versions) { [ "0.0.0" ] }
 
   subject { described_class.new(logger, adapter, initial_urls, options) }
 
   let(:manticore_double) { double("manticore a") }
-  before do
+  before(:each) do
 
     response_double = double("manticore response").as_null_object
     # Allow healtchecks
@@ -21,10 +22,7 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
 
     allow(::Manticore::Client).to receive(:new).and_return(manticore_double)
 
-    allow(subject).to receive(:
-    allow(subject).to receive(:get_es_version).with(any_args).and_return("0.0.0")
-
-    subject.start
+    allow(subject).to receive(:get_es_version).with(any_args).and_return(*es_node_versions)
   end
 
   after do
@@ -34,10 +32,12 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
   describe "initialization" do
     it "should be successful" do
       expect { subject }.not_to raise_error
+      subject.start
     end
   end
 
   describe "the resurrectionist" do
+    before(:each) { subject.start }
     it "should start the resurrectionist when created" do
       expect(subject.resurrectionist_alive?).to eql(true)
     end
@@ -77,6 +77,7 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
   end
 
   describe "the sniffer" do
+    before(:each) { subject.start }
    it "should not start the sniffer by default" do
      expect(subject.sniffer_alive?).to eql(nil)
    end
@@ -92,6 +93,7 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
 
   describe "closing" do
     before do
+      subject.start
       # Simulate a single in use connection on the first check of this
       allow(adapter).to receive(:close).and_call_original
       allow(subject).to receive(:wait_for_in_use_connections).and_call_original
@@ -120,6 +122,7 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
   end
 
   describe "connection management" do
+    before(:each) { subject.start }
     context "with only one URL in the list" do
       it "should use the only URL in 'with_connection'" do
         subject.with_connection do |c|
@@ -167,4 +170,27 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
       end
     end
   end
+
+  describe "version tracking" do
+    let(:initial_urls) { [
+      ::LogStash::Util::SafeURI.new("http://somehost:9200"),
+      ::LogStash::Util::SafeURI.new("http://otherhost:9201")
+    ] }
+
+    before(:each) do
+      allow(subject).to receive(:perform_request_to_url).and_return(nil)
+      subject.start
+    end
+
+    it "picks the largest major version" do
+      expect(subject.maximum_seen_major_version).to eq(0)
+    end
+
+    context "if there are nodes with multiple major versions" do
+      let(:es_node_versions) { [ "0.0.0", "6.0.0" ] }
+      it "picks the largest major version" do
+        expect(subject.maximum_seen_major_version).to eq(6)
+      end
+    end
+  end
 end
data/spec/unit/outputs/elasticsearch/template_manager_spec.rb
CHANGED
@@ -5,26 +5,6 @@ require "json"
 
 describe LogStash::Outputs::ElasticSearch::TemplateManager do
 
-  describe ".get_es_major_version" do
-    let(:client) { double("client") }
-
-    before(:each) do
-      allow(client).to receive(:connected_es_versions).and_return(["5.3.0"])
-    end
-
-    it "picks the largest major version" do
-      expect(described_class.get_es_major_version(client)).to eq(5)
-    end
-    context "if there are nodes with multiple major versions" do
-      before(:each) do
-        allow(client).to receive(:connected_es_versions).and_return(["5.3.0", "6.0.0"])
-      end
-      it "picks the largest major version" do
-        expect(described_class.get_es_major_version(client)).to eq(6)
-      end
-    end
-  end
-
   describe ".default_template_path" do
     context "elasticsearch 1.x" do
       it "chooses the 2x template" do
data/spec/unit/outputs/elasticsearch_spec.rb
CHANGED
@@ -2,7 +2,31 @@ require_relative "../../../spec/es_spec_helper"
 require "flores/random"
 require "logstash/outputs/elasticsearch"
 
-describe "outputs/elasticsearch" do
+describe LogStash::Outputs::ElasticSearch do
+  subject { described_class.new(options) }
+  let(:options) { {} }
+  let(:maximum_seen_major_version) { rand(100) }
+
+  let(:do_register) { true }
+
+  before(:each) do
+    if do_register
+      subject.register
+
+      # Rspec mocks can't handle background threads, so... we can't use any
+      allow(subject.client.pool).to receive(:start_resurrectionist)
+      allow(subject.client.pool).to receive(:start_sniffer)
+      allow(subject.client.pool).to receive(:healthcheck!)
+      allow(subject.client).to receive(:maximum_seen_major_version).and_return(maximum_seen_major_version)
+      subject.client.pool.adapter.manticore.respond_with(:body => "{}")
+    end
+  end
+
+  after(:each) do
+    subject.close
+  end
+
+
   context "with an active instance" do
     let(:options) {
       {
@@ -12,57 +36,51 @@ describe "outputs/elasticsearch" do
         "manage_template" => false
       }
     }
-
-    let(:eso) { LogStash::Outputs::ElasticSearch.new(options) }
 
-    let(:manticore_urls) {
+    let(:manticore_urls) { subject.client.pool.urls }
     let(:manticore_url) { manticore_urls.first }
-
-    let(:do_register) { true }
-
-    before(:each) do
-      if do_register
-        eso.register
-
-        # Rspec mocks can't handle background threads, so... we can't use any
-        allow(eso.client.pool).to receive(:start_resurrectionist)
-        allow(eso.client.pool).to receive(:start_sniffer)
-        allow(eso.client.pool).to receive(:healthcheck!)
-        eso.client.pool.adapter.manticore.respond_with(:body => "{}")
-      end
-    end
-
-    after(:each) do
-      eso.close
-    end
 
     describe "getting a document type" do
       it "should default to 'doc'" do
-        expect(
+        expect(subject.send(:get_event_type, LogStash::Event.new)).to eql("doc")
       end
 
-
-
+      context "if document_type isn't set" do
+        let(:options) { super.merge("document_type" => nil)}
+        context "for 6.x elasticsearch clusters" do
+          let(:maximum_seen_major_version) { 6 }
+          it "should return 'doc'" do
+            expect(subject.send(:get_event_type, LogStash::Event.new("type" => "foo"))).to eql("doc")
+          end
+        end
+
+        context "for < 6.0 elasticsearch clusters" do
+          let(:maximum_seen_major_version) { 5 }
+          it "should get the type from the event" do
+            expect(subject.send(:get_event_type, LogStash::Event.new("type" => "foo"))).to eql("foo")
+          end
+        end
       end
 
       context "with 'document type set'" do
        let(:options) { super.merge("document_type" => "bar")}
        it "should get the event type from the 'document_type' setting" do
-          expect(
+          expect(subject.send(:get_event_type, LogStash::Event.new())).to eql("bar")
        end
      end
 
-      context "with a bad type" do
+      context "with a bad type event field in a < 6.0 es cluster" do
+        let(:maximum_seen_major_version) { 5 }
        let(:type_arg) { ["foo"] }
-        let(:result) {
+        let(:result) { subject.send(:get_event_type, LogStash::Event.new("type" => type_arg)) }
 
        before do
-          allow(
+          allow(subject.instance_variable_get(:@logger)).to receive(:warn)
          result
        end
 
        it "should call @logger.warn and return nil" do
-          expect(
+          expect(subject.instance_variable_get(:@logger)).to have_received(:warn).with(/Bad event type!/, anything).once
        end
 
        it "should set the type to the stringified value" do
@@ -103,7 +121,7 @@ describe "outputs/elasticsearch" do
 
     describe "with path" do
       it "should properly create a URI with the path" do
-        expect(
+        expect(subject.path).to eql(options["path"])
       end
 
       it "should properly set the path on the HTTP client adding slashes" do
@@ -129,7 +147,7 @@ describe "outputs/elasticsearch" do
       let(:client_host_path) { manticore_url.path }
 
       it "should initialize without error" do
-        expect {
+        expect { subject }.not_to raise_error
       end
 
       it "should use the URI path" do
@@ -145,7 +163,7 @@ describe "outputs/elasticsearch" do
       end
 
       it "should initialize without error" do
-        expect {
+        expect { subject }.not_to raise_error
       end
 
       it "should use the option path" do
@@ -164,7 +182,7 @@ describe "outputs/elasticsearch" do
       end
 
       it "should initialize with an error" do
-        expect {
+        expect { subject.register }.to raise_error(LogStash::ConfigurationError)
       end
     end
   end
@@ -188,12 +206,12 @@ describe "outputs/elasticsearch" do
     let(:events_tuples) { [double("one t"), double("two t"), double("three t")] }
 
     before do
-      allow(
+      allow(subject).to receive(:retrying_submit).with(anything)
       events.each_with_index do |e,i|
         et = events_tuples[i]
-        allow(
+        allow(subject).to receive(:event_action_tuple).with(e).and_return(et)
       end
-
+      subject.multi_receive(events)
     end
 
   end
@@ -213,24 +231,24 @@ describe "outputs/elasticsearch" do
       i = 0
       bulk_param = [["index", anything, event.to_hash]]
 
-      allow(
+      allow(subject).to receive(:logger).and_return(logger)
 
       # Fail the first time bulk is called, succeed the next time
-      allow(
+      allow(subject.client).to receive(:bulk).with(bulk_param) do
         i += 1
         if i == 1
           raise error
         end
       end.and_return(response)
-
+      subject.multi_receive([event])
     end
 
     it "should retry the 429 till it goes away" do
-      expect(
+      expect(subject.client).to have_received(:bulk).twice
     end
 
     it "should log a debug message" do
-      expect(
+      expect(subject.logger).to have_received(:debug).with(/Encountered a retryable error/i, anything)
     end
   end
 end
@@ -245,19 +263,16 @@ describe "outputs/elasticsearch" do
       "timeout" => 0.1, # fast timeout
     }
   end
-  let(:eso) {LogStash::Outputs::ElasticSearch.new(options)}
 
   before do
-    eso.register
-
     # Expect a timeout to be logged.
-    expect(
-    expect(
+    expect(subject.logger).to receive(:error).with(/Attempted to send a bulk request to Elasticsearch/i, anything).at_least(:once)
+    expect(subject.client).to receive(:bulk).at_least(:twice).and_call_original
   end
 
   it "should fail after the timeout" do
     #pending("This is tricky now that we do healthchecks on instantiation")
-    Thread.new {
+    Thread.new { subject.multi_receive([LogStash::Event.new]) }
 
     # Allow the timeout to occur
     sleep 6
@@ -265,19 +280,19 @@ describe "outputs/elasticsearch" do
   end
 end
 describe "the action option" do
-  subject(:eso) {LogStash::Outputs::ElasticSearch.new(options)}
   context "with a sprintf action" do
-    let(:options) { {"action" => "%{myactionfield}"} }
+    let(:options) { {"action" => "%{myactionfield}" } }
 
     let(:event) { LogStash::Event.new("myactionfield" => "update", "message" => "blah") }
 
     it "should interpolate the requested action value when creating an event_action_tuple" do
-      expect(
+      expect(subject.event_action_tuple(event).first).to eql("update")
    end
  end
 
  context "with an invalid action" do
    let(:options) { {"action" => "SOME Garbaaage"} }
+    let(:do_register) { false } # this is what we want to test, so we disable the before(:each) call
 
    it "should raise a configuration error" do
      expect { subject.register }.to raise_error(LogStash::ConfigurationError)
@@ -289,8 +304,6 @@ describe "outputs/elasticsearch" do
   let(:manticore_double) do
     double("manticoreX#{self.inspect}")
   end
-
-  let(:eso) {LogStash::Outputs::ElasticSearch.new(options)}
 
   before(:each) do
     response_double = double("manticore response").as_null_object
@@ -300,17 +313,12 @@ describe "outputs/elasticsearch" do
     allow(manticore_double).to receive(:close)
 
     allow(::Manticore::Client).to receive(:new).and_return(manticore_double)
-
-  end
-
-  after(:each) do
-    eso.close
+    subject.register
   end
 
-
   shared_examples("an encrypted client connection") do
     it "should enable SSL in manticore" do
-      expect(
+      expect(subject.client.pool.urls.map(&:scheme).uniq).to eql(['https'])
     end
   end
 
@@ -330,22 +338,22 @@ describe "outputs/elasticsearch" do
   describe "retry_on_conflict" do
     let(:num_retries) { 123 }
     let(:event) { LogStash::Event.new("message" => "blah") }
-
+    let(:options) { { 'retry_on_conflict' => num_retries } }
 
     context "with a regular index" do
-      let(:options) {
+      let(:options) { super.merge("action" => "index") }
 
       it "should interpolate the requested action value when creating an event_action_tuple" do
-        action, params, event_data =
+        action, params, event_data = subject.event_action_tuple(event)
         expect(params).not_to include({:_retry_on_conflict => num_retries})
       end
     end
 
     context "using a plain update" do
-      let(:options) {
+      let(:options) { super.merge("action" => "update", "retry_on_conflict" => num_retries, "document_id" => 1) }
 
      it "should interpolate the requested action value when creating an event_action_tuple" do
-        action, params, event_data =
+        action, params, event_data = subject.event_action_tuple(event)
        expect(params).to include({:_retry_on_conflict => num_retries})
      end
    end
@@ -353,17 +361,17 @@ describe "outputs/elasticsearch" do
 
   describe "sleep interval calculation" do
     let(:retry_max_interval) { 64 }
-
+    let(:options) { { "retry_max_interval" => retry_max_interval } }
 
     it "should double the given value" do
-      expect(
-      expect(
+      expect(subject.next_sleep_interval(2)).to eql(4)
+      expect(subject.next_sleep_interval(32)).to eql(64)
     end
 
     it "should not increase the value past the max retry interval" do
       sleep_interval = 2
       100.times do
-        sleep_interval =
+        sleep_interval = subject.next_sleep_interval(sleep_interval)
         expect(sleep_interval).to be <= retry_max_interval
       end
     end
@@ -371,14 +379,15 @@ describe "outputs/elasticsearch" do
 
   describe "stale connection check" do
     let(:validate_after_inactivity) { 123 }
-
+    let(:options) { { "validate_after_inactivity" => validate_after_inactivity } }
+    let(:do_register) { false }
 
-    before do
+    before :each do
       allow(::Manticore::Client).to receive(:new).with(any_args).and_call_original
-      subject.register
     end
 
-    it "should set the correct http client option for 'validate_after_inactivity" do
+    it "should set the correct http client option for 'validate_after_inactivity'" do
+      subject.register
       expect(::Manticore::Client).to have_received(:new) do |options|
         expect(options[:check_connection_timeout]).to eq(validate_after_inactivity)
       end
@@ -387,22 +396,12 @@ describe "outputs/elasticsearch" do
 
   describe "custom parameters" do
 
-    let(:
-
-    let(:manticore_urls) { eso.client.pool.urls }
+    let(:manticore_urls) { subject.client.pool.urls }
     let(:manticore_url) { manticore_urls.first }
 
     let(:custom_parameters_hash) { { "id" => 1, "name" => "logstash" } }
     let(:custom_parameters_query) { custom_parameters_hash.map {|k,v| "#{k}=#{v}" }.join("&") }
 
-    before(:each) do
-      eso.register
-    end
-
-    after(:each) do
-      eso.close rescue nil
-    end
-
     context "using non-url hosts" do
 
       let(:options) {
@@ -415,7 +414,7 @@ describe "outputs/elasticsearch" do
       }
 
       it "creates a URI with the added parameters" do
-        expect(
+        expect(subject.parameters).to eql(custom_parameters_hash)
      end
 
      it "sets the query string on the HTTP client" do
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 9.0.0
+  version: 9.0.2
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-11-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -196,7 +196,9 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
-description: This gem is a Logstash plugin required to be installed on top of the
+description: This gem is a Logstash plugin required to be installed on top of the
+  Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This
+  gem is not a stand-alone program
 email: info@elastic.co
 executables: []
 extensions: []
@@ -280,10 +282,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.6.13
 signing_key:
 specification_version: 4
-summary:
+summary: Stores logs in Elasticsearch
 test_files:
 - spec/es_spec_helper.rb
 - spec/fixtures/htpasswd