logstash-output-elasticsearch 5.2.1-java → 5.3.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +2 -3
- data/lib/logstash/outputs/elasticsearch/common.rb +11 -7
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +1 -1
- data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb +1 -1
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +3 -13
- data/lib/logstash/outputs/elasticsearch/http_client.rb +48 -7
- data/lib/logstash/outputs/elasticsearch.rb +11 -7
- data/logstash-output-elasticsearch.gemspec +2 -2
- data/spec/integration/outputs/index_spec.rb +51 -8
- data/spec/integration/outputs/parent_spec.rb +1 -5
- data/spec/integration/outputs/routing_spec.rb +0 -4
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +0 -15
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +1 -1
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7185b4fb6010e9fc50370f2585835985d867e935
+  data.tar.gz: 799792dd65f34457cacdb14d969d7f69f7a56791
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c356227a6c159996b0bffee8998eae408ed9c7c7dffb3c1e5c36558562bb4065b498b7f0c588dfcd70915a85d24c4f98e75847d7e5663129beb0f842a706e849
+  data.tar.gz: 1460ab9245da87f9b60e08779c1a72fe14fcbd2e68d5e1d9de0ab0720de085cab65dac19ec92930cbf7c48d68bede750bb1c888cfc97e3165caa3f804ee18b0e
data/CHANGELOG.md
CHANGED
@@ -1,6 +1,5 @@
-## 5.2.1
-
-- depends on Adressable ~> 2.3.0 to satisfy development dependency of the core ([logstash/#6204](https://github.com/elastic/logstash/issues/6204))
+## 5.3.0
+- Bulk operations will now target 20MB chunks at a time to reduce heap usage
 
 ## 5.2.0
 - Change default lang for scripts to be painless, inline with ES 5.0. Earlier there was no default.
data/lib/logstash/outputs/elasticsearch/common.rb
CHANGED
@@ -19,8 +19,12 @@ module LogStash; module Outputs; class ElasticSearch;
 
     # Receive an array of events and immediately attempt to index them (no buffering)
     def multi_receive(events)
-      events.each_slice(@flush_size) do |slice|
-        retrying_submit(slice.map {|e| event_action_tuple(e) })
+      if @flush_size
+        events.each_slice(@flush_size) do |slice|
+          retrying_submit(slice.map {|e| event_action_tuple(e) })
+        end
+      else
+        retrying_submit(events.map {|e| event_action_tuple(e)})
       end
     end
 
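The practical effect: with `flush_size` set, events are still sliced into count-based batches; with it unset, the whole pipeline batch goes to `retrying_submit` at once and size-based chunking happens later in the HTTP client. A runnable sketch of just this branching, with a stubbed `retrying_submit` that records batch sizes (the harness below is ours, not plugin code):

```ruby
# Stub harness mirroring the multi_receive branching above (not plugin code).
class SliceDemo
  attr_reader :batches

  def initialize(flush_size = nil)
    @flush_size = flush_size
    @batches = []
  end

  def retrying_submit(tuples)
    @batches << tuples.size # record how many action tuples each call receives
  end

  def event_action_tuple(e)
    ["index", {}, e] # stand-in for the real (action, params, event) tuple
  end

  def multi_receive(events)
    if @flush_size
      events.each_slice(@flush_size) do |slice|
        retrying_submit(slice.map { |e| event_action_tuple(e) })
      end
    else
      retrying_submit(events.map { |e| event_action_tuple(e) })
    end
  end
end

with_flush = SliceDemo.new(3)
with_flush.multi_receive((1..10).to_a)
p with_flush.batches # => [3, 3, 3, 1]

without_flush = SliceDemo.new
without_flush.multi_receive((1..10).to_a)
p without_flush.batches # => [10]
```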
@@ -59,14 +63,14 @@ module LogStash; module Outputs; class ElasticSearch;
       VALID_HTTP_ACTIONS
     end
 
-    def retrying_submit(actions)
+    def retrying_submit(actions)
       # Initially we submit the full list of actions
       submit_actions = actions
 
       sleep_interval = @retry_initial_interval
 
       while submit_actions && submit_actions.length > 0
-
+
         # We retry with whatever is didn't succeed
         begin
           submit_actions = submit(submit_actions)
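The loop visible in this hunk runs until `submit` reports nothing left to retry, sleeping between rounds. A compressed sketch of that shape, with a fake `submit` that fails one action twice before letting it through (intervals and error handling in the real plugin are richer):

```ruby
# Toy model of the retry loop: submit returns only the actions still failing.
fails_left = { "b" => 2 } # hypothetical: action "b" fails twice, then succeeds

submit = lambda do |actions|
  actions.select do |a|
    next false unless fails_left.fetch(a, 0) > 0
    fails_left[a] -= 1
    true # still failing, keep it for the next round
  end
end

submit_actions = %w[a b c]
sleep_interval = 0.01 # stands in for @retry_initial_interval

while submit_actions && submit_actions.length > 0
  submit_actions = submit.call(submit_actions)
  next if submit_actions.empty?
  sleep(sleep_interval) # back off before retrying the leftovers
  sleep_interval *= 2   # the plugin grows this up to a cap
end

puts "all actions submitted"
```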
@@ -103,7 +107,7 @@ module LogStash; module Outputs; class ElasticSearch;
 
     def submit(actions)
       bulk_response = safe_bulk(actions)
-
+
       # If the response is nil that means we were in a retry loop
       # and aborted since we're shutting down
       # If it did return and there are no errors we're good as well
@@ -216,7 +220,7 @@ module LogStash; module Outputs; class ElasticSearch;
           sleep_interval = sleep_for_interval(sleep_interval)
           retry unless @stopping.true?
         else
-          @logger.error("Got a bad response code from server, but this code is not considered retryable. Request will be dropped", :code => e.response_code)
+          @logger.error("Got a bad response code from server, but this code is not considered retryable. Request will be dropped", :code => e.response_code, :body => e.response.body)
         end
       rescue => e
         # Stuff that should never happen
@@ -231,7 +235,7 @@ module LogStash; module Outputs; class ElasticSearch;
         @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
 
         # We retry until there are no errors! Errors should all go to the retry queue
-        sleep_interval = sleep_for_interval(sleep_interval)
+        sleep_interval = sleep_for_interval(sleep_interval)
         retry unless @stopping.true?
       end
     end
data/lib/logstash/outputs/elasticsearch/common_configs.rb
CHANGED
@@ -81,7 +81,7 @@ module LogStash; module Outputs; class ElasticSearch
     # If you specify a number larger than the batch size of your pipeline it will have no effect,
     # save for the case where a filter increases the size of an inflight batch by outputting
     # events.
-    mod.config :flush_size, :validate => :number, :
+    mod.config :flush_size, :validate => :number, :deprecate => "This setting is no longer necessary as we now try to restrict bulk requests to sane sizes. See the 'Batch Sizes' section of the docs. If you think you still need to restrict payloads based on the number, not size, of events, please open a ticket."
 
     # The amount of time since last flush before a flush is forced.
     #
data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb
CHANGED
@@ -45,7 +45,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       # but for our current purposes this is correct
       if resp.code < 200 || resp.code > 299 && resp.code != 404
         safe_url = ::LogStash::Outputs::ElasticSearch::SafeURL.without_credentials(url)
-        raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(resp.code, safe_url + path, body)
+        raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(resp.code, safe_url + path, resp.body)
      end
 
       resp
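One thing worth spelling out about the unchanged condition above: Ruby's `&&` binds tighter than `||`, so it parses as `resp.code < 200 || (resp.code > 299 && resp.code != 404)`. That is what the "for our current purposes this is correct" comment is defending: a 404 is deliberately not raised as an error, since callers treat it as a meaningful answer. A quick check (our snippet, not plugin code):

```ruby
# Quick check of the operator precedence the comment alludes to.
def error_code?(code)
  code < 200 || code > 299 && code != 404 # same as: code < 200 || (code > 299 && code != 404)
end

p error_code?(200) # => false
p error_code?(404) # => false (404 passes through to the caller)
p error_code?(500) # => true
p error_code?(100) # => true
```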
data/lib/logstash/outputs/elasticsearch/http_client/pool.rb
CHANGED
@@ -261,7 +261,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
         new_urls.each do |url|
           # URI objects don't have real hash equality! So, since this isn't perf sensitive we do a linear scan
           unless @url_info.keys.include?(url)
-            state_changes[:added] << url
+            state_changes[:added] << url.to_s
             add_url(url)
           end
         end
@@ -269,24 +269,14 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       # Delete connections not in the new list
       @url_info.each do |url,_|
         unless new_urls.include?(url)
-          state_changes[:removed] << url
+          state_changes[:removed] << url.to_s
           remove_url(url)
         end
       end
     end
 
     if state_changes[:removed].size > 0 || state_changes[:added].size > 0
-
-        logger.info("Elasticsearch pool URLs updated", :changes => safe_state_changes(state_changes))
-      end
-    end
-  end
-
-  def safe_state_changes(state_changes)
-    state_changes.reduce({}) do |acc, kv|
-      k,v = kv
-      acc[k] = v.map(&LogStash::Outputs::ElasticSearch::SafeURL.method(:without_credentials)).map(&:to_s)
-      acc
+      logger.info("Elasticsearch pool URLs updated", :changes => state_changes)
     end
   end
 
data/lib/logstash/outputs/elasticsearch/http_client.rb
CHANGED
@@ -5,6 +5,21 @@ require 'logstash/outputs/elasticsearch/http_client/pool'
 require 'logstash/outputs/elasticsearch/http_client/manticore_adapter'
 
 module LogStash; module Outputs; class ElasticSearch;
+  # This is a constant instead of a config option because
+  # there really isn't a good reason to configure it.
+  #
+  # The criteria used are:
+  # 1. We need a number that's less than 100MiB because ES
+  #    won't accept bulks larger than that.
+  # 2. It must be large enough to amortize the connection constant
+  #    across multiple requests.
+  # 3. It must be small enough that even if multiple threads hit this size
+  #    we won't use a lot of heap.
+  #
+  # We wound up agreeing that a number greater than 10 MiB and less than 100MiB
+  # made sense. We picked one on the lowish side to not use too much heap.
+  TARGET_BULK_BYTES = 20 * 1024 * 1024 # 20MiB
+
   class HttpClient
     attr_reader :client, :options, :logger, :pool, :action_count, :recv_count
     # This is here in case we use DEFAULT_OPTIONS in the future
@@ -38,9 +53,11 @@ module LogStash; module Outputs; class ElasticSearch;
     def bulk(actions)
       @action_count ||= 0
       @action_count += actions.size
-
+
       return if actions.empty?
-      bulk_body = actions.collect do |action, args, source|
+
+
+      bulk_actions = actions.collect do |action, args, source|
         args, source = update_action_builder(args, source) if action == 'update'
 
         if source && action != 'delete'
@@ -48,13 +65,37 @@ module LogStash; module Outputs; class ElasticSearch;
         else
           next { action => args }
         end
-      end.
-      flatten.
-      reduce("") do |acc,line|
-        acc << LogStash::Json.dump(line)
-        acc << "\n"
       end
 
+      bulk_body = ""
+      bulk_responses = []
+      bulk_actions.each do |action|
+        as_json = action.is_a?(Array) ?
+                    action.map {|line| LogStash::Json.dump(line)}.join("\n") :
+                    LogStash::Json.dump(action)
+        as_json << "\n"
+
+        if (bulk_body.size + as_json.size) > TARGET_BULK_BYTES
+          bulk_responses << bulk_send(bulk_body)
+          bulk_body = as_json
+        else
+          bulk_body << as_json
+        end
+      end
+
+      bulk_responses << bulk_send(bulk_body) if bulk_body.size > 0
+
+      join_bulk_responses(bulk_responses)
+    end
+
+    def join_bulk_responses(bulk_responses)
+      {
+        "errors" => bulk_responses.any? {|r| r["errors"] == true},
+        "items" => bulk_responses.reduce([]) {|m,r| m.concat(r["items"])}
+      }
+    end
+
+    def bulk_send(bulk_body)
       # Discard the URL
       url, response = @pool.post("_bulk", nil, bulk_body)
       LogStash::Json.load(response.body)
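The new chunking loop is easy to exercise outside the plugin. A minimal sketch with the byte target shrunk to 40 bytes so the split is visible, and `bulk_send` stubbed to record payload sizes (everything here is our harness; only the accumulate-or-flush logic mirrors the diff):

```ruby
require "json"

TARGET_BULK_BYTES = 40 # tiny stand-in for the real 20MiB constant
SENT = []

def bulk_send(bulk_body)
  SENT << bulk_body.bytesize
  { "errors" => false, "items" => [] } # fake ES bulk response
end

def bulk(bulk_actions)
  bulk_body = ""
  bulk_responses = []
  bulk_actions.each do |action|
    as_json = JSON.dump(action) + "\n"
    if (bulk_body.size + as_json.size) > TARGET_BULK_BYTES
      bulk_responses << bulk_send(bulk_body) # flush what we have so far
      bulk_body = as_json                    # oversized single docs still go out alone
    else
      bulk_body << as_json                   # keep accumulating under the target
    end
  end
  bulk_responses << bulk_send(bulk_body) if bulk_body.size > 0
  bulk_responses
end

bulk([{ "index" => {} }, { "msg" => "aaaa" }, { "index" => {} }, { "msg" => "bbbb" }])
p SENT # => [28, 28]; each flushed chunk stays near the byte target
```

Note the edge case the docs call out: a single serialized action larger than the target is still sent, alone, in its own request.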
data/lib/logstash/outputs/elasticsearch.rb
CHANGED
@@ -17,20 +17,20 @@ require "uri" # for escaping user input
 # yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
 # to upgrade Logstash in lock-step. For those still wishing to use the node or transport protocols please see
 # the <<plugins-outputs-elasticsearch_java,elasticsearch_java output plugin>>.
-#
+#
 # You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
 #
 # ==== Template management for Elasticsearch 5.x
-# Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
-# Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+# Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+# Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
 # behavior.
 #
 # ** Users installing ES 5.x and LS 5.x **
 # This change will not affect you and you will continue to use the ES defaults.
 #
 # ** Users upgrading from LS 2.x to LS 5.x with ES 5.x **
-# LS will not force upgrade the template, if `logstash` template already exists. This means you will still use
-# `.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+# LS will not force upgrade the template, if `logstash` template already exists. This means you will still use
+# `.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
 # the new template is installed.
 #
 # ==== Retry Policy
@@ -48,6 +48,10 @@ require "uri" # for escaping user input
 # NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
 # It is more performant for Elasticsearch to retry these exceptions than this plugin.
 #
+# ==== Batch Sizes ====
+# This plugin attempts to send batches of events as a single request. However, if
+# a request exceeds 20MB we will break it up until multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+#
 # ==== DNS Caching
 #
 # This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
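Because one logical bulk may now become several HTTP requests, the client merges the per-chunk responses so callers still see a single bulk response. A sketch of that merge, same shape as `join_bulk_responses` shown earlier, with made-up item payloads:

```ruby
# Merging per-chunk bulk responses back into one (stub data, not real ES output).
responses = [
  { "errors" => false, "items" => [{ "index" => { "status" => 201 } }] },
  { "errors" => true,  "items" => [{ "index" => { "status" => 429 } }] },
]

joined = {
  "errors" => responses.any? { |r| r["errors"] == true },
  "items"  => responses.reduce([]) { |m, r| m.concat(r["items"]) },
}

p joined["errors"]     # => true: any failing chunk marks the whole bulk
p joined["items"].size # => 2: items are concatenated in order
```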
@@ -146,7 +150,7 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # a timeout occurs, the request will be retried.
   config :timeout, :validate => :number, :default => 60
 
-  # Set the Elasticsearch errors in the whitelist that you don't want to log.
+  # Set the Elasticsearch errors in the whitelist that you don't want to log.
   # A useful example is when you want to skip all 409 errors
   # which are `document_already_exists_exception`.
   config :failure_type_logging_whitelist, :validate => :array, :default => []
@@ -173,7 +177,7 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # Resurrection is the process by which backend endpoints marked 'down' are checked
   # to see if they have come back to life
   config :resurrect_delay, :validate => :number, :default => 5
-
+
   # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
   # You may want to set this lower, if you get connection errors regularly
   # Quoting the Apache commons docs (this client is based Apache Commmons):
data/logstash-output-elasticsearch.gemspec
CHANGED
@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-output-elasticsearch'
-  s.version = '5.2.1'
+  s.version = '5.3.0'
   s.licenses = ['apache-2.0']
   s.summary = "Logstash Output to Elasticsearch"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -23,7 +23,7 @@ Gem::Specification.new do |s|
   s.add_runtime_dependency 'cabin', ['~> 0.6']
   s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
   s.add_development_dependency 'ftw', '~> 0.0.42'
-  s.add_development_dependency 'addressable', "~> 2.3.0"
+  s.add_development_dependency 'addressable', "~> 2.4.0" # used by FTW. V 2.5.0 is ruby 2.0 only.
   s.add_development_dependency 'logstash-codec-plain'
 
   if RUBY_PLATFORM == 'java'
data/spec/integration/outputs/index_spec.rb
CHANGED
@@ -1,21 +1,21 @@
 require_relative "../../../spec/es_spec_helper"
+require "logstash/outputs/elasticsearch"
 
 shared_examples "an indexer" do
+  let(:event) { LogStash::Event.new("message" => "Hello World!", "type" => type) }
   let(:index) { 10.times.collect { rand(10).to_s }.join("") }
   let(:type) { 10.times.collect { rand(10).to_s }.join("") }
   let(:event_count) { 10000 + rand(500) }
-  let(:flush_size) { rand(200) + 1 }
   let(:config) { "not implemented" }
+  let(:events) { event_count.times.map { event }.to_a }
   subject { LogStash::Outputs::ElasticSearch.new(config) }
 
   before do
     subject.register
-    event_count.times do
-      subject.multi_receive([LogStash::Event.new("message" => "Hello World!", "type" => type)])
-    end
   end
 
   it "ships events" do
+    subject.multi_receive(events)
     index_url = "http://#{get_host_port}/#{index}"
 
     ftw = FTW::Agent.new
@@ -42,13 +42,57 @@ shared_examples "an indexer" do
   end
 end
 
+describe "TARGET_BULK_BYTES", :integration => true do
+  let(:target_bulk_bytes) { LogStash::Outputs::ElasticSearch::TARGET_BULK_BYTES }
+  let(:event_count) { 1000 }
+  let(:events) { event_count.times.map { event }.to_a }
+  let(:config) {
+    {
+      "hosts" => get_host_port,
+      "index" => index
+    }
+  }
+  let(:index) { 10.times.collect { rand(10).to_s }.join("") }
+  let(:type) { 10.times.collect { rand(10).to_s }.join("") }
+  subject { LogStash::Outputs::ElasticSearch.new(config) }
+
+
+  before do
+    subject.register
+    allow(subject.client).to receive(:bulk_send).with(any_args).and_call_original
+    subject.multi_receive(events)
+  end
+
+  describe "batches that are too large for one" do
+    let(:event) { LogStash::Event.new("message" => "a " * (((target_bulk_bytes/2) / event_count)+1)) }
+
+    it "should send in two batches" do
+      expect(subject.client).to have_received(:bulk_send).twice do |payload|
+        expect(payload.size).to be <= target_bulk_bytes
+      end
+    end
+
+    describe "batches that fit in one" do
+      # Normally you'd want to generate a request that's just 1 byte below the limit, but it's
+      # impossible to know how many bytes an event will serialize as with bulk proto overhead
+      let(:event) { LogStash::Event.new("message" => "a") }
+
+      it "should send in one batch" do
+        expect(subject.client).to have_received(:bulk_send).once do |payload|
+          expect(payload.size).to be <= target_bulk_bytes
+        end
+      end
+    end
+  end
+end
+
+
 describe "an indexer with custom index_type", :integration => true do
   it_behaves_like "an indexer" do
     let(:config) {
       {
         "hosts" => get_host_port,
-        "index" => index,
-        "flush_size" => flush_size
+        "index" => index
       }
     }
   end
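Why the "too large" example must produce exactly two `bulk_send` calls: 1000 events, each carrying `"a "` repeated `((TARGET_BULK_BYTES / 2) / 1000) + 1` times, add up to just over the 20MiB target before any JSON or bulk-protocol overhead, yet comfortably under two targets. Back-of-the-envelope (our arithmetic, not spec code):

```ruby
target      = 20 * 1024 * 1024                 # TARGET_BULK_BYTES = 20_971_520
event_count = 1000
reps        = ((target / 2) / event_count) + 1 # 10_486 repetitions of "a "
msg_bytes   = 2 * reps                         # 20_972 bytes of message per event
total       = msg_bytes * event_count          # 20_972_000 bytes of messages alone

p total > target     # => true: cannot fit in one sub-20MiB request
p total < 2 * target # => true: but it fits comfortably in two
```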
@@ -60,8 +104,7 @@ describe "an indexer with no type value set (default to logs)", :integration =>
     let(:config) {
       {
         "hosts" => get_host_port,
-        "index" => index,
-        "flush_size" => flush_size
+        "index" => index
       }
     }
   end
data/spec/integration/outputs/parent_spec.rb
CHANGED
@@ -4,7 +4,6 @@ shared_examples "a parent indexer" do
   let(:index) { 10.times.collect { rand(10).to_s }.join("") }
   let(:type) { 10.times.collect { rand(10).to_s }.join("") }
   let(:event_count) { 10000 + rand(500) }
-  let(:flush_size) { rand(200) + 1 }
   let(:parent) { "not_implemented" }
   let(:config) { "not_implemented" }
   subject { LogStash::Outputs::ElasticSearch.new(config) }
@@ -17,7 +16,7 @@ shared_examples "a parent indexer" do
     ftw.put!("#{index_url}", :body => mapping.to_json)
     pdoc = { "foo" => "bar" }
     ftw.put!("#{index_url}/#{type}_parent/test", :body => pdoc.to_json)
-
+
     subject.register
     subject.multi_receive(event_count.times.map { LogStash::Event.new("link_to" => "test", "message" => "Hello World!", "type" => type) })
   end
@@ -49,7 +48,6 @@ describe "(http protocol) index events with static parent", :integration => true
     {
       "hosts" => get_host_port,
       "index" => index,
-      "flush_size" => flush_size,
       "parent" => parent
     }
   }
@@ -62,10 +60,8 @@ describe "(http_protocol) index events with fieldref in parent value", :integrat
     {
       "hosts" => get_host_port,
       "index" => index,
-      "flush_size" => flush_size,
       "parent" => "%{link_to}"
     }
   }
   end
 end
-
data/spec/integration/outputs/routing_spec.rb
CHANGED
@@ -4,7 +4,6 @@ shared_examples "a routing indexer" do
   let(:index) { 10.times.collect { rand(10).to_s }.join("") }
   let(:type) { 10.times.collect { rand(10).to_s }.join("") }
   let(:event_count) { 10000 + rand(500) }
-  let(:flush_size) { rand(200) + 1 }
   let(:routing) { "not_implemented" }
   let(:config) { "not_implemented" }
   subject { LogStash::Outputs::ElasticSearch.new(config) }
@@ -43,7 +42,6 @@ describe "(http protocol) index events with static routing", :integration => tru
     {
       "hosts" => get_host_port,
       "index" => index,
-      "flush_size" => flush_size,
       "routing" => routing
     }
   }
@@ -57,10 +55,8 @@ describe "(http_protocol) index events with fieldref in routing value", :integra
     {
       "hosts" => get_host_port,
       "index" => index,
-      "flush_size" => flush_size,
       "routing" => "%{message}"
     }
   }
   end
 end
-
data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb
CHANGED
@@ -93,21 +93,6 @@ describe LogStash::Outputs::ElasticSearch::HttpClient::Pool do
       expect(subject).to have_received(:in_use_connections).twice
     end
   end
-
-  describe "safe_state_changes" do
-    let(:state_changes) do
-      {
-        :added => [URI.parse("http://sekretu:sekretp@foo1")],
-        :removed => [URI.parse("http://sekretu:sekretp@foo2")]
-      }
-    end
-    let(:processed) { subject.safe_state_changes(state_changes)}
-
-    it "should hide passwords" do
-      expect(processed[:added].any? {|p| p =~ /sekretp/ }).to be false
-      expect(processed[:removed].any? {|p| p =~ /sekretp/ }).to be false
-    end
-  end
 
   describe "connection management" do
     context "with only one URL in the list" do
data/spec/unit/outputs/elasticsearch/http_client_spec.rb
CHANGED
@@ -3,7 +3,7 @@ require "logstash/outputs/elasticsearch/http_client"
 require "java"
 
 describe LogStash::Outputs::ElasticSearch::HttpClient do
-  let(:base_options) { {:hosts => ["127.0.0.1"], :logger => Cabin::Channel.get }
+  let(:base_options) { {:hosts => ["127.0.0.1"], :logger => Cabin::Channel.get} }
 
   describe "Host/URL Parsing" do
     subject { described_class.new(base_options) }
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 5.2.1
+  version: 5.3.0
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-11-
+date: 2016-11-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -83,7 +83,7 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: 2.3.0
+      version: 2.4.0
   name: addressable
   prerelease: false
   type: :development
@@ -91,7 +91,7 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: 2.3.0
+      version: 2.4.0
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
   requirements: