logstash-output-elasticsearch 7.2.2-java → 7.3.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 21f1b50d8253533385e29f4317d2c4354ebf6231
- data.tar.gz: ccb251840a4bbf11a3b21bc98a83cf3aaac18198
+ metadata.gz: f92df38e9dc63ba5960b27d4f673f4a4e3914ef0
+ data.tar.gz: 3e0b56b678289dcb335e60f77f46dc860fd5a68d
  SHA512:
- metadata.gz: f89f93ce95860b1119c274227ad8a760f67610e2adffc6f12e4cd2b5809be1e00b717ad59c1c60d849e18aea13a69426ee83828aa890dc1e233136aa842c70f9
- data.tar.gz: a4abd5d2ad750e811d850062337d8d595f5f6e84c53dbc86b5d98878835c3db9486d9b854de52f411428f954ce901fc90d31c29333b8c0f2dd44e4be2a776caa
+ metadata.gz: 9b1fd948848fdfd1c7e6e3448ff4bf47a44b05a15042f16cf1d43e29762797aff793123e713798f6b444b135fce85e3af6deae526dc75d6e7998924efa1ba58f
+ data.tar.gz: 395df9184cb1b7b1dd7ce271dc37609ccad2ea1b7c8690e973352d3952d572e78b5053dc62b27470f56d878ea5ad2897517d6b751a26626f6f565ef27c641617
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 7.3.0
+ - Log 429 errors as debug instead of error. These aren't actual errors and cause users undue concern.
+   This status code is triggered when ES wants LS to backoff, which it does correctly (exponentially)
+
  ## 7.2.2
  - Docs: Add requirement to use version 6.2.5 or higher to support sending Content-Type headers.
 
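The change the 7.3.0 entry describes appears in the common.rb hunk further down; as a quick illustration of the intent, here is a minimal, hedged sketch (standalone helper, not the plugin's code) of routing a 429 "please back off" response to debug while keeping other retryable responses at error level:

```ruby
require 'logger'

# Illustrative only: 429 means ES is applying backpressure, so it is logged
# at debug; genuinely broken-but-retryable responses (e.g. 502/503) stay at
# error so the user still sees them.
def log_retryable(logger, response_code, details)
  message = "Encountered a retryable error. Will Retry with exponential backoff"
  if response_code == 429
    logger.debug("#{message} #{details}")
  else
    logger.error("#{message} #{details}")
  end
end

logger = Logger.new($stdout)
log_retryable(logger, 429, { :code => 429 })  # reported at debug
log_retryable(logger, 503, { :code => 503 })  # reported at error
```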
@@ -4,7 +4,7 @@ module LogStash; module Outputs; class ElasticSearch;
  module Common
  attr_reader :client, :hosts
 
- RETRYABLE_CODES = [429, 503]
+ DLQ_CODES = [400, 404]
  SUCCESS_CODES = [200, 201]
  CONFLICT_CODE = 409
 
@@ -16,6 +16,8 @@ module LogStash; module Outputs; class ElasticSearch;
 
  def register
  @stopping = Concurrent::AtomicBoolean.new(false)
+ # To support BWC, we check if DLQ exists in core (< 5.4). If it doesn't, we use nil to resort to previous behavior.
+ @dlq_writer = respond_to?(:execution_context) ? execution_context.dlq_writer : nil
  setup_hosts # properly sets @hosts
  build_client
  install_template
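The `respond_to?(:execution_context)` check is what keeps the plugin loadable on Logstash cores older than 5.4, which expose no dead letter queue API. A self-contained sketch of the same duck-typing pattern (hypothetical class, not part of the plugin):

```ruby
# On a core without the DLQ API, `execution_context` is simply not defined,
# so the writer falls back to nil and the old log-and-drop path is used.
class PluginOnOldCore
  def dlq_writer
    respond_to?(:execution_context) ? execution_context.dlq_writer : nil
  end
end

puts PluginOnOldCore.new.dlq_writer.inspect  # => nil
```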
@@ -128,16 +130,29 @@ module LogStash; module Outputs; class ElasticSearch;
  action = actions[idx]
  action_params = action[1]
 
+ # Retry logic: If it is success, we move on. If it is a failure, we have 3 paths:
+ # - For 409, we log and drop. there is nothing we can do
+ # - For a mapping error, we send to dead letter queue for a human to intervene at a later point.
+ # - For everything else there's mastercard. Yep, and we retry indefinitely. This should fix #572 and other transient network issues
  if SUCCESS_CODES.include?(status)
  next
- elsif CONFLICT_CODE == status && VERSION_TYPES_PERMITTING_CONFLICT.include?(action_params[:version_type])
- @logger.debug "Ignoring external version conflict: status[#{status}] failure[#{failure}] version[#{action_params[:version]}] version_type[#{action_params[:version_type]}]"
+ elsif CONFLICT_CODE == status
+ @logger.warn "Failed action.", status: status, action: action, response: response if !failure_type_logging_whitelist.include?(failure["type"])
  next
- elsif RETRYABLE_CODES.include?(status)
- @logger.info "retrying failed action with response code: #{status} (#{failure})"
+ elsif DLQ_CODES.include?(status)
+ action_event = action[2]
+ # To support bwc, we check if DLQ exists. otherwise we log and drop event (previous behavior)
+ if @dlq_writer
+ # TODO: Change this to send a map with { :status => status, :action => action } in the future
+ @dlq_writer.write(action_event, "Could not index event to Elasticsearch. status: #{status}, action: #{action}, response: #{response}")
+ else
+ @logger.warn "Could not index event to Elasticsearch.", status: status, action: action, response: response
+ end
+ next
+ else
+ # only log what the user whitelisted
+ @logger.info "retrying failed action with response code: #{status} (#{failure})" if !failure_type_logging_whitelist.include?(failure["type"])
  actions_to_retry << action
- elsif !failure_type_logging_whitelist.include?(failure["type"])
- @logger.warn "Failed action.", status: status, action: action, response: response
  end
  end
 
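In summary, the new branch order routes each bulk item by status: success moves on, 409 is logged and dropped, 400/404 go to the dead letter queue (or are logged if no DLQ writer is available), and everything else is retried indefinitely. A condensed, standalone sketch of that routing (illustrative, not the plugin's API):

```ruby
SUCCESS_CODES = [200, 201]
CONFLICT_CODE = 409
DLQ_CODES     = [400, 404]

# Map a per-item bulk response status to the action taken in the hunk above.
def route_for(status)
  return :done  if SUCCESS_CODES.include?(status)
  return :drop  if status == CONFLICT_CODE
  return :dlq   if DLQ_CODES.include?(status)
  :retry
end

[201, 409, 400, 404, 429, 503].each { |s| puts "#{s} => #{route_for(s)}" }
```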
@@ -232,7 +247,17 @@ module LogStash; module Outputs; class ElasticSearch;
  if RETRYABLE_CODES.include?(e.response_code)
  log_hash = {:code => e.response_code, :url => e.url.sanitized}
  log_hash[:body] = e.body if @logger.debug? # Generally this is too verbose
- @logger.error("Attempted to send a bulk request to elasticsearch but received a bad HTTP response code!", log_hash)
+ message = "Encountered a retryable error. Will Retry with exponential backoff "
+
+ # We treat 429s as a special case because these really aren't errors, but
+ # rather just ES telling us to back off a bit, which we do.
+ # The other retryable codes are 502 and 503, which are true errors
+ # Even though we retry the user should be made aware of these
+ if e.response_code == 429
+ @logger.debug(message, log_hash)
+ else
+ @logger.error(message, log_hash)
+ end
 
  sleep_interval = sleep_for_interval(sleep_interval)
  retry unless @stopping.true?
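The surrounding method keeps resubmitting the whole bulk request with a growing sleep between attempts. A hedged sketch of that shape (the helper body and the 64-second cap are assumptions for illustration, not the plugin's exact implementation):

```ruby
# Sleep for the current interval, then return a doubled interval capped at `max`.
def sleep_for_interval(current, max = 64)
  sleep(current)
  [current * 2, max].min
end

attempts = 0
sleep_interval = 0.1
begin
  attempts += 1
  raise "simulated retryable bulk failure" if attempts < 3
  puts "bulk request succeeded after #{attempts} attempts"
rescue => e
  # Back off exponentially, then retry the whole bulk request.
  sleep_interval = sleep_for_interval(sleep_interval)
  retry
end
```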
@@ -91,24 +91,9 @@ module LogStash; module Outputs; class ElasticSearch
  # Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
  mod.config :hosts, :validate => :uri, :default => [::LogStash::Util::SafeURI.new("//127.0.0.1")], :list => true
 
- # This plugin uses the bulk index API for improved indexing performance.
- # This setting defines the maximum sized bulk request Logstash will make.
- # You may want to increase this to be in line with your pipeline's batch size.
- # If you specify a number larger than the batch size of your pipeline it will have no effect,
- # save for the case where a filter increases the size of an inflight batch by outputting
- # events.
- mod.config :flush_size, :validate => :number, :deprecate => "This setting is no longer necessary as we now try to restrict bulk requests to sane sizes. See the 'Batch Sizes' section of the docs. If you think you still need to restrict payloads based on the number, not size, of events, please open a ticket."
-
- # The amount of time since last flush before a flush is forced.
- #
- # This setting helps ensure slow event rates don't get stuck in Logstash.
- # For example, if your `flush_size` is 100, and you have received 10 events,
- # and it has been more than `idle_flush_time` seconds since the last flush,
- # Logstash will flush those 10 events automatically.
- #
- # This helps keep both fast and slow log streams moving along in
- # near-real-time.
- mod.config :idle_flush_time, :validate => :number, :default => 1
+ mod.config :flush_size, :validate => :number, :deprecated => "This setting is no longer necessary as we now try to restrict bulk requests to sane sizes. See the 'Batch Sizes' section of the docs. If you think you still need to restrict payloads based on the number, not size, of events, please open a ticket."
+
+ mod.config :idle_flush_time, :validate => :number, :default => 1, :deprecated => "This is a no-op now as every pipeline batch is flushed synchronously obviating the need for this option."
 
  # Set upsert content for update mode.s
  # Create a new document with this parameter as json string if `document_id` doesn't exists
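With `flush_size` and `idle_flush_time` deprecated to no-ops, the size of each bulk request is simply the pipeline batch handed to `multi_receive`, which (as an assumption about Logstash core behavior) is governed by `pipeline.batch.size` rather than a plugin option. A simplified stand-in illustrating that relationship (hypothetical class, not the plugin's internals):

```ruby
# Each call to multi_receive submits exactly the batch it receives;
# there is no plugin-side buffering or timed flush anymore.
class BatchSizedOutput
  def multi_receive(events)
    submit(events)
  end

  def submit(events)
    puts "bulk request with #{events.size} events"
  end
end

BatchSizedOutput.new.multi_receive(Array.new(125) { {} })
# => bulk request with 125 events
```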
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-output-elasticsearch'
- s.version = '7.2.2'
+ s.version = '7.3.0'
  s.licenses = ['apache-2.0']
  s.summary = "Logstash Output to Elasticsearch"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -88,14 +88,18 @@ describe "failures in bulk class expected behavior", :integration => true do
  subject.multi_receive([event1, event1, event1, event2])
  end
 
- it "should retry actions with response status of 429" do
- subject.register
+ retryable_codes = [429, 502, 503]
 
- mock_actions_with_response({"errors" => true, "statuses" => [429]},
- {"errors" => false})
- expect(subject).to receive(:submit).with([action1]).twice.and_call_original
+ retryable_codes.each do |code|
+ it "should retry actions with response status of #{code}" do
+ subject.register
 
- subject.multi_receive([event1])
+ mock_actions_with_response({"errors" => true, "statuses" => [code]},
+ {"errors" => false})
+ expect(subject).to receive(:submit).with([action1]).twice.and_call_original
+
+ subject.multi_receive([event1])
+ end
  end
 
  it "should retry an event infinitely until a non retryable status occurs" do
@@ -107,7 +111,7 @@ describe "failures in bulk class expected behavior", :integration => true do
  {"errors" => true, "statuses" => [429]},
  {"errors" => true, "statuses" => [429]},
  {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [500]})
+ {"errors" => true, "statuses" => [400]})
 
  subject.multi_receive([event1])
  end
@@ -126,7 +130,7 @@ describe "failures in bulk class expected behavior", :integration => true do
  {"errors" => true, "statuses" => [429]},
  {"errors" => true, "statuses" => [429]},
  {"errors" => true, "statuses" => [429]},
- {"errors" => true, "statuses" => [500]})
+ {"errors" => true, "statuses" => [400]})
 
  subject.multi_receive([event1])
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-elasticsearch
  version: !ruby/object:Gem::Version
- version: 7.2.2
+ version: 7.3.0
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-03-28 00:00:00.000000000 Z
+ date: 2017-04-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement