logstash-output-elasticsearch 7.3.8-java → 7.4.0-java

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
- SHA1:
3
- metadata.gz: 00d19efacd7374a6f3d095ac5a8b5d76d3357ac0
4
- data.tar.gz: 08e21adfff2a195a0d0c6cbab1b4051ee4df39e5
2
+ SHA256:
3
+ metadata.gz: 868ec97797a9b0bf6a6d57b805ce4097c937ab6c30bb0b2f805cd809e0c6c685
4
+ data.tar.gz: b91cdaf715b3cfae6836aeeb780e7a7a52fd6ee9fdfd4b182fc1596e238b88e8
5
5
  SHA512:
6
- metadata.gz: df592abc3005cf4e81c5869ecf2705afd2ee4aefd4fb95330c1ff876677ca4862f7032c99878a8f7ecd2dedf3cfbdbf7b767e26083ac271895e502f63af0d464
7
- data.tar.gz: 4f42e2dc5518590c4765a9d1c4f7d1a63870ca8b371bdaca7e2a7e98e9d7e9372a087be189316115a0b898723fad38bbfdb1a1caacfd66226437416bbdf697b9
6
+ metadata.gz: bdc11c661ef02325e7b984145244acd4f8df9842e4009928a8a4a9d6d9d2bae288a6aed8047fc15ea0cbf3a096c6211fca1c50aa1dbf191a97d6d96f18bd0f44
7
+ data.tar.gz: 066b2f9fa955dc737495ef330a0f75473ede71c7fd3bc5b265f60767dbc0087bd02d247d00dc435b0542a698d90421edcd5ddf9ecd275f6e61351b5e0dbe1b78
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
1
+ ## 7.4.0
2
+ - Retry all non-200 responses of the bulk API indefinitely
3
+ - Improve documentation on retry codes
4
+
1
5
  ## 7.3.8
2
6
  - Fix bug where java class names were logged rather than actual host names in various scenarios
3
7
 
data/docs/index.asciidoc CHANGED
@@ -55,17 +55,18 @@ the new template is installed.
55
55
 
56
56
  ==== Retry Policy
57
57
 
58
- The retry policy has changed significantly in the 2.2.0 release.
58
+ The retry policy has changed significantly in the 7.4.0 release.
59
59
  This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
60
- either partial or total failures.
60
+ either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
61
+ request are handled differently than error codes for individual documents.
61
62
 
62
- The following errors are retried infinitely:
63
+ HTTP requests to the bulk API are expected to return a 200 response code. Requests that return any other response code are retried indefinitely.
63
64
 
64
- - Network errors (inability to connect)
65
- - 429 (Too many requests) and
66
- - 503 (Service unavailable) errors
65
+ Document-level errors are handled as follows:
66
+ - 400 and 404 errors are sent to the DLQ if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
67
+ - 409 errors (conflict) are logged as a warning and dropped.
67
68
 
68
- NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
69
+ Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
69
70
  It is more performant for Elasticsearch to retry these exceptions than this plugin.
70
71
 
71
72
  ==== Batch Sizes ====
@@ -4,14 +4,10 @@ module LogStash; module Outputs; class ElasticSearch;
4
4
  module Common
5
5
  attr_reader :client, :hosts
6
6
 
7
- # These are codes for temporary recoverable conditions
8
- # 429 just means that ES has too much traffic ATM
9
- # 503 means it , or a proxy is temporarily unavailable
10
- RETRYABLE_CODES = [429, 503]
11
-
12
- DLQ_CODES = [400, 404]
13
- SUCCESS_CODES = [200, 201]
14
- CONFLICT_CODE = 409
7
+ # These codes apply to documents, not at the request level
8
+ DOC_DLQ_CODES = [400, 404]
9
+ DOC_SUCCESS_CODES = [200, 201]
10
+ DOC_CONFLICT_CODE = 409
15
11
 
16
12
  # When you use external versioning, you are communicating that you want
17
13
  # to ignore conflicts. More obviously, since an external version is a
@@ -140,12 +136,12 @@ module LogStash; module Outputs; class ElasticSearch;
140
136
  # - For 409, we log and drop. there is nothing we can do
141
137
  # - For a mapping error, we send to dead letter queue for a human to intervene at a later point.
142
138
  # - For everything else there's mastercard. Yep, and we retry indefinitely. This should fix #572 and other transient network issues
143
- if SUCCESS_CODES.include?(status)
139
+ if DOC_SUCCESS_CODES.include?(status)
144
140
  next
145
- elsif CONFLICT_CODE == status
141
+ elsif DOC_CONFLICT_CODE == status
146
142
  @logger.warn "Failed action.", status: status, action: action, response: response if !failure_type_logging_whitelist.include?(failure["type"])
147
143
  next
148
- elsif DLQ_CODES.include?(status)
144
+ elsif DOC_DLQ_CODES.include?(status)
149
145
  action_event = action[2]
150
146
  # To support bwc, we check if DLQ exists. otherwise we log and drop event (previous behavior)
151
147
  if @dlq_writer
@@ -250,29 +246,22 @@ module LogStash; module Outputs; class ElasticSearch;
250
246
  sleep_interval = next_sleep_interval(sleep_interval)
251
247
  retry unless @stopping.true?
252
248
  rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
253
- if RETRYABLE_CODES.include?(e.response_code)
254
- log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s}
255
- log_hash[:body] = e.body if @logger.debug? # Generally this is too verbose
256
- message = "Encountered a retryable error. Will Retry with exponential backoff "
257
-
258
- # We treat 429s as a special case because these really aren't errors, but
259
- # rather just ES telling us to back off a bit, which we do.
260
- # The other retryable code is 503, which are true errors
261
- # Even though we retry the user should be made aware of these
262
- if e.response_code == 429
263
- logger.debug(message, log_hash)
264
- else
265
- logger.error(message, log_hash)
266
- end
267
-
268
- sleep_interval = sleep_for_interval(sleep_interval)
269
- retry
249
+ log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s}
250
+ log_hash[:body] = e.body if @logger.debug? # Generally this is too verbose
251
+ message = "Encountered a retryable error. Will Retry with exponential backoff "
252
+
253
+ # We treat 429s as a special case because these really aren't errors, but
254
+ # rather just ES telling us to back off a bit, which we do.
255
+ # The other retryable code is 503, which are true errors
256
+ # Even though we retry the user should be made aware of these
257
+ if e.response_code == 429
258
+ logger.debug(message, log_hash)
270
259
  else
271
- log_hash = {:code => e.response_code,
272
- :response_body => e.response_body}
273
- log_hash[:request_body] = e.request_body if @logger.debug?
274
- @logger.error("Got a bad response code from server, but this code is not considered retryable. Request will be dropped", log_hash)
260
+ logger.error(message, log_hash)
275
261
  end
262
+
263
+ sleep_interval = sleep_for_interval(sleep_interval)
264
+ retry
276
265
  rescue => e
277
266
  # Stuff that should never happen
278
267
  # For all other errors print out full connection issues
@@ -285,7 +274,6 @@ module LogStash; module Outputs; class ElasticSearch;
285
274
 
286
275
  @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
287
276
 
288
- # We retry until there are no errors! Errors should all go to the retry queue
289
277
  sleep_interval = sleep_for_interval(sleep_interval)
290
278
  retry unless @stopping.true?
291
279
  end
@@ -140,6 +140,13 @@ module LogStash; module Outputs; class ElasticSearch;
140
140
  body_stream.truncate(0)
141
141
  body_stream.seek(0)
142
142
  end
143
+
144
+ if response.code != 200
145
+ raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(
146
+ response.code, @bulk_path, body_stream.to_s, response.body
147
+ )
148
+ end
149
+
143
150
  LogStash::Json.load(response.body)
144
151
  end
145
152
 
@@ -1,6 +1,6 @@
1
1
  Gem::Specification.new do |s|
2
2
  s.name = 'logstash-output-elasticsearch'
3
- s.version = '7.3.8'
3
+ s.version = '7.4.0'
4
4
  s.licenses = ['apache-2.0']
5
5
  s.summary = "Logstash Output to Elasticsearch"
6
6
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -212,8 +212,10 @@ describe "outputs/elasticsearch" do
212
212
  )
213
213
  end
214
214
  let(:logger) { double("logger").as_null_object }
215
+ let(:response) { { :errors => [], :items => [] } }
215
216
 
216
217
  before(:each) do
218
+
217
219
  i = 0
218
220
  bulk_param = [["index", anything, event.to_hash]]
219
221
 
@@ -225,7 +227,7 @@ describe "outputs/elasticsearch" do
225
227
  if i == 1
226
228
  raise error
227
229
  end
228
- end
230
+ end.and_return(response)
229
231
  eso.multi_receive([event])
230
232
  end
231
233
 
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-output-elasticsearch
3
3
  version: !ruby/object:Gem::Version
4
- version: 7.3.8
4
+ version: 7.4.0
5
5
  platform: java
6
6
  authors:
7
7
  - Elastic
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2017-07-20 00:00:00.000000000 Z
11
+ date: 2017-08-21 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  requirement: !ruby/object:Gem::Requirement
@@ -196,7 +196,9 @@ dependencies:
196
196
  - - ">="
197
197
  - !ruby/object:Gem::Version
198
198
  version: '0'
199
- description: This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program
199
+ description: This gem is a Logstash plugin required to be installed on top of the
200
+ Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This
201
+ gem is not a stand-alone program
200
202
  email: info@elastic.co
201
203
  executables: []
202
204
  extensions: []
@@ -280,7 +282,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
280
282
  version: '0'
281
283
  requirements: []
282
284
  rubyforge_project:
283
- rubygems_version: 2.4.8
285
+ rubygems_version: 2.6.11
284
286
  signing_key:
285
287
  specification_version: 4
286
288
  summary: Logstash Output to Elasticsearch