logstash-input-elasticsearch 4.14.0 → 4.15.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +3 -0
- data/docs/index.asciidoc +13 -0
- data/lib/logstash/helpers/loggable_try.rb +18 -0
- data/lib/logstash/inputs/elasticsearch.rb +56 -10
- data/logstash-input-elasticsearch.gemspec +1 -1
- data/spec/inputs/elasticsearch_spec.rb +186 -56
- metadata +3 -2
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9379a31460bf649615c038b1b35d207f6a6db869bb1a27e963da70e278ae6bcd
+  data.tar.gz: f62823b8ddfb587ce9614a1d131be1bce2186dbd859287b1107757917a754c7e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c675ed6a5d1a4a104313611e4895171c6461149d0415c5ad4c276e42b5b99672584ae38ddc37f3e32521074727051c68f0d10a37b77064d0cea6ea03f041a3b9
+  data.tar.gz: beba1b98be77880e6e3712cebdd0474d68c33556a86236e828a3332d2e812b187e920e78f6fb006b27b82269faa0e0654568d050f7057ae7db48da7fd9a20e31
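For anyone who wants to check a downloaded artifact against these digests, here is a hedged Ruby sketch (not part of the gem; the local `.gem` filename is an assumption). A `.gem` file is a plain tar archive whose members include metadata.gz and data.tar.gz, which are exactly the files checksums.yaml covers:

    require 'digest'
    require 'yaml'
    require 'rubygems/package'

    # Expected digests, as published in checksums.yaml above.
    sums = YAML.safe_load(File.read('checksums.yaml'))

    # Walk the tar members of the gem and compare each one we have a digest for.
    File.open('logstash-input-elasticsearch-4.15.0.gem', 'rb') do |io|
      tar = Gem::Package::TarReader.new(io)
      tar.each do |entry|
        expected = sums['SHA256'][entry.full_name]
        next unless expected
        actual = Digest::SHA256.hexdigest(entry.read)
        puts "#{entry.full_name}: #{actual == expected ? 'OK' : 'MISMATCH'}"
      end
    end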
data/CHANGELOG.md CHANGED

@@ -1,3 +1,6 @@
+## 4.15.0
+  - Feat: add `retries` option, allowing a failed query to be retried [#179](https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/179)
+
 ## 4.14.0
   - Refactor: switch to using scheduler mixin [#177](https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/177)
 
data/docs/index.asciidoc CHANGED

@@ -124,6 +124,7 @@ This plugin supports the following configuration options plus the <<plugins-{typ
 | <<plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
 | <<plugins-{type}s-{plugin}-socket_timeout_seconds>> | <<number,number>>|No
 | <<plugins-{type}s-{plugin}-target>> | {logstash-ref}/field-references-deepdive.html[field reference] | No
+| <<plugins-{type}s-{plugin}-retries>> | <<number,number>>|No
 | <<plugins-{type}s-{plugin}-user>> |<<string,string>>|No
 |=======================================================================
 

@@ -339,6 +340,17 @@ The maximum amount of time, in seconds, for a single request to Elasticsearch.
 Request timeouts tend to occur when an individual page of data is very large, such as when it contains large-payload
 documents and/or the <<plugins-{type}s-{plugin}-size>> has been specified as a large value.
 
+
+[id="plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+* Value type is <<number,number>>
+* Default value is `0`
+
+The number of times to re-run the query after the first failure. If the query still fails after all retries, an error message is logged.
+The default is `0` (no retries); the value must be zero or greater.
+
+
 [id="plugins-{type}s-{plugin}-schedule"]
 ===== `schedule`
 

@@ -424,6 +436,7 @@ When the `target` is set to a field reference, the `_source` of the hit is place
 This option can be useful to avoid populating unknown fields when a downstream schema such as ECS is enforced.
 It is also possible to target an entry in the event's metadata, which will be available during event processing but not exported to your outputs (e.g., `target \=> "[@metadata][_source]"`).
 
+
 [id="plugins-{type}s-{plugin}-user"]
 ===== `user`
 
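To make the new option concrete, a minimal pipeline using it might look like the following (an illustrative sketch, not taken from the docs above; the hosts, query, and retry count are placeholder values):

    input {
      elasticsearch {
        hosts   => ["localhost:9200"]
        query   => '{ "query": { "match_all": {} } }'
        retries => 3   # up to 3 re-runs after the first failure, i.e. at most 4 attempts
      }
    }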
data/lib/logstash/helpers/loggable_try.rb ADDED

@@ -0,0 +1,18 @@
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License;
+# you may not use this file except in compliance with the Elastic License.
+
+require 'stud/try'
+
+module LogStash module Helpers
+  class LoggableTry < Stud::Try
+    def initialize(logger, name)
+      @logger = logger
+      @name = name
+    end
+
+    def log_failure(exception, fail_count, message)
+      @logger.warn("Attempt to #{@name} but failed. #{message}", fail_count: fail_count, exception: exception.message)
+    end
+  end
+end end
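To make the helper's behavior concrete, here is a hedged standalone sketch (assuming the gem and its `stud` dependency are on the load path; the logger stub is hypothetical). `Stud::Try#try` re-runs the block once per element of the enumerable it is given and calls `log_failure` for each failed attempt, which the subclass above routes to the plugin's logger:

    require 'stud/try'
    require 'logstash/helpers/loggable_try'

    # Hypothetical logger stub: any object responding to #warn works here.
    logger = Object.new
    def logger.warn(message, details = {})
      puts "WARN #{message} #{details.inspect}"
    end

    attempts = 0
    try = LogStash::Helpers::LoggableTry.new(logger, 'run query')

    # Three elements => at most three attempts. The first two raise, each
    # producing one WARN line via #log_failure; the third succeeds.
    result = try.try(3.times) do
      attempts += 1
      raise 'transient failure' if attempts < 3
      :ok
    end

    result # => :ok, after two logged warnings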
data/lib/logstash/inputs/elasticsearch.rb CHANGED

@@ -10,6 +10,7 @@ require 'logstash/plugin_mixins/ecs_compatibility_support/target_check'
 require 'logstash/plugin_mixins/ca_trusted_fingerprint_support'
 require "logstash/plugin_mixins/scheduler"
 require "base64"
+require 'logstash/helpers/loggable_try'
 
 require "elasticsearch"
 require "elasticsearch/transport/transport/http/manticore"

@@ -99,6 +100,9 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
   # This allows you to set the maximum number of hits returned per scroll.
   config :size, :validate => :number, :default => 1000
 
+  # The number of retries to run the query. If the query fails after all retries, it logs an error message.
+  config :retries, :validate => :number, :default => 0
+
   # This parameter controls the keepalive time in seconds of the scrolling
   # request and initiates the scrolling process. The timeout applies per
   # round trip (i.e. between the previous scroll request, to the next).

@@ -221,6 +225,8 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
       @slices < 1 && fail(LogStash::ConfigurationError, "Elasticsearch Input Plugin's `slices` option must be greater than zero, got `#{@slices}`")
     end
 
+    @retries < 0 && fail(LogStash::ConfigurationError, "Elasticsearch Input Plugin's `retries` option must be equal or greater than zero, got `#{@retries}`")
+
     validate_authentication
     fill_user_password_from_cloud_auth
     fill_hosts_from_cloud_id
@@ -262,22 +268,67 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
   end
 
   private
-
+  JOB_NAME = "run query"
   def do_run(output_queue)
     # if configured to run a single slice, don't bother spinning up threads
-
+    if @slices.nil? || @slices <= 1
+      success, events = retryable_slice
+      success && events.each { |event| output_queue << event }
+      return
+    end
 
     logger.warn("managed slices for query is very large (#{@slices}); consider reducing") if @slices > 8
 
+    slice_results = parallel_slice # array of tuple(ok, events)
+
+    # insert events into the queue only if all slices succeeded
+    if slice_results.all?(&:first)
+      slice_results.flat_map { |success, events| events }
+                   .each { |event| output_queue << event }
+    end
+
+    logger.trace("#{@slices} slices completed")
+  end
+
+  def retryable(job_name, &block)
+    begin
+      stud_try = ::LogStash::Helpers::LoggableTry.new(logger, job_name)
+      output = stud_try.try((@retries + 1).times) { yield }
+      [true, output]
+    rescue => e
+      error_details = {:message => e.message, :cause => e.cause}
+      error_details[:backtrace] = e.backtrace if logger.debug?
+      logger.error("Tried #{job_name} unsuccessfully", error_details)
+      [false, nil]
+    end
+  end
+
+
+  # @return [(ok, events)] : Array of tuple(Boolean, [Logstash::Event])
+  def parallel_slice
     pipeline_id = execution_context&.pipeline_id || 'main'
     @slices.times.map do |slice_id|
       Thread.new do
         LogStash::Util::set_thread_name("[#{pipeline_id}]|input|elasticsearch|slice_#{slice_id}")
-
+        retryable_slice(slice_id)
       end
-    end.map
+    end.map do |t|
+      t.join
+      t.value
+    end
   end
 
+  # @param slice_id [Integer]
+  # @return (ok, events) [Boolean, Array(Logstash::Event)]
+  def retryable_slice(slice_id=nil)
+    retryable(JOB_NAME) do
+      output = []
+      do_run_slice(output, slice_id)
+      output
+    end
+  end
+
+
   def do_run_slice(output_queue, slice_id=nil)
     slice_query = @base_query
     slice_query = slice_query.merge('slice' => { 'id' => slice_id, 'max' => @slices}) unless slice_id.nil?
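A hedged standalone illustration of the tuple contract introduced above (plain Ruby, not plugin code): each slice reports `[ok, events]`, and `do_run` enqueues events only when every slice succeeded, so one slice exhausting its `retries` suppresses the output of the whole run:

    # Each entry mimics one slice's return value: [ok, events].
    slice_results = [
      [true,  [:event_a, :event_b]], # slice 0 succeeded
      [false, nil]                   # slice 1 exhausted its retries => [false, nil]
    ]

    queue = Queue.new
    if slice_results.all?(&:first)   # the all-or-nothing check from do_run
      slice_results.flat_map { |_ok, events| events }
                   .each { |event| queue << event }
    end

    queue.size # => 0 -- slice 1 failed, so slice 0's events are dropped too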
@@ -314,11 +365,6 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
     r = scroll_request(scroll_id)
     r['hits']['hits'].each { |hit| push_hit(hit, output_queue) }
     [r['hits']['hits'].any?, r['_scroll_id']]
-  rescue => e
-    # this will typically be triggered by a scroll timeout
-    logger.error("Scroll request error, aborting scroll", message: e.message, exception: e.class)
-    # return no hits and original scroll_id so we can try to clear it
-    [false, scroll_id]
   end
 
   def push_hit(hit, output_queue)

@@ -353,7 +399,7 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
     logger.warn("Ignoring clear_scroll exception", message: e.message, exception: e.class)
   end
 
-  def scroll_request
+  def scroll_request(scroll_id)
     @client.scroll(:body => { :scroll_id => scroll_id }, :scroll => @scroll)
   end
 
data/logstash-input-elasticsearch.gemspec CHANGED

@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-input-elasticsearch'
-  s.version = '4.14.0'
+  s.version = '4.15.0'
   s.licenses = ['Apache License (2.0)']
   s.summary = "Reads query results from an Elasticsearch cluster"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
data/spec/inputs/elasticsearch_spec.rb CHANGED

@@ -51,6 +51,17 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
       expect { plugin.register }.to raise_error(LogStash::ConfigurationError)
     end
   end
+
+  context "retry" do
+    let(:config) do
+      {
+        "retries" => -1
+      }
+    end
+    it "should raise an exception with negative number" do
+      expect { plugin.register }.to raise_error(LogStash::ConfigurationError)
+    end
+  end
 end
 
 it_behaves_like "an interruptible input plugin" do
@@ -193,7 +204,7 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
   context 'with `slices => 1`' do
     let(:slices) { 1 }
     it 'runs just one slice' do
-      expect(plugin).to receive(:do_run_slice).with(duck_type(:<<))
+      expect(plugin).to receive(:do_run_slice).with(duck_type(:<<), nil)
       expect(Thread).to_not receive(:new)
 
       plugin.register

@@ -204,7 +215,7 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
   context 'without slices directive' do
     let(:config) { super().tap { |h| h.delete('slices') } }
     it 'runs just one slice' do
-      expect(plugin).to receive(:do_run_slice).with(duck_type(:<<))
+      expect(plugin).to receive(:do_run_slice).with(duck_type(:<<), nil)
       expect(Thread).to_not receive(:new)
 
       plugin.register
@@ -315,7 +326,6 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
       end
       # END SLICE 1
 
-      let(:client) { Elasticsearch::Client.new }
 
       # RSpec mocks validations are not threadsafe.
       # Allow caller to synchronize.
@@ -329,69 +339,112 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
       end
     end
 
-
-
-
+    describe "with normal response" do
+      before(:each) do
+        expect(Elasticsearch::Client).to receive(:new).with(any_args).and_return(client)
+        plugin.register
 
-
+        expect(client).to receive(:clear_scroll).and_return(nil)
 
-
-
-
-
-
+        # SLICE0 is a three-page scroll in which the last page is empty
+        slice0_query = LogStash::Json.dump(query.merge('slice' => { 'id' => 0, 'max' => 2}))
+        expect(client).to receive(:search).with(hash_including(:body => slice0_query)).and_return(slice0_response0)
+        expect(client).to receive(:scroll).with(hash_including(:body => { :scroll_id => slice0_scroll1 })).and_return(slice0_response1)
+        expect(client).to receive(:scroll).with(hash_including(:body => { :scroll_id => slice0_scroll2 })).and_return(slice0_response2)
+        allow(client).to receive(:ping)
 
-
-
-
+        # SLICE1 is a two-page scroll in which the last page has no next scroll id
+        slice1_query = LogStash::Json.dump(query.merge('slice' => { 'id' => 1, 'max' => 2}))
+        expect(client).to receive(:search).with(hash_including(:body => slice1_query)).and_return(slice1_response0)
+        expect(client).to receive(:scroll).with(hash_including(:body => { :scroll_id => slice1_scroll1 })).and_return(slice1_response1)
 
-
-
-
+        synchronize_method!(plugin, :scroll_request)
+        synchronize_method!(plugin, :search_request)
+      end
 
-
-        queue = Queue.new # since we are running slices in threads, we need a thread-safe queue.
-        plugin.run(queue)
-        events = []
-        events << queue.pop until queue.empty?
-        events
-      end
+      let(:client) { Elasticsearch::Client.new }
 
-
-
-
+      let(:emitted_events) do
+        queue = Queue.new # since we are running slices in threads, we need a thread-safe queue.
+        plugin.run(queue)
+        events = []
+        events << queue.pop until queue.empty?
+        events
+      end
 
-
-
-
-      end
-      it 'emits the hits on the second page of the first slice' do
-        expect(emitted_event_ids).to include('slice0-response1-item0')
-      end
+      let(:emitted_event_ids) do
+        emitted_events.map { |event| event.get('[@metadata][_id]') }
+      end
 
-
-
-
-
+      it 'emits the hits on the first page of the first slice' do
+        expect(emitted_event_ids).to include('slice0-response0-item0')
+        expect(emitted_event_ids).to include('slice0-response0-item1')
+      end
+      it 'emits the hits on the second page of the first slice' do
+        expect(emitted_event_ids).to include('slice0-response1-item0')
+      end
 
-
-
-
-
+      it 'emits the hits on the first page of the second slice' do
+        expect(emitted_event_ids).to include('slice1-response0-item0')
+        expect(emitted_event_ids).to include('slice1-response0-item1')
+      end
+
+      it 'emits the hits on the second page of the second slice' do
+        expect(emitted_event_ids).to include('slice1-response1-item0')
+        expect(emitted_event_ids).to include('slice1-response1-item1')
+      end
+
+      it 'does not double-emit' do
+        expect(emitted_event_ids.uniq).to eq(emitted_event_ids)
+      end
 
-
-
+      it 'emits events with appropriate fields' do
+        emitted_events.each do |event|
+          expect(event).to be_a(LogStash::Event)
+          expect(event.get('message')).to eq(['hello, world'])
+          expect(event.get('[@metadata][_id]')).to_not be_nil
+          expect(event.get('[@metadata][_id]')).to_not be_empty
+          expect(event.get('[@metadata][_index]')).to start_with('logstash-')
+        end
+      end
     end
 
-
-
-      expect(
-
-
-      expect(
-
+    describe "with scroll request fail" do
+      before(:each) do
+        expect(Elasticsearch::Client).to receive(:new).with(any_args).and_return(client)
+        plugin.register
+
+        expect(client).to receive(:clear_scroll).and_return(nil)
+
+        # SLICE0 is a three-page scroll in which the second page throws an exception
+        slice0_query = LogStash::Json.dump(query.merge('slice' => { 'id' => 0, 'max' => 2}))
+        expect(client).to receive(:search).with(hash_including(:body => slice0_query)).and_return(slice0_response0)
+        expect(client).to receive(:scroll).with(hash_including(:body => { :scroll_id => slice0_scroll1 })).and_raise("boom")
+        allow(client).to receive(:ping)
+
+        # SLICE1 is a two-page scroll in which the last page has no next scroll id
+        slice1_query = LogStash::Json.dump(query.merge('slice' => { 'id' => 1, 'max' => 2}))
+        expect(client).to receive(:search).with(hash_including(:body => slice1_query)).and_return(slice1_response0)
+        expect(client).to receive(:scroll).with(hash_including(:body => { :scroll_id => slice1_scroll1 })).and_return(slice1_response1)
+
+        synchronize_method!(plugin, :scroll_request)
+        synchronize_method!(plugin, :search_request)
+      end
+
+      let(:client) { Elasticsearch::Client.new }
+
+      it 'does not insert event to queue' do
+        expect(plugin).to receive(:parallel_slice).and_wrap_original do |m, *args|
+          slice0, slice1 = m.call
+          expect(slice0[0]).to be_falsey
+          expect(slice1[0]).to be_truthy
+          expect(slice1[1].size).to eq(4) # four items from SLICE1
+          [slice0, slice1]
+        end
+
+        queue = Queue.new
+        plugin.run(queue)
+        expect(queue.size).to eq(0)
       end
     end
   end
@@ -890,16 +943,93 @@ describe LogStash::Inputs::Elasticsearch, :ecs_compatibility_support do
        queue << LogStash::Event.new({})
      }.at_least(:twice)
      runner = Thread.start { plugin.run(queue) }
-
+     expect(queue.pop).not_to be_nil
+     cron_jobs = plugin.instance_variable_get(:@_scheduler).instance_variable_get(:@impl).jobs
+     expect(cron_jobs[0].next_time - cron_jobs[0].last_time).to be <= 5.0
+     expect(queue.pop).not_to be_nil
    ensure
      plugin.do_stop
      runner.join if runner
    end
-   expect(queue.size).to be >= 2
  end
 
 end
 
+  context "retries" do
+    let(:mock_response) do
+      {
+        "_scroll_id" => "cXVlcnlUaGVuRmV0Y2g",
+        "took" => 27,
+        "timed_out" => false,
+        "_shards" => {
+          "total" => 169,
+          "successful" => 169,
+          "failed" => 0
+        },
+        "hits" => {
+          "total" => 1,
+          "max_score" => 1.0,
+          "hits" => [ {
+            "_index" => "logstash-2014.10.12",
+            "_type" => "logs",
+            "_id" => "C5b2xLQwTZa76jBmHIbwHQ",
+            "_score" => 1.0,
+            "_source" => { "message" => ["ohayo"] }
+          } ]
+        }
+      }
+    end
+
+    let(:mock_scroll_response) do
+      {
+        "_scroll_id" => "r453Wc1jh0caLJhSDg",
+        "hits" => { "hits" => [] }
+      }
+    end
+
+    before(:each) do
+      client = Elasticsearch::Client.new
+      allow(Elasticsearch::Client).to receive(:new).with(any_args).and_return(client)
+      allow(client).to receive(:search).with(any_args).and_return(mock_response)
+      allow(client).to receive(:scroll).with({ :body => { :scroll_id => "cXVlcnlUaGVuRmV0Y2g" }, :scroll=> "1m" }).and_return(mock_scroll_response)
+      allow(client).to receive(:clear_scroll).and_return(nil)
+      allow(client).to receive(:ping)
+    end
+
+    let(:config) do
+      {
+        "hosts" => ["localhost"],
+        "query" => '{ "query": { "match": { "city_name": "Okinawa" } }, "fields": ["message"] }',
+        "retries" => 1
+      }
+    end
+
+    it "retry and log error when all search request fail" do
+      expect(plugin.logger).to receive(:error).with(/Tried .* unsuccessfully/,
+        hash_including(:message => 'Manticore::UnknownException'))
+      expect(plugin.logger).to receive(:warn).twice.with(/Attempt to .* but failed/,
+        hash_including(:exception => "Manticore::UnknownException"))
+      expect(plugin).to receive(:search_request).with(instance_of(Hash)).and_raise(Manticore::UnknownException).at_least(:twice)
+
+      plugin.register
+
+      expect{ plugin.run(queue) }.not_to raise_error
+      expect(queue.size).to eq(0)
+    end
+
+    it "retry successfully when search request fail for one time" do
+      expect(plugin.logger).to receive(:warn).once.with(/Attempt to .* but failed/,
+        hash_including(:exception => "Manticore::UnknownException"))
+      expect(plugin).to receive(:search_request).with(instance_of(Hash)).once.and_raise(Manticore::UnknownException)
+      expect(plugin).to receive(:search_request).with(instance_of(Hash)).once.and_call_original
+
+      plugin.register
+
+      expect{ plugin.run(queue) }.not_to raise_error
+      expect(queue.size).to eq(1)
+    end
+  end
+
 # @note can be removed once we depends on elasticsearch gem >= 6.x
 def extract_transport(client) # on 7.x client.transport is a ES::Transport::Client
   client.transport.respond_to?(:transport) ? client.transport.transport : client.transport
metadata CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-input-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 4.14.0
+  version: 4.15.0
 platform: ruby
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-
+date: 2022-08-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement

@@ -255,6 +255,7 @@ files:
 - NOTICE.TXT
 - README.md
 - docs/index.asciidoc
+- lib/logstash/helpers/loggable_try.rb
 - lib/logstash/inputs/elasticsearch.rb
 - lib/logstash/inputs/elasticsearch/patches/_elasticsearch_transport_connections_selector.rb
 - lib/logstash/inputs/elasticsearch/patches/_elasticsearch_transport_http_manticore.rb