logstash-integration-kafka 10.7.4-java → 10.7.5-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: efc6c33cf871ecd41fc07468d3d6e47dc1a71c4dbd1800fe99127da703547dd2
-   data.tar.gz: 644f506705807c95e15fc035aac7f5d57233dd5809ae37f2c71939324ab9c3e7
+   metadata.gz: 5e76697fe666ea555f7256a8a42865a749ee00e05f9e7294d89c481b1ba6a7c8
+   data.tar.gz: 5cfffe44f6e36776efb87a77e3a30ac03cdc802ae769aa423abdae4e0a36a6d5
  SHA512:
-   metadata.gz: 28341e37050a860c8e87d0b74a4da4d1fdd37eff3e9c95bd5596dff4d9a613149f4dd09052a949f6002fb6da835601df12ca6d37d03ea25819ff6517833da37c
-   data.tar.gz: eb196288b02dd30b92bf3a4e083be0b0d8f005307203e20e8892262d46e48041e4d4212182be685ecae668970530c7050c7132f4c2aaa3be50f3f00e46a04122
+   metadata.gz: 9052d2ae8570e274840882751383a0cfb3c76e6858ce0519ccfdf2be8ca773b0becc9c27e1ec0f52319004b5ee21e4fdd32d60affed719f5b54b4772b4329541
+   data.tar.gz: ff4924b87505befa7f19ddfc2699c109fbdcd53c313b4b7cb5775b7fe6de2853cc98480fae1c9267463e88eac3dbf8c2484178af803505fb820aa0b26af4e5e9
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 10.7.5
+   - Improved error handling in the input plugin to avoid errors 'escaping' from the plugin and crashing the Logstash
+     process [#87](https://github.com/logstash-plugins/logstash-integration-kafka/pull/87)
+
  ## 10.7.4
    - Docs: make sure Kafka clients version is updated in docs [#83](https://github.com/logstash-plugins/logstash-integration-kafka/pull/83)
      Since **10.6.0** the Kafka client was updated to **2.5.1**
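
The substantive change in 10.7.5 is the error handling visible in the input-plugin hunks below: `poll` and `commitSync` are wrapped so that failures are logged and retried instead of escaping the worker thread. A minimal sketch of that containment pattern in plain Ruby (`safe_poll` and the one-second backoff are illustrative, not the plugin's API):

```ruby
# Sketch of the error-containment idea adopted in 10.7.5 (illustrative names).
# Errors raised by poll are logged and absorbed so the worker loop survives,
# rather than propagating up and taking down the whole Logstash process.
def safe_poll(consumer, stopping)
  consumer.poll(100)                 # may raise on auth, broker, or network problems
rescue => e
  return [] if stopping.call        # shutting down: let the loop wind down quietly
  warn "poll failed: #{e.message}"  # otherwise log, back off, and retry next pass
  sleep(1)
  []                                # an empty batch keeps the caller's loop alive
end
```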
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -253,6 +253,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
    def register
      @runner_threads = []
      @metadata_mode = extract_metadata_level(@decorate_events)
+     @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
      check_schema_registry_parameters
    end
 
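A note on the `register` change above: the topic pattern is now compiled once, up front, because the Kafka consumer's `subscribe` API is overloaded and pattern subscriptions require a `java.util.regex.Pattern`, not a Ruby `Regexp`. A JRuby sketch (the `consumer` object is assumed to be a KafkaConsumer):

```ruby
# JRuby sketch: KafkaConsumer#subscribe takes either a collection of topic
# names or a compiled java.util.regex.Pattern (a Ruby Regexp is not accepted).
pattern = java.util.regex.Pattern.compile("logs-.*")
consumer.subscribe(["exact-topic"])  # subscribe by explicit name
consumer.subscribe(pattern)          # subscribe by pattern; also matches topics created later
```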
@@ -280,9 +281,11 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 
    public
    def run(logstash_queue)
-     @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
-     @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
-     @runner_threads.each { |t| t.join }
+     @runner_consumers = consumer_threads.times.map { |i| subscribe(create_consumer("#{client_id}-#{i}")) }
+     @runner_threads = @runner_consumers.map.with_index { |consumer, i| thread_runner(logstash_queue, consumer,
+                                                                                      "kafka-input-worker-#{client_id}-#{i}") }
+     @runner_threads.each(&:start)
+     @runner_threads.each(&:join)
    end # def run
 
    public
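The new `@runner_threads.each(&:start)` step exists because the workers are now `java.lang.Thread` instances: unlike Ruby's `Thread.new`, which begins executing immediately, a Java thread does nothing until `start` is called. A small JRuby sketch:

```ruby
# JRuby sketch: a java.lang.Thread must be started explicitly.
t = java.lang.Thread.new { puts "working" }
t.set_name("kafka-input-worker-demo")  # named threads show up in logs and thread dumps
t.start                                # nothing runs until this call
t.join
```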
@@ -296,62 +299,100 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
      @runner_consumers
    end
 
-   private
-   def thread_runner(logstash_queue, consumer)
-     Thread.new do
+   def subscribe(consumer)
+     @pattern.nil? ? consumer.subscribe(topics) : consumer.subscribe(@pattern)
+     consumer
+   end
+
+   def thread_runner(logstash_queue, consumer, name)
+     java.lang.Thread.new do
+       LogStash::Util::set_thread_name(name)
        begin
-         unless @topics_pattern.nil?
-           nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
-           pattern = java.util.regex.Pattern.compile(@topics_pattern)
-           consumer.subscribe(pattern, nooplistener)
-         else
-           consumer.subscribe(topics);
-         end
          codec_instance = @codec.clone
-         while !stop?
-           records = consumer.poll(poll_timeout_ms)
-           next unless records.count > 0
-           for record in records do
-             codec_instance.decode(record.value.to_s) do |event|
-               decorate(event)
-               if schema_registry_url
-                 json = LogStash::Json.load(record.value.to_s)
-                 json.each do |k, v|
-                   event.set(k, v)
-                 end
-                 event.remove("message")
-               end
-               if @metadata_mode.include?(:record_props)
-                 event.set("[@metadata][kafka][topic]", record.topic)
-                 event.set("[@metadata][kafka][consumer_group]", @group_id)
-                 event.set("[@metadata][kafka][partition]", record.partition)
-                 event.set("[@metadata][kafka][offset]", record.offset)
-                 event.set("[@metadata][kafka][key]", record.key)
-                 event.set("[@metadata][kafka][timestamp]", record.timestamp)
-               end
-               if @metadata_mode.include?(:headers)
-                 record.headers.each do |header|
-                   s = String.from_java_bytes(header.value)
-                   s.force_encoding(Encoding::UTF_8)
-                   if s.valid_encoding?
-                     event.set("[@metadata][kafka][headers]["+header.key+"]", s)
-                   end
-                 end
-               end
-               logstash_queue << event
-             end
+         until stop?
+           records = do_poll(consumer)
+           unless records.empty?
+             records.each { |record| handle_record(record, codec_instance, logstash_queue) }
+             maybe_commit_offset(consumer)
            end
-           # Manual offset commit
-           consumer.commitSync if @enable_auto_commit.eql?(false)
          end
-       rescue org.apache.kafka.common.errors.WakeupException => e
-         raise e if !stop?
        ensure
          consumer.close
        end
      end
    end
 
+   def do_poll(consumer)
+     records = []
+     begin
+       records = consumer.poll(poll_timeout_ms)
+     rescue org.apache.kafka.common.errors.WakeupException => e
+       logger.debug("Wake up from poll", :kafka_error_message => e)
+       raise e unless stop?
+     rescue => e
+       logger.error("Unable to poll Kafka consumer",
+                    :kafka_error_message => e,
+                    :cause => e.respond_to?(:getCause) ? e.getCause : nil)
+       Stud.stoppable_sleep(1) { stop? }
+     end
+     records
+   end
+
+   def handle_record(record, codec_instance, queue)
+     codec_instance.decode(record.value.to_s) do |event|
+       decorate(event)
+       maybe_apply_schema(event, record)
+       maybe_set_metadata(event, record)
+       queue << event
+     end
+   end
+
+   def maybe_apply_schema(event, record)
+     if schema_registry_url
+       json = LogStash::Json.load(record.value.to_s)
+       json.each do |k, v|
+         event.set(k, v)
+       end
+       event.remove("message")
+     end
+   end
+
+   def maybe_set_metadata(event, record)
+     if @metadata_mode.include?(:record_props)
+       event.set("[@metadata][kafka][topic]", record.topic)
+       event.set("[@metadata][kafka][consumer_group]", @group_id)
+       event.set("[@metadata][kafka][partition]", record.partition)
+       event.set("[@metadata][kafka][offset]", record.offset)
+       event.set("[@metadata][kafka][key]", record.key)
+       event.set("[@metadata][kafka][timestamp]", record.timestamp)
+     end
+     if @metadata_mode.include?(:headers)
+       record.headers.each do |header|
+         s = String.from_java_bytes(header.value)
+         s.force_encoding(Encoding::UTF_8)
+         if s.valid_encoding?
+           event.set("[@metadata][kafka][headers][" + header.key + "]", s)
+         end
+       end
+     end
+   end
+
+   def maybe_commit_offset(consumer)
+     begin
+       consumer.commitSync if @enable_auto_commit.eql?(false)
+     rescue org.apache.kafka.common.errors.WakeupException => e
+       logger.debug("Wake up from commitSync", :kafka_error_message => e)
+       raise e unless stop?
+     rescue StandardError => e
+       # For transient errors, the commit should be successful after the next set of
+       # polled records has been processed.
+       # But, it might also be worth thinking about adding a configurable retry mechanism
+       logger.error("Unable to commit records",
+                    :kafka_error_message => e,
+                    :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
+     end
+   end
+
    private
    def create_consumer(client_id)
      begin
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
    s.name            = 'logstash-integration-kafka'
-   s.version         = '10.7.4'
+   s.version         = '10.7.5'
    s.licenses        = ['Apache-2.0']
    s.summary         = "Integration with Kafka - input and output plugins"
    s.description     = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/check_docs_spec.rb ADDED
@@ -0,0 +1,36 @@
+ # encoding: utf-8
+ require 'logstash-integration-kafka_jars'
+
+ describe "[DOCS]" do
+
+   let(:docs_files) do
+     ['index.asciidoc', 'input-kafka.asciidoc', 'output-kafka.asciidoc'].map { |name| File.join('docs', name) }
+   end
+
+   let(:kafka_version_properties) do
+     loader = java.lang.Thread.currentThread.getContextClassLoader
+     version = loader.getResource('kafka/kafka-version.properties')
+     fail "kafka-version.properties missing" unless version
+     properties = java.util.Properties.new
+     properties.load version.openStream
+     properties
+   end
+
+   it 'is sync-ed with Kafka client version' do
+     version = kafka_version_properties.get('version') # e.g. '2.5.1'
+
+     fails = docs_files.map do |file|
+       if line = File.readlines(file).find { |line| line.index(':kafka_client:') }
+         puts "found #{line.inspect} in #{file}" if $VERBOSE # e.g. ":kafka_client: 2.5\n"
+         if !version.start_with?(line.strip.split[1])
+           "documentation at #{file} is out of sync with kafka-clients version (#{version.inspect}), detected line: #{line.inspect}"
+         else
+           nil
+         end
+       end
+     end
+
+     fail "\n" + fails.join("\n") if fails.flatten.any?
+   end
+
+ end
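
For context on the new spec: the kafka-clients jar ships a `kafka/kafka-version.properties` resource, and each docs file carries an asciidoc `:kafka_client:` attribute; the test passes when the jar-side version starts with the docs-side value. A self-contained sketch of the comparison (the file contents shown are illustrative):

```ruby
# Illustrative recreation of the check performed by the spec above.
jar_version = "2.5.1"                 # e.g. from kafka/kafka-version.properties: version=2.5.1
docs_line   = ":kafka_client: 2.5\n"  # e.g. from docs/input-kafka.asciidoc
jar_version.start_with?(docs_line.strip.split[1])  # => true
```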
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -198,8 +198,9 @@ def consume_messages(config, queue: Queue.new, timeout:, event_count:)
      wait(timeout).for { queue.length }.to eq(event_count) unless timeout.eql?(false)
      block_given? ? yield(queue, kafka_input) : queue
    ensure
+     kafka_input.do_stop
      t.kill
-     t.join(30_000)
+     t.join(30)
    end
  end
 
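The `t.join(30_000)` to `t.join(30)` fix above is easy to miss: Ruby's `Thread#join` takes its timeout in seconds (unlike Java's `join(millis)`), so the old value could block a failing teardown for up to roughly 8.3 hours:

```ruby
# Ruby's Thread#join timeout is in seconds, not milliseconds.
t = Thread.new { sleep 0.1 }
t.join(30)       # waits at most 30 seconds
# t.join(30_000) # the old spelling: up to 30,000 seconds, ~8.3 hours
```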
data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -44,7 +44,7 @@ describe "outputs/kafka", :integration => true do
    end
 
    context 'when outputting messages serialized as Byte Array' do
-     let(:test_topic) { 'topic1b' }
+     let(:test_topic) { 'logstash_integration_topicbytearray' }
      let(:num_events) { 3 }
 
      before :each do
data/spec/unit/inputs/kafka_spec.rb CHANGED
@@ -3,38 +3,178 @@ require "logstash/devutils/rspec/spec_helper"
  require "logstash/inputs/kafka"
  require "concurrent"
 
- class MockConsumer
-   def initialize
-     @wake = Concurrent::AtomicBoolean.new(false)
-   end
 
-   def subscribe(topics)
-   end
-
-   def poll(ms)
-     if @wake.value
-       raise org.apache.kafka.common.errors.WakeupException.new
-     else
-       10.times.map do
-         org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
+ describe LogStash::Inputs::Kafka do
+   let(:common_config) { { 'topics' => ['logstash'] } }
+   let(:config) { common_config }
+   let(:consumer_double) { double(:consumer) }
+   let(:needs_raise) { false }
+   let(:payload) {
+     10.times.map do
+       org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
+     end
+   }
+   subject { LogStash::Inputs::Kafka.new(config) }
+
+   describe '#poll' do
+     before do
+       polled = false
+       allow(consumer_double).to receive(:poll) do
+         if polled
+           []
+         else
+           polled = true
+           payload
        end
      end
+
+     it 'should poll' do
+       expect(consumer_double).to receive(:poll)
+       expect(subject.do_poll(consumer_double)).to eq(payload)
+     end
+
+     it 'should return nil if Kafka Exception is encountered' do
+       expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+       expect(subject.do_poll(consumer_double)).to be_empty
+     end
+
+     it 'should not throw if Kafka Exception is encountered' do
+       expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+       expect{subject.do_poll(consumer_double)}.not_to raise_error
+     end
+
+     it 'should return no records if Assertion Error is encountered' do
+       expect(consumer_double).to receive(:poll).and_raise(java.lang.AssertionError.new(''))
+       expect{subject.do_poll(consumer_double)}.to raise_error(java.lang.AssertionError)
+     end
    end
 
-   def close
+   describe '#maybe_commit_offset' do
+     context 'with auto commit disabled' do
+       let(:config) { common_config.merge('enable_auto_commit' => false) }
+
+       it 'should call commit on the consumer' do
+         expect(consumer_double).to receive(:commitSync)
+         subject.maybe_commit_offset(consumer_double)
+       end
+       it 'should not throw if a Kafka Exception is encountered' do
+         expect(consumer_double).to receive(:commitSync).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+         expect{subject.maybe_commit_offset(consumer_double)}.not_to raise_error
+       end
+
+       it 'should throw if Assertion Error is encountered' do
+         expect(consumer_double).to receive(:commitSync).and_raise(java.lang.AssertionError.new(''))
+         expect{subject.maybe_commit_offset(consumer_double)}.to raise_error(java.lang.AssertionError)
+       end
+     end
+
+     context 'with auto commit enabled' do
+       let(:config) { common_config.merge('enable_auto_commit' => true) }
+
+       it 'should not call commit on the consumer' do
+         expect(consumer_double).not_to receive(:commitSync)
+         subject.maybe_commit_offset(consumer_double)
+       end
+     end
    end
 
-   def wakeup
-     @wake.make_true
+   describe '#register' do
+     it "should register" do
+       expect { subject.register }.to_not raise_error
+     end
    end
- end
 
- describe LogStash::Inputs::Kafka do
-   let(:config) { { 'topics' => ['logstash'], 'consumer_threads' => 4 } }
-   subject { LogStash::Inputs::Kafka.new(config) }
+   describe '#running' do
+     let(:q) { Queue.new }
+     let(:config) { common_config.merge('client_id' => 'test') }
+
+     before do
+       expect(subject).to receive(:create_consumer).once.and_return(consumer_double)
+       allow(consumer_double).to receive(:wakeup)
+       allow(consumer_double).to receive(:close)
+       allow(consumer_double).to receive(:subscribe)
+     end
+
+     context 'when running' do
+       before do
+         polled = false
+         allow(consumer_double).to receive(:poll) do
+           if polled
+             []
+           else
+             polled = true
+             payload
+           end
+         end
+
+         subject.register
+         t = Thread.new do
+           sleep(1)
+           subject.do_stop
+         end
+         subject.run(q)
+         t.join
+       end
+
+       it 'should process the correct number of events' do
+         expect(q.size).to eq(10)
+       end
+
+       it 'should set the consumer thread name' do
+         expect(subject.instance_variable_get('@runner_threads').first.get_name).to eq("kafka-input-worker-test-0")
+       end
+     end
 
-   it "should register" do
-     expect { subject.register }.to_not raise_error
+     context 'when errors are encountered during poll' do
+       before do
+         raised, polled = false
+         allow(consumer_double).to receive(:poll) do
+           unless raised
+             raised = true
+             raise exception
+           end
+           if polled
+             []
+           else
+             polled = true
+             payload
+           end
+         end
+
+         subject.register
+         t = Thread.new do
+           sleep 2
+           subject.do_stop
+         end
+         subject.run(q)
+         t.join
+       end
+
+       context "when a Kafka exception is raised" do
+         let(:exception) { org.apache.kafka.common.errors.TopicAuthorizationException.new('Invalid topic') }
+
+         it 'should poll successfully' do
+           expect(q.size).to eq(10)
+         end
+       end
+
+       context "when a StandardError is raised" do
+         let(:exception) { StandardError.new('Standard Error') }
+
+         it 'should retry and poll successfully' do
+           expect(q.size).to eq(10)
+         end
+       end
+
+       context "when a java error is raised" do
+         let(:exception) { java.lang.AssertionError.new('Fatal assertion') }
+
+         it "should not retry" do
+           expect(q.size).to eq(0)
+         end
+       end
+     end
  end
 
  context "register parameter verification" do
data/spec/unit/outputs/kafka_spec.rb CHANGED
@@ -8,6 +8,8 @@ describe "outputs/kafka" do
    let (:event) { LogStash::Event.new({'message' => 'hello', 'topic_name' => 'my_topic', 'host' => '172.0.0.1',
                                        '@timestamp' => LogStash::Timestamp.now}) }
 
+   let(:future) { double('kafka producer future') }
+
    context 'when initializing' do
      it "should register" do
        output = LogStash::Plugin.lookup("output", "kafka").new(simple_kafka_config)
@@ -24,8 +26,8 @@ describe "outputs/kafka" do
 
    context 'when outputting messages' do
      it 'should send logstash event to kafka broker' do
-       expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-         .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord)).and_call_original
+       expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).
+         with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
        kafka.register
        kafka.multi_receive([event])
@@ -33,18 +35,18 @@ describe "outputs/kafka" do
 
    it 'should support Event#sprintf placeholders in topic_id' do
      topic_field = 'topic_name'
-     expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
-       .with("my_topic", event.to_s).and_call_original
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
+     expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
+       with("my_topic", event.to_s).and_call_original
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
      kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
      kafka.register
      kafka.multi_receive([event])
    end
 
    it 'should support field referenced message_keys' do
-     expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
-       .with("test", "172.0.0.1", event.to_s).and_call_original
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
+     expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
+       with("test", "172.0.0.1", event.to_s).and_call_original
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
      kafka.register
      kafka.multi_receive([event])
@@ -71,22 +73,24 @@ describe "outputs/kafka" do
      before do
        count = 0
        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-         .exactly(sendcount).times
-         .and_wrap_original do |m, *args|
+         .exactly(sendcount).times do
          if count < failcount # fail 'failcount' times in a row.
            count += 1
            # Pick an exception at random
           raise exception_classes.shuffle.first.new("injected exception for testing")
         else
-           m.call(*args) # call original
+           count = :done
+           future # return future
         end
       end
+       expect(future).to receive :get
     end
 
      it "should retry until successful" do
        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
        kafka.register
        kafka.multi_receive([event])
+       sleep(1.0) # allow for future.get call
      end
    end
 
@@ -101,15 +105,13 @@ describe "outputs/kafka" do
 
      before do
        count = 0
-       expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-         .exactly(1).times
-         .and_wrap_original do |m, *args|
+       expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(1).times do
          if count < failcount # fail 'failcount' times in a row.
           count += 1
           # Pick an exception at random
          raise exception_classes.shuffle.first.new("injected exception for testing")
        else
-           m.call(*args) # call original
+           fail 'unexpected producer#send invocation'
        end
      end
    end
@@ -131,25 +133,24 @@ describe "outputs/kafka" do
 
    it "should retry until successful" do
      count = 0
-
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-       .exactly(sendcount).times
-       .and_wrap_original do |m, *args|
+     success = nil
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(sendcount).times do
        if count < failcount
          count += 1
          # inject some failures.
 
          # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
          future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
-         future.run
-         future
        else
-         m.call(*args)
+         success = true
+         future = java.util.concurrent.FutureTask.new { nil } # return no-op future
       end
+       future.tap { Thread.start { future.run } }
     end
      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
      kafka.register
      kafka.multi_receive([event])
+     expect( success ).to be true
    end
  end
 
@@ -158,9 +159,7 @@ describe "outputs/kafka" do
    let(:max_sends) { 1 }
 
    it "should only send once" do
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-       .once
-       .and_wrap_original do |m, *args|
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
        # Always fail.
        future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
        future.run
@@ -172,9 +171,7 @@ describe "outputs/kafka" do
    end
 
    it 'should not sleep' do
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-       .once
-       .and_wrap_original do |m, *args|
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
        # Always fail.
        future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
        future.run
@@ -193,13 +190,10 @@ describe "outputs/kafka" do
    let(:max_sends) { retries + 1 }
 
    it "should give up after retries are exhausted" do
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-       .at_most(max_sends).times
-       .and_wrap_original do |m, *args|
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
        # Always fail.
        future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
-       future.run
-       future
+       future.tap { Thread.start { future.run } }
      end
      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
      kafka.register
@@ -207,9 +201,7 @@ describe "outputs/kafka" do
    end
 
    it 'should only sleep retries number of times' do
-     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-       .at_most(max_sends).times
-       .and_wrap_original do |m, *args|
+     expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
        # Always fail.
        future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
        future.run
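
A note on the `FutureTask` stubs used throughout these specs: `KafkaProducer#send` returns a `Future`, and running a `java.util.concurrent.FutureTask` on a background thread makes a later `get` call block and then surface the injected failure, much like waiting on a broker acknowledgement. A standalone JRuby sketch (assumes the kafka-clients jar is loaded, as these specs arrange via `logstash-integration-kafka_jars`):

```ruby
# JRuby sketch of the FutureTask trick: complete the future off-thread so that
# #get blocks, then raises the injected error wrapped in an ExecutionException.
future = java.util.concurrent.FutureTask.new do
  raise org.apache.kafka.common.errors.TimeoutException.new("Failed")
end
Thread.start { future.run }  # finish the task asynchronously
begin
  future.get                 # blocks until run completes, then raises
rescue java.util.concurrent.ExecutionException => e
  puts "send failed: #{e.getCause}"  # cause is the injected TimeoutException
end
```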
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
-   version: 10.7.4
+   version: 10.7.5
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-04-14 00:00:00.000000000 Z
+ date: 2021-05-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement
@@ -220,6 +220,7 @@ files:
  - lib/logstash/plugin_mixins/common.rb
  - lib/logstash/plugin_mixins/kafka_support.rb
  - logstash-integration-kafka.gemspec
+ - spec/check_docs_spec.rb
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb
  - spec/integration/outputs/kafka_spec.rb
@@ -269,6 +270,7 @@ signing_key:
  specification_version: 4
  summary: Integration with Kafka - input and output plugins
  test_files:
+ - spec/check_docs_spec.rb
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb
  - spec/integration/outputs/kafka_spec.rb