logstash-integration-kafka 10.4.0-java → 10.5.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -1
- data/docs/output-kafka.asciidoc +21 -8
- data/lib/logstash/outputs/kafka.rb +28 -16
- data/logstash-integration-kafka.gemspec +1 -1
- data/spec/unit/outputs/kafka_spec.rb +40 -8
- metadata +2 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c35575aafc1330e1fac14f137c818b3836f399ee1f4514a86a8a7387c4d8e8e9
+  data.tar.gz: 7fb89bca8ec2b25e07ab411b75f7de8fc4edc97f22fd2b0d5869452b10a529d9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7d0185e11c203175272ac52eb89a1fb3c1a09906832bc34dde3e2eca513575d9b47c79ca741be609c07f3d8f3d191f4371447d6987ad715516da786f7c1622a3
+  data.tar.gz: 93063bdf2cb603134865fc745c31c37a134d2fca09fac7bd9d68c798c792f471c1388fb29a600b45cc56d506c35a700a2c1236577d0fcbf1a65097cea91bf3f4
data/CHANGELOG.md
CHANGED

@@ -1,8 +1,14 @@
+## 10.5.0
+  - Changed: retry sending messages only for retriable exceptions [#29](https://github.com/logstash-plugins/logstash-integration-kafka/pull/29)
+
+## 10.4.1
+  - [DOC] Fixed formatting issues and made minor content edits [#43](https://github.com/logstash-plugins/logstash-integration-kafka/pull/43)
+
 ## 10.4.0
   - added the input `isolation_level` to allow fine control of whether to return transactional messages [#44](https://github.com/logstash-plugins/logstash-integration-kafka/pull/44)
 
 ## 10.3.0
-  - added the input and output `client_dns_lookup` parameter to allow control of how DNS requests are made
+  - added the input and output `client_dns_lookup` parameter to allow control of how DNS requests are made [#28](https://github.com/logstash-plugins/logstash-integration-kafka/pull/28)
 
 ## 10.2.0
   - Changed: config defaults to be aligned with Kafka client defaults [#30](https://github.com/logstash-plugins/logstash-integration-kafka/pull/30)
data/docs/output-kafka.asciidoc
CHANGED

@@ -115,10 +115,13 @@ output plugins.
 The number of acknowledgments the producer requires the leader to have received
 before considering a request complete.
 
-acks=0
-
-
-
+`acks=0`. The producer will not wait for any acknowledgment from the server.
+
+`acks=1`. The leader will write the record to its local log, but will respond
+without waiting for full acknowledgement from all followers.
+
+`acks=all`. The leader will wait for the full set of in-sync replicas before
+acknowledging the record.
 
 [id="plugins-{type}s-{plugin}-batch_size"]
 ===== `batch_size`
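As an aside, here is a minimal sketch of how these `acks` values are set, written in the same JRuby style as the plugin's unit specs shown later in this diff. The topic and broker address are placeholders, not values from the plugin:

    # Sketch only: configuring the output from JRuby as the specs do.
    require "logstash/outputs/kafka"

    kafka = LogStash::Outputs::Kafka.new(
      "topic_id" => "example-topic",            # placeholder topic
      "bootstrap_servers" => "localhost:9092",  # placeholder broker
      "acks" => "all"                           # wait for all in-sync replicas
    )
    kafka.register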
@@ -154,11 +157,12 @@ The total bytes of memory the producer can use to buffer records waiting to be s
 ===== `client_dns_lookup`
 
 * Value type is <<string,string>>
+* Valid options are `use_all_dns_ips`, `resolve_canonical_bootstrap_servers_only`, `default`
 * Default value is `"default"`
 
-
-IP addresses for a hostname
-
+Controls how DNS lookups are done. If set to `use_all_dns_ips`, Logstash tries
+all IP addresses returned for a hostname before failing the connection.
+If set to `resolve_canonical_bootstrap_servers_only`, each entry will be
 resolved and expanded into a list of canonical names.
 
 [id="plugins-{type}s-{plugin}-client_id"]
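A similar hedged sketch for `client_dns_lookup`, reusing the placeholder topic and a placeholder hostname from the `acks` example above:

    # Sketch only: try every IP resolved for the bootstrap host before failing.
    kafka = LogStash::Outputs::Kafka.new(
      "topic_id" => "example-topic",
      "bootstrap_servers" => "kafka.example.com:9092",  # placeholder hostname
      "client_dns_lookup" => "use_all_dns_ips"
    )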
@@ -178,7 +182,7 @@ ip/port by allowing a logical application name to be included with the request
 * Default value is `"none"`
 
 The compression type for all data generated by the producer.
-The default is none (i.e. no compression). Valid values are none, gzip, or
+The default is none (i.e. no compression). Valid values are none, gzip, snappy, or lz4.
 
 [id="plugins-{type}s-{plugin}-jaas_path"]
 ===== `jaas_path`
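And the same configuration pattern for `compression_type`, again with placeholder values:

    # Sketch only: enable one of the listed codecs for produced batches.
    kafka = LogStash::Outputs::Kafka.new(
      "topic_id" => "example-topic",
      "bootstrap_servers" => "localhost:9092",
      "compression_type" => "snappy"  # none, gzip, snappy, or lz4
    )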
@@ -323,6 +327,15 @@ Kafka down, etc).
 
 A value less than zero is a configuration error.
 
+Starting with version 10.5.0, this plugin will only retry exceptions that are a subclass of
+https://kafka.apache.org/25/javadoc/org/apache/kafka/common/errors/RetriableException.html[RetriableException]
+and
+https://kafka.apache.org/25/javadoc/org/apache/kafka/common/errors/InterruptException.html[InterruptException].
+If producing a message throws any other exception, an error is logged and the message is dropped without retrying.
+This prevents the Logstash pipeline from hanging indefinitely.
+
+In versions prior to 10.5.0, any exception is retried indefinitely unless the `retries` option is configured.
+
 [id="plugins-{type}s-{plugin}-retry_backoff_ms"]
 ===== `retry_backoff_ms`
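The retry policy described above boils down to a predicate over the exception's class. The following is an illustration that mirrors the rescue clauses in the plugin code below; it is not an API the plugin exposes:

    # Illustration only: 10.5.0 re-sends a record when the producer error is
    # retriable, and drops it otherwise.
    def retriable?(e)
      e.is_a?(org.apache.kafka.common.errors.RetriableException) ||
        e.is_a?(org.apache.kafka.common.errors.InterruptException)
    end

    retriable?(org.apache.kafka.common.errors.TimeoutException.new("t"))       # => true  (subclass of RetriableException)
    retriable?(org.apache.kafka.common.errors.SerializationException.new("s")) # => false (logged and dropped)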
data/lib/logstash/outputs/kafka.rb
CHANGED

@@ -236,7 +236,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     remaining = @retries
 
     while batch.any?
-
+      unless remaining.nil?
         if remaining < 0
           # TODO(sissel): Offer to DLQ? Then again, if it's a transient fault,
           # DLQing would make things worse (you dlq data that would be successful
@@ -255,27 +255,39 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
         begin
           # send() can throw an exception even before the future is created.
           @producer.send(record)
-        rescue org.apache.kafka.common.errors.
+        rescue org.apache.kafka.common.errors.InterruptException,
+               org.apache.kafka.common.errors.RetriableException => e
+          logger.info("producer send failed, will retry sending", :exception => e.class, :message => e.message)
           failures << record
           nil
-        rescue org.apache.kafka.common.
-
-
-
-
-          # TODO(sissel): Let's add DLQ here.
-          failures << record
+        rescue org.apache.kafka.common.KafkaException => e
+          # This error is not retriable, drop event
+          # TODO: add DLQ support
+          logger.warn("producer send failed, dropping record", :exception => e.class, :message => e.message,
+                      :record_value => record.value)
           nil
         end
-      end
+      end
 
       futures.each_with_index do |future, i|
-
-
-
-
-
-
+        # We cannot skip nils using `futures.compact` because then our index `i` will not align with `batch`
+        unless future.nil?
+          begin
+            future.get
+          rescue java.util.concurrent.ExecutionException => e
+            # TODO(sissel): Add metric to count failures, possibly by exception type.
+            if e.get_cause.is_a? org.apache.kafka.common.errors.RetriableException or
+                e.get_cause.is_a? org.apache.kafka.common.errors.InterruptException
+              logger.info("producer send failed, will retry sending", :exception => e.cause.class,
+                          :message => e.cause.message)
+              failures << batch[i]
+            elsif e.get_cause.is_a? org.apache.kafka.common.KafkaException
+              # This error is not retriable, drop event
+              # TODO: add DLQ support
+              logger.warn("producer send failed, dropping record", :exception => e.cause.class,
+                          :message => e.cause.message, :record_value => batch[i].value)
+            end
+          end
+        end
       end
     end
 
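The second rescue path above inspects `e.get_cause` because `Future#get` re-raises the original producer error wrapped in a `java.util.concurrent.ExecutionException`. A standalone JRuby sketch of that wrapping, using the same `FutureTask` trick the specs below use to simulate failed sends:

    # Sketch only: a FutureTask whose body raises surfaces the error from
    # get() wrapped in ExecutionException, so the real class lives in the cause.
    future = java.util.concurrent.FutureTask.new do
      raise org.apache.kafka.common.errors.TimeoutException.new("simulated failure")
    end
    future.run
    begin
      future.get
    rescue java.util.concurrent.ExecutionException => e
      puts e.get_cause.class  # the TimeoutException, not the wrapper
    end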
data/logstash-integration-kafka.gemspec
CHANGED

@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-integration-kafka'
-  s.version = '10.4.0'
+  s.version = '10.5.0'
   s.licenses = ['Apache-2.0']
   s.summary = "Integration with Kafka - input and output plugins"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/unit/outputs/kafka_spec.rb
CHANGED

@@ -56,14 +56,15 @@ describe "outputs/kafka" do
     end
   end
 
-  context "when KafkaProducer#send() raises
+  context "when KafkaProducer#send() raises a retriable exception" do
     let(:failcount) { (rand * 10).to_i }
     let(:sendcount) { failcount + 1 }
 
     let(:exception_classes) { [
       org.apache.kafka.common.errors.TimeoutException,
+      org.apache.kafka.common.errors.DisconnectException,
+      org.apache.kafka.common.errors.CoordinatorNotAvailableException,
       org.apache.kafka.common.errors.InterruptException,
-      org.apache.kafka.common.errors.SerializationException
     ] }
 
     before do
@@ -88,6 +89,37 @@ describe "outputs/kafka" do
     end
   end
 
+  context "when KafkaProducer#send() raises a non-retriable exception" do
+    let(:failcount) { (rand * 10).to_i }
+
+    let(:exception_classes) { [
+      org.apache.kafka.common.errors.SerializationException,
+      org.apache.kafka.common.errors.RecordTooLargeException,
+      org.apache.kafka.common.errors.InvalidTopicException
+    ] }
+
+    before do
+      count = 0
+      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
+        .exactly(1).times
+        .and_wrap_original do |m, *args|
+        if count < failcount # fail 'failcount' times in a row.
+          count += 1
+          # Pick an exception at random
+          raise exception_classes.shuffle.first.new("injected exception for testing")
+        else
+          m.call(*args) # call original
+        end
+      end
+    end
+
+    it "should not retry" do
+      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
+      kafka.register
+      kafka.multi_receive([event])
+    end
+  end
+
   context "when a send fails" do
     context "and the default retries behavior is used" do
       # Fail this many times and then finally succeed.

@@ -107,7 +139,7 @@ describe "outputs/kafka" do
         # inject some failures.
 
         # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
-        future = java.util.concurrent.FutureTask.new { raise "Failed" }
+        future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
         future.run
         future
       else

@@ -129,7 +161,7 @@ describe "outputs/kafka" do
          .once
          .and_wrap_original do |m, *args|
          # Always fail.
-         future = java.util.concurrent.FutureTask.new { raise "Failed" }
+         future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
          future.run
          future
        end

@@ -143,7 +175,7 @@ describe "outputs/kafka" do
          .once
          .and_wrap_original do |m, *args|
          # Always fail.
-         future = java.util.concurrent.FutureTask.new { raise "Failed" }
+         future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
          future.run
          future
        end

@@ -164,7 +196,7 @@ describe "outputs/kafka" do
          .at_most(max_sends).times
          .and_wrap_original do |m, *args|
          # Always fail.
-         future = java.util.concurrent.FutureTask.new { raise "Failed" }
+         future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
          future.run
          future
        end

@@ -175,10 +207,10 @@ describe "outputs/kafka" do
 
       it 'should only sleep retries number of times' do
         expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .at_most(max_sends)
+          .at_most(max_sends).times
          .and_wrap_original do |m, *args|
          # Always fail.
-         future = java.util.concurrent.FutureTask.new { raise "Failed" }
+         future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
          future.run
          future
        end
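These specs lean on RSpec's `and_wrap_original`, which hands the stub the original implementation as its first block argument so the stub can either fail or delegate. A self-contained sketch of the pattern with a stand-in class (`Producer` here is hypothetical, not the Kafka client):

    # Sketch only: fail the first call, then delegate to the original method.
    require "rspec/autorun"

    class Producer
      def send_record(record)
        "sent #{record}"
      end
    end

    RSpec.describe Producer do
      it "fails once, then calls through" do
        calls = 0
        producer = Producer.new
        expect(producer).to receive(:send_record).twice.and_wrap_original do |m, *args|
          calls += 1
          raise "injected failure" if calls == 1
          m.call(*args) # call original
        end

        expect { producer.send_record("a") }.to raise_error("injected failure")
        expect(producer.send_record("b")).to eq("sent b")
      end
    end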
metadata
CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-  version: 10.4.0
+  version: 10.5.0
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-07-
+date: 2020-07-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement