logstash-output-kafka 6.2.4 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.md +2 -11
- data/docs/index.asciidoc +23 -48
- data/lib/logstash/outputs/kafka.rb +19 -121
- data/logstash-output-kafka.gemspec +1 -1
- data/spec/integration/outputs/kafka_spec.rb +1 -1
- data/spec/unit/outputs/kafka_spec.rb +10 -144
- metadata +5 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-SHA1:
-  metadata.gz:
-  data.tar.gz:
+SHA1:
+  metadata.gz: 16ba446928486f42a66917029451fe39f5273e79
+  data.tar.gz: 6dfef7dbad150d1f69ca07c47a38bbe40df66a0f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e7186f5cbbd2ad8b9ac238dbbf26a16ee057fbd5592ebc47e11f593d86acbcf78bdc017e80ec61405f699a731e7203350ed674792b02fa2d471e49bbef586879
+  data.tar.gz: 8531a86e70b27dabac9bce52c71eb51e244d5b4db2ac49a6e7a1bfe2eb450350bb61b6537bf2329cfa37f4f9fda9c1a474afde6688cbe5867155969e38248a2b
data/CHANGELOG.md
CHANGED
@@ -1,14 +1,5 @@
-##
-  -
-  - Fixed unnecessary sleep after exhausted retries [#166](https://github.com/logstash-plugins/logstash-output-kafka/pull/166)
-  - Changed Kafka send errors to log as warn [#179](https://github.com/logstash-plugins/logstash-output-kafka/pull/179)
-
-## 6.2.3
-  - Bugfix: Sends are now retried until successful. Previously, failed transmissions to Kafka
-    could have been lost by the KafkaProducer library. Now we verify transmission explicitly.
-    This changes the default 'retry' from 0 to retry-forever. It was a bug that we defaulted
-    to a retry count of 0.
-    https://github.com/logstash-plugins/logstash-output-kafka/pull/151
+## 7.0.0
+  - Breaking: mark deprecated `block_on_buffer_full`, `ssl` and `timeout_ms` options as obsolete
 
 ## 6.2.2
   - bump kafka dependency to 0.11.0.0
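The only breaking change in 7.0.0 is that the three long-deprecated options are now rejected outright instead of merely logging a warning. As a rough migration sketch (not taken from the diff, and assuming the usual Logstash behavior that an `:obsolete` option raises `LogStash::ConfigurationError` when the plugin is built), a 6.x-style configuration would move to the current options roughly like this; all concrete values below are illustrative:

```ruby
require "logstash/outputs/kafka"

# 6.x-era settings that 7.0.0 no longer accepts (illustrative values):
old_style = {
  "topic_id"   => "logs",
  "ssl"        => "true",    # obsolete: use security_protocol => "SSL"
  "timeout_ms" => "30000"    # obsolete: use request_timeout_ms
}

# Equivalent 7.0.0 settings; block_on_buffer_full simply goes away.
new_style = {
  "topic_id"                => "logs",
  "security_protocol"       => "SSL",
  "ssl_truststore_location" => "/path/to/truststore.jks",
  "request_timeout_ms"      => "30000"
}

begin
  LogStash::Outputs::Kafka.new(old_style)   # expected to fail fast on the obsolete keys
rescue LogStash::ConfigurationError => e
  puts "rejected: #{e.message}"
end

kafka = LogStash::Outputs::Kafka.new(new_style)
kafka.register
kafka.close
```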
data/docs/index.asciidoc
CHANGED
@@ -20,11 +20,26 @@ include::{include_path}/plugin_header.asciidoc[]
 
 ==== Description
 
-Write events to a Kafka topic.
-
-
-
-
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
 
 This output supports connecting to Kafka over:
 
@@ -121,17 +136,6 @@ The producer will attempt to batch records together into fewer requests whenever
 records are being sent to the same partition. This helps performance on both the client
 and the server. This configuration controls the default batch size in bytes.
 
-[id="plugins-{type}s-{plugin}-block_on_buffer_full"]
-===== `block_on_buffer_full` (DEPRECATED)
-
-* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
-* Value type is <<boolean,boolean>>
-* Default value is `true`
-
-When our memory buffer is exhausted we must either stop accepting new
-records (block) or throw errors. By default this setting is true and we block,
-however in some scenarios blocking is not desirable and it is better to immediately give an error.
-
 [id="plugins-{type}s-{plugin}-bootstrap_servers"]
 ===== `bootstrap_servers`
 
@@ -287,17 +291,10 @@ retries are exhausted.
 ===== `retries`
 
 * Value type is <<number,number>>
-*
-
-The default retry behavior is to retry until successful. To prevent data loss,
-the use of this setting is discouraged.
-
-If you choose to set `retries`, a value greater than zero will cause the
-client to only retry a fixed number of times. This will result in data loss
-if a transport fault exists for longer than your retry count (network outage,
-Kafka down, etc).
+* Default value is `0`
 
-
+Setting a value greater than zero will cause the client to
+resend any record whose send fails with a potentially transient error.
 
 [id="plugins-{type}s-{plugin}-retry_backoff_ms"]
 ===== `retry_backoff_ms`
@@ -342,15 +339,6 @@ Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SA
 
 The size of the TCP send buffer to use when sending data.
 
-[id="plugins-{type}s-{plugin}-ssl"]
-===== `ssl` (DEPRECATED)
-
-* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
-* Value type is <<boolean,boolean>>
-* Default value is `false`
-
-Enable SSL/TLS secured communication to Kafka broker.
-
 [id="plugins-{type}s-{plugin}-ssl_key_password"]
 ===== `ssl_key_password`
 
@@ -407,19 +395,6 @@ The truststore password
 
 The truststore type.
 
-[id="plugins-{type}s-{plugin}-timeout_ms"]
-===== `timeout_ms` (DEPRECATED)
-
-* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
-* Value type is <<number,number>>
-* Default value is `30000`
-
-The configuration controls the maximum amount of time the server will wait for acknowledgments
-from followers to meet the acknowledgment requirements the producer has specified with the
-acks configuration. If the requested number of acknowledgments are not met when the timeout
-elapses an error will be returned. This timeout is measured on the server side and does not
-include the network latency of the request.
-
 [id="plugins-{type}s-{plugin}-topic_id"]
 ===== `topic_id`
 
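The reworked `retries` documentation above reflects that the plugin no longer re-sends failed batches itself: the option now defaults to `0` and is simply forwarded to the Kafka producer's own `retries` property (see the `RETRIES_CONFIG` lines in the `create_producer` hunk below), so any resending of transiently failed records happens inside the Kafka client. A minimal JRuby sketch of that mapping, where the constants are the Kafka client's `ProducerConfig` keys and the bootstrap server, topic, serializer, and numeric values are purely illustrative:

```ruby
# Standalone illustration of where the plugin options end up.
props = java.util.Properties.new
kafka = org.apache.kafka.clients.producer.ProducerConfig

props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG,   "org.apache.kafka.common.serialization.StringSerializer")
props.put(kafka::VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
props.put(kafka::RETRIES_CONFIG, "5")             # plugin setting `retries => 5`
props.put(kafka::RETRY_BACKOFF_MS_CONFIG, "100")  # plugin setting `retry_backoff_ms => 100`

producer = org.apache.kafka.clients.producer.KafkaProducer.new(props)
record   = org.apache.kafka.clients.producer.ProducerRecord.new("logs", "hello")
producer.send(record)   # transient-error resends now happen inside the client
producer.close
```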
data/lib/logstash/outputs/kafka.rb
CHANGED
@@ -71,10 +71,8 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # `host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
   # subset of brokers.
   config :bootstrap_servers, :validate => :string, :default => 'localhost:9092'
-
-
-  # however in some scenarios blocking is not desirable and it is better to immediately give an error.
-  config :block_on_buffer_full, :validate => :boolean, :default => true, :deprecated => "This config will be removed in a future release"
+
+  config :block_on_buffer_full, :validate => :boolean, :obsolete => "This options is obsolete"
   # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
   config :buffer_memory, :validate => :number, :default => 33554432
   # The compression type for all data generated by the producer.
@@ -111,21 +109,15 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # elapses the client will resend the request if necessary or fail the request if
   # retries are exhausted.
   config :request_timeout_ms, :validate => :string
-  #
-  #
-
-  # If you choose to set `retries`, a value greater than zero will cause the
-  # client to only retry a fixed number of times. This will result in data loss
-  # if a transient error outlasts your retry count.
-  #
-  # A value less than zero is a configuration error.
-  config :retries, :validate => :number
+  # Setting a value greater than zero will cause the client to
+  # resend any record whose send fails with a potentially transient error.
+  config :retries, :validate => :number, :default => 0
   # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
   config :retry_backoff_ms, :validate => :number, :default => 100
   # The size of the TCP send buffer to use when sending data.
   config :send_buffer_bytes, :validate => :number, :default => 131072
   # Enable SSL/TLS secured communication to Kafka broker.
-  config :ssl, :validate => :boolean, :
+  config :ssl, :validate => :boolean, :obsolete => "Use security_protocol => 'SSL'"
   # The truststore type.
   config :ssl_truststore_type, :validate => :string
   # The JKS truststore path to validate the Kafka broker's certificate.
@@ -168,12 +160,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   config :jaas_path, :validate => :path
   # Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
   config :kerberos_config, :validate => :path
-
-
-
-  # elapses an error will be returned. This timeout is measured on the server side and does not
-  # include the network latency of the request.
-  config :timeout_ms, :validate => :number, :default => 30000, :deprecated => "This config will be removed in a future release. Please use request_timeout_ms"
+
+  config :timeout_ms, :validate => :number, :obsolete => "This option is obsolete. Please use request_timeout_ms"
+
   # The topic to produce messages to
   config :topic_id, :validate => :string, :required => true
   # Serializer class for the value of the message
@@ -181,17 +170,6 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 
   public
   def register
-    @thread_batch_map = Concurrent::Hash.new
-
-    if !@retries.nil?
-      if @retries < 0
-        raise ConfigurationError, "A negative retry count (#{@retries}) is not valid. Must be a value >= 0"
-      end
-
-      @logger.warn("Kafka output is configured with finite retry. This instructs Logstash to LOSE DATA after a set number of send attempts fails. If you do not want to lose data if Kafka is down, then you must remove the retry setting.", :retries => @retries)
-    end
-
-
     @producer = create_producer
     @codec.on_event do |event, data|
       begin
@@ -200,7 +178,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
         else
          record = org.apache.kafka.clients.producer.ProducerRecord.new(event.sprintf(@topic_id), event.sprintf(@message_key), data)
        end
-
+        @producer.send(record)
      rescue LogStash::ShutdownSignal
        @logger.debug('Kafka producer got shutdown signal')
      rescue => e
@@ -208,92 +186,14 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
                      :exception => e)
      end
    end
-  end # def register
-
-  def prepare(record)
-    # This output is threadsafe, so we need to keep a batch per thread.
-    @thread_batch_map[Thread.current].add(record)
-  end
-
-  def multi_receive(events)
-    t = Thread.current
-    if !@thread_batch_map.include?(t)
-      @thread_batch_map[t] = java.util.ArrayList.new(events.size)
-    end
-
-    events.each do |event|
-      break if event == LogStash::SHUTDOWN
-      @codec.encode(event)
-    end
-
-    batch = @thread_batch_map[t]
-    if batch.any?
-      retrying_send(batch)
-      batch.clear
-    end
-  end
-
-  def retrying_send(batch)
-    remaining = @retries
-
-    while batch.any?
-      if !remaining.nil?
-        if remaining < 0
-          # TODO(sissel): Offer to DLQ? Then again, if it's a transient fault,
-          # DLQing would make things worse (you dlq data that would be successful
-          # after the fault is repaired)
-          logger.info("Exhausted user-configured retry count when sending to Kafka. Dropping these events.",
-                      :max_retries => @retries, :drop_count => batch.count)
-          break
-        end
-
-        remaining -= 1
-      end
 
-
-
-      futures = batch.collect do |record|
-        begin
-          # send() can throw an exception even before the future is created.
-          @producer.send(record)
-        rescue org.apache.kafka.common.errors.TimeoutException => e
-          failures << record
-          nil
-        rescue org.apache.kafka.common.errors.InterruptException => e
-          failures << record
-          nil
-        rescue org.apache.kafka.common.errors.SerializationException => e
-          # TODO(sissel): Retrying will fail because the data itself has a problem serializing.
-          # TODO(sissel): Let's add DLQ here.
-          failures << record
-          nil
-        end
-      end.compact
-
-      futures.each_with_index do |future, i|
-        begin
-          result = future.get()
-        rescue => e
-          # TODO(sissel): Add metric to count failures, possibly by exception type.
-          logger.debug? && logger.debug("KafkaProducer.send() failed: #{e}", :exception => e);
-          failures << batch[i]
-        end
-      end
-
-      # No failures? Cool. Let's move on.
-      break if failures.empty?
+  end # def register
 
-
-
-
-      else
-        delay = @retry_backoff_ms / 1000.0
-        logger.info("Sending batch to Kafka failed. Will retry after a delay.", :batch_size => batch.size,
-                    :failures => failures.size, :sleep => delay)
-        batch = failures
-        sleep(delay)
-      end
+  def receive(event)
+    if event == LogStash::SHUTDOWN
+      return
     end
+    @codec.encode(event)
   end
 
   def close
@@ -317,14 +217,14 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
     props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
     props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
-    props.put(kafka::RETRIES_CONFIG, retries.to_s)
-    props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
+    props.put(kafka::RETRIES_CONFIG, retries.to_s)
+    props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
     props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s)
     props.put(kafka::VALUE_SERIALIZER_CLASS_CONFIG, value_serializer)
 
     props.put("security.protocol", security_protocol) unless security_protocol.nil?
 
-    if security_protocol == "SSL"
+    if security_protocol == "SSL"
       set_trustore_keystore_config(props)
     elsif security_protocol == "SASL_PLAINTEXT"
       set_sasl_config(props)
@@ -336,9 +236,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 
     org.apache.kafka.clients.producer.KafkaProducer.new(props)
   rescue => e
-    logger.error("Unable to create Kafka producer from given configuration",
-                 :kafka_error_message => e,
-                 :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
+    logger.error("Unable to create Kafka producer from given configuration", :kafka_error_message => e)
     raise e
   end
 end
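With `prepare`, `multi_receive`, and `retrying_send` removed, the whole 7.0.0 send path is the `receive` method shown above: encode the event and let the codec callback build a `ProducerRecord` and hand it to `KafkaProducer#send`. A minimal usage sketch in the style of the specs; the topic name and event fields below are made up for illustration:

```ruby
require "logstash/outputs/kafka"
require "logstash/event"

# Build and register the output, then feed it events one at a time.
kafka = LogStash::Outputs::Kafka.new("topic_id" => "logs")
kafka.register

event = LogStash::Event.new("message" => "hello kafka", "host" => "example.local")
kafka.receive(event)   # encodes the event and calls KafkaProducer#send

kafka.close
```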
data/logstash-output-kafka.gemspec
CHANGED
@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-output-kafka'
-  s.version = '
+  s.version = '7.0.0'
   s.licenses = ['Apache License (2.0)']
   s.summary = 'Output events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on the broker'
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
data/spec/integration/outputs/kafka_spec.rb
CHANGED
@@ -157,7 +157,7 @@ describe "outputs/kafka", :integration => true do
   def load_kafka_data(config)
     kafka = LogStash::Outputs::Kafka.new(config)
     kafka.register
-
+    num_events.times do kafka.receive(event) end
     kafka.close
   end
 
data/spec/unit/outputs/kafka_spec.rb
CHANGED
@@ -25,168 +25,34 @@ describe "outputs/kafka" do
   context 'when outputting messages' do
     it 'should send logstash event to kafka broker' do
       expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-        .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
+        .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
       kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
       kafka.register
-      kafka.
+      kafka.receive(event)
     end
 
     it 'should support Event#sprintf placeholders in topic_id' do
       topic_field = 'topic_name'
       expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
-        .with("my_topic", event.to_s)
-      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
+        .with("my_topic", event.to_s)
+      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
       kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
       kafka.register
-      kafka.
+      kafka.receive(event)
     end
 
     it 'should support field referenced message_keys' do
      expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
-        .with("test", "172.0.0.1", event.to_s)
-      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
+        .with("test", "172.0.0.1", event.to_s)
+      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
       kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
       kafka.register
-      kafka.
+      kafka.receive(event)
     end
-
+
     it 'should raise config error when truststore location is not set and ssl is enabled' do
-      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("
+      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"ssl" => "true"}))
      expect { kafka.register }.to raise_error(LogStash::ConfigurationError, /ssl_truststore_location must be set when SSL is enabled/)
     end
   end
-
-  context "when KafkaProducer#send() raises an exception" do
-    let(:failcount) { (rand * 10).to_i }
-    let(:sendcount) { failcount + 1 }
-
-    let(:exception_classes) { [
-      org.apache.kafka.common.errors.TimeoutException,
-      org.apache.kafka.common.errors.InterruptException,
-      org.apache.kafka.common.errors.SerializationException
-    ] }
-
-    before do
-      count = 0
-      expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-        .exactly(sendcount).times
-        .and_wrap_original do |m, *args|
-        if count < failcount # fail 'failcount' times in a row.
-          count += 1
-          # Pick an exception at random
-          raise exception_classes.shuffle.first.new("injected exception for testing")
-        else
-          m.call(*args) # call original
-        end
-      end
-    end
-
-    it "should retry until successful" do
-      kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
-      kafka.register
-      kafka.multi_receive([event])
-    end
-  end
-
-  context "when a send fails" do
-    context "and the default retries behavior is used" do
-      # Fail this many times and then finally succeed.
-      let(:failcount) { (rand * 10).to_i }
-
-      # Expect KafkaProducer.send() to get called again after every failure, plus the successful one.
-      let(:sendcount) { failcount + 1 }
-
-      it "should retry until successful" do
-        count = 0;
-
-        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .exactly(sendcount).times
-          .and_wrap_original do |m, *args|
-          if count < failcount
-            count += 1
-            # inject some failures.
-
-            # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
-            future = java.util.concurrent.FutureTask.new { raise "Failed" }
-            future.run
-            future
-          else
-            m.call(*args)
-          end
-        end
-        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
-        kafka.register
-        kafka.multi_receive([event])
-      end
-    end
-
-    context 'when retries is 0' do
-      let(:retries) { 0 }
-      let(:max_sends) { 1 }
-
-      it "should should only send once" do
-        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .once
-          .and_wrap_original do |m, *args|
-          # Always fail.
-          future = java.util.concurrent.FutureTask.new { raise "Failed" }
-          future.run
-          future
-        end
-        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
-        kafka.register
-        kafka.multi_receive([event])
-      end
-
-      it 'should not sleep' do
-        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .once
-          .and_wrap_original do |m, *args|
-          # Always fail.
-          future = java.util.concurrent.FutureTask.new { raise "Failed" }
-          future.run
-          future
-        end
-
-        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
-        expect(kafka).not_to receive(:sleep).with(anything)
-        kafka.register
-        kafka.multi_receive([event])
-      end
-    end
-
-    context "and when retries is set by the user" do
-      let(:retries) { (rand * 10).to_i }
-      let(:max_sends) { retries + 1 }
-
-      it "should give up after retries are exhausted" do
-        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .at_most(max_sends).times
-          .and_wrap_original do |m, *args|
-          # Always fail.
-          future = java.util.concurrent.FutureTask.new { raise "Failed" }
-          future.run
-          future
-        end
-        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
-        kafka.register
-        kafka.multi_receive([event])
-      end
-
-      it 'should only sleep retries number of times' do
-        expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
-          .at_most(max_sends)
-          .and_wrap_original do |m, *args|
-          # Always fail.
-          future = java.util.concurrent.FutureTask.new { raise "Failed" }
-          future.run
-          future
-        end
-        kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
-        expect(kafka).to receive(:sleep).exactly(retries).times
-        kafka.register
-        kafka.multi_receive([event])
-      end
-    end
-  end
 end
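The deleted retry-oriented examples have no direct replacement in 7.0.0, since the plugin no longer re-sends on its own. If equivalent coverage were wanted, a spec in the same style would only need to assert that each `receive` translates into exactly one `KafkaProducer#send` call. A hypothetical example, not part of the released spec file:

```ruby
# Hypothetical spec sketch mirroring the style of the remaining examples.
describe "outputs/kafka" do
  let(:simple_kafka_config) { {'topic_id' => 'test'} }
  let(:event) { LogStash::Event.new({'message' => 'hello'}) }

  it 'sends each received event to Kafka exactly once' do
    expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once
    kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
    kafka.register
    kafka.receive(event)
  end
end
```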
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-kafka
 version: !ruby/object:Gem::Version
-  version:
+  version: 7.0.0
 platform: ruby
 authors:
 - Elasticsearch
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2017-08-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -114,9 +114,7 @@ dependencies:
       - - ">="
         - !ruby/object:Gem::Version
           version: '0'
-description: This gem is a Logstash plugin required to be installed on top of the
-  Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This
-  gem is not a stand-alone program
+description: This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program
 email: info@elastic.co
 executables: []
 extensions: []
@@ -178,11 +176,10 @@ requirements:
 - jar 'org.slf4j:slf4j-log4j12', '1.7.21'
 - jar 'org.apache.logging.log4j:log4j-1.2-api', '2.6.2'
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.4.8
 signing_key:
 specification_version: 4
-summary: Output events to a Kafka topic. This uses the Kafka Producer API to write
-  messages to a topic on the broker
+summary: Output events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on the broker
 test_files:
 - spec/integration/outputs/kafka_spec.rb
 - spec/unit/outputs/kafka_spec.rb