logstash-integration-kafka 10.7.2-java → 10.7.7-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3ca014c7463762ed6c2f4be1a9c0dce13356980f7ddf244294dd02837862d54f
- data.tar.gz: 1d68b07fa127636a6bba867ffa0fff6beefd5b1d0a7ba1419eedcea224a97fe5
+ metadata.gz: 7a9e872c7a49cd1eb9fc9709c10be72d1c9609b0f19ef5550a2dd7cb317a925e
+ data.tar.gz: b0541ab279e7b7fcea74f1a951e7916abf54adde8bae60e2c4b5897b6f4daadb
  SHA512:
- metadata.gz: 97c32c1cdf91c205a61fefa6ec23544b45cbc66205418df85b2e7bb0c724a2a5672f658b947c086f38808b445d99bc053ee0651eae6b3da0e5cee6065f519c59
- data.tar.gz: 6f67718001a257aafa339dde326192eef009a1bd74a1c1d4e8b37ca277362f95da3a56100819d9ec972a782e0631a5e4c91013d59efab720ea8e8c39b9d100a4
+ metadata.gz: 1e53d939b471032107afaeb2b7656bf6c755d803e1effbda487b953ae6cc0b79fd97458834d079168023e0ecfa8aeec67a474dc1a4a99336a7be10af3d1b6412
+ data.tar.gz: bea64e388923bab253278e46eccc0701c2bda8f3067cc1e429223fbc3f9cd3a797ced2e334755bbbe3bbf1cf711237359d85e1c6ec22cde822ced2285840fd9f
data/CHANGELOG.md CHANGED
@@ -1,8 +1,26 @@
+ ## 10.7.7
+ - Fix: Correct the settings to allow basic auth to work properly, either by setting `schema_registry_key/secret` or by embedding the username/password in the
+ URL [#94](https://github.com/logstash-plugins/logstash-integration-kafka/pull/94)
+
+ ## 10.7.6
+ - Test: specify development dependency version [#91](https://github.com/logstash-plugins/logstash-integration-kafka/pull/91)
+
+ ## 10.7.5
+ - Improved error handling in the input plugin to avoid errors 'escaping' from the plugin and crashing the logstash
+ process [#87](https://github.com/logstash-plugins/logstash-integration-kafka/pull/87)
+
+ ## 10.7.4
+ - Docs: make sure Kafka clients version is updated in docs [#83](https://github.com/logstash-plugins/logstash-integration-kafka/pull/83)
+   Since **10.6.0** Kafka client was updated to **2.5.1**
+
+ ## 10.7.3
+ - Changed `decorate_events` to also add Kafka headers [#78](https://github.com/logstash-plugins/logstash-integration-kafka/pull/78)
+
  ## 10.7.2
  - Update Jersey dependency to version 2.33 [#75](https://github.com/logstash-plugins/logstash-integration-kafka/pull/75)

  ## 10.7.1
- - Fix: dropped usage of SHUTDOWN event deprecated since Logstash 5.0 [#71](https://github.com/logstash-plugins/logstash-integration-kafka/issue/71)
+ - Fix: dropped usage of SHUTDOWN event deprecated since Logstash 5.0 [#71](https://github.com/logstash-plugins/logstash-integration-kafka/pull/71)

  ## 10.7.0
  - Switched use from Faraday to Manticore as HTTP client library to access Schema Registry service
data/docs/index.asciidoc CHANGED
@@ -1,7 +1,7 @@
  :plugin: kafka
  :type: integration
  :no_codec:
- :kafka_client: 2.4
+ :kafka_client: 2.5.1

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
data/docs/input-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: input
  :default_codec: plain
- :kafka_client: 2.4
- :kafka_client_doc: 24
+ :kafka_client: 2.5
+ :kafka_client_doc: 25

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
@@ -73,7 +73,7 @@ either when the record was created (default) or when it was received by the
  broker. See more about property log.message.timestamp.type at
  https://kafka.apache.org/{kafka_client_doc}/documentation.html#brokerconfigs

- Metadata is only added to the event if the `decorate_events` option is set to true (it defaults to false).
+ Metadata is only added to the event if the `decorate_events` option is set to `basic` or `extended` (it defaults to `none`).

  Please note that `@metadata` fields are not part of any of your events at output time. If you need this information to be
  inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`.
@@ -99,7 +99,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-client_rack>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-connections_max_idle_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-consumer_threads>> |<<number,number>>|No
- | <<plugins-{type}s-{plugin}-decorate_events>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-decorate_events>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-enable_auto_commit>> |<<boolean,boolean>>|No
  | <<plugins-{type}s-{plugin}-exclude_internal_topics>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-fetch_max_bytes>> |<<number,number>>|No
@@ -246,10 +246,16 @@ balance — more threads than partitions means that some threads will be idl
  [id="plugins-{type}s-{plugin}-decorate_events"]
  ===== `decorate_events`

- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
- Option to add Kafka metadata like topic, message size to the event.
+ * Value type is <<string,string>>
+ * Accepted values are:
+   - `none`: no metadata is added
+   - `basic`: the record's attributes are added
+   - `extended`: the record's attributes and headers are added
+   - `false`: deprecated alias for `none`
+   - `true`: deprecated alias for `basic`
+ * Default value is `none`
+
+ Option to add Kafka metadata like topic, message size and header key values to the event.
  This will add a field named `kafka` to the logstash event containing the following attributes:

  * `topic`: The topic this message is associated with
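
For illustration, a minimal pipeline using the new `decorate_events` modes might look like the sketch below; the broker address, topic name, and copied field name are placeholders, not part of this diff:

```
input {
  kafka {
    bootstrap_servers => "localhost:9092"    # placeholder broker address
    topics            => ["example_topic"]   # placeholder topic
    decorate_events   => "extended"          # record attributes plus headers under [@metadata][kafka]
  }
}
filter {
  # @metadata fields are dropped at output time, so copy what you need onto the event itself
  mutate {
    add_field => { "kafka_topic" => "%{[@metadata][kafka][topic]}" }
  }
}
```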
data/docs/output-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: output
  :default_codec: plain
- :kafka_client: 2.4
- :kafka_client_doc: 24
+ :kafka_client: 2.5
+ :kafka_client_doc: 25

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -8,6 +8,7 @@ require 'manticore'
8
8
  require "json"
9
9
  require "logstash/json"
10
10
  require_relative '../plugin_mixins/common'
11
+ require 'logstash/plugin_mixins/deprecation_logger_support'
11
12
 
12
13
  # This input will read events from a Kafka topic. It uses the 0.10 version of
13
14
  # the consumer API provided by Kafka to read messages from the broker.
@@ -58,6 +59,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
58
59
 
59
60
  include LogStash::PluginMixins::KafkaSupport
60
61
  include ::LogStash::PluginMixins::KafkaAvroSchemaRegistry
62
+ include LogStash::PluginMixins::DeprecationLoggerSupport
61
63
 
62
64
  config_name 'kafka'
63
65
 
@@ -233,27 +235,57 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
233
235
  config :sasl_jaas_config, :validate => :string
234
236
  # Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
235
237
  config :kerberos_config, :validate => :path
236
- # Option to add Kafka metadata like topic, message size to the event.
237
- # This will add a field named `kafka` to the logstash event containing the following attributes:
238
+ # Option to add Kafka metadata like topic, message size and header key values to the event.
239
+ # With `basic` this will add a field named `kafka` to the logstash event containing the following attributes:
238
240
  # `topic`: The topic this message is associated with
239
241
  # `consumer_group`: The consumer group used to read in this event
240
242
  # `partition`: The partition this message is associated with
241
243
  # `offset`: The offset from the partition this message is associated with
242
244
  # `key`: A ByteBuffer containing the message key
243
245
  # `timestamp`: The timestamp of this message
244
- config :decorate_events, :validate => :boolean, :default => false
246
+ # While with `extended` it adds also all the key values present in the Kafka header if the key is valid UTF-8 else
247
+ # silently skip it.
248
+ config :decorate_events, :validate => %w(none basic extended false true), :default => "none"
249
+
250
+ attr_reader :metadata_mode
245
251
 
246
252
  public
247
253
  def register
248
254
  @runner_threads = []
255
+ @metadata_mode = extract_metadata_level(@decorate_events)
256
+ @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
249
257
  check_schema_registry_parameters
250
258
  end
251
259
 
260
+ METADATA_NONE = Set[].freeze
261
+ METADATA_BASIC = Set[:record_props].freeze
262
+ METADATA_EXTENDED = Set[:record_props, :headers].freeze
263
+ METADATA_DEPRECATION_MAP = { 'true' => 'basic', 'false' => 'none' }
264
+
265
+ private
266
+ def extract_metadata_level(decorate_events_setting)
267
+ metadata_enabled = decorate_events_setting
268
+
269
+ if METADATA_DEPRECATION_MAP.include?(metadata_enabled)
270
+ canonical_value = METADATA_DEPRECATION_MAP[metadata_enabled]
271
+ deprecation_logger.deprecated("Deprecated value `#{decorate_events_setting}` for `decorate_events` option; use `#{canonical_value}` instead.")
272
+ metadata_enabled = canonical_value
273
+ end
274
+
275
+ case metadata_enabled
276
+ when 'none' then METADATA_NONE
277
+ when 'basic' then METADATA_BASIC
278
+ when 'extended' then METADATA_EXTENDED
279
+ end
280
+ end
281
+
252
282
  public
253
283
  def run(logstash_queue)
254
- @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
255
- @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
256
- @runner_threads.each { |t| t.join }
284
+ @runner_consumers = consumer_threads.times.map { |i| subscribe(create_consumer("#{client_id}-#{i}")) }
285
+ @runner_threads = @runner_consumers.map.with_index { |consumer, i| thread_runner(logstash_queue, consumer,
286
+ "kafka-input-worker-#{client_id}-#{i}") }
287
+ @runner_threads.each(&:start)
288
+ @runner_threads.each(&:join)
257
289
  end # def run
258
290
 
259
291
  public
@@ -267,53 +299,100 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
267
299
  @runner_consumers
268
300
  end
269
301
 
270
- private
271
- def thread_runner(logstash_queue, consumer)
272
- Thread.new do
302
+ def subscribe(consumer)
303
+ @pattern.nil? ? consumer.subscribe(topics) : consumer.subscribe(@pattern)
304
+ consumer
305
+ end
306
+
307
+ def thread_runner(logstash_queue, consumer, name)
308
+ java.lang.Thread.new do
309
+ LogStash::Util::set_thread_name(name)
273
310
  begin
274
- unless @topics_pattern.nil?
275
- nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
276
- pattern = java.util.regex.Pattern.compile(@topics_pattern)
277
- consumer.subscribe(pattern, nooplistener)
278
- else
279
- consumer.subscribe(topics);
280
- end
281
311
  codec_instance = @codec.clone
282
- while !stop?
283
- records = consumer.poll(poll_timeout_ms)
284
- next unless records.count > 0
285
- for record in records do
286
- codec_instance.decode(record.value.to_s) do |event|
287
- decorate(event)
288
- if schema_registry_url
289
- json = LogStash::Json.load(record.value.to_s)
290
- json.each do |k, v|
291
- event.set(k, v)
292
- end
293
- event.remove("message")
294
- end
295
- if @decorate_events
296
- event.set("[@metadata][kafka][topic]", record.topic)
297
- event.set("[@metadata][kafka][consumer_group]", @group_id)
298
- event.set("[@metadata][kafka][partition]", record.partition)
299
- event.set("[@metadata][kafka][offset]", record.offset)
300
- event.set("[@metadata][kafka][key]", record.key)
301
- event.set("[@metadata][kafka][timestamp]", record.timestamp)
302
- end
303
- logstash_queue << event
304
- end
312
+ until stop?
313
+ records = do_poll(consumer)
314
+ unless records.empty?
315
+ records.each { |record| handle_record(record, codec_instance, logstash_queue) }
316
+ maybe_commit_offset(consumer)
305
317
  end
306
- # Manual offset commit
307
- consumer.commitSync if @enable_auto_commit.eql?(false)
308
318
  end
309
- rescue org.apache.kafka.common.errors.WakeupException => e
310
- raise e if !stop?
311
319
  ensure
312
320
  consumer.close
313
321
  end
314
322
  end
315
323
  end
316
324
 
325
+ def do_poll(consumer)
326
+ records = []
327
+ begin
328
+ records = consumer.poll(poll_timeout_ms)
329
+ rescue org.apache.kafka.common.errors.WakeupException => e
330
+ logger.debug("Wake up from poll", :kafka_error_message => e)
331
+ raise e unless stop?
332
+ rescue => e
333
+ logger.error("Unable to poll Kafka consumer",
334
+ :kafka_error_message => e,
335
+ :cause => e.respond_to?(:getCause) ? e.getCause : nil)
336
+ Stud.stoppable_sleep(1) { stop? }
337
+ end
338
+ records
339
+ end
340
+
341
+ def handle_record(record, codec_instance, queue)
342
+ codec_instance.decode(record.value.to_s) do |event|
343
+ decorate(event)
344
+ maybe_apply_schema(event, record)
345
+ maybe_set_metadata(event, record)
346
+ queue << event
347
+ end
348
+ end
349
+
350
+ def maybe_apply_schema(event, record)
351
+ if schema_registry_url
352
+ json = LogStash::Json.load(record.value.to_s)
353
+ json.each do |k, v|
354
+ event.set(k, v)
355
+ end
356
+ event.remove("message")
357
+ end
358
+ end
359
+
360
+ def maybe_set_metadata(event, record)
361
+ if @metadata_mode.include?(:record_props)
362
+ event.set("[@metadata][kafka][topic]", record.topic)
363
+ event.set("[@metadata][kafka][consumer_group]", @group_id)
364
+ event.set("[@metadata][kafka][partition]", record.partition)
365
+ event.set("[@metadata][kafka][offset]", record.offset)
366
+ event.set("[@metadata][kafka][key]", record.key)
367
+ event.set("[@metadata][kafka][timestamp]", record.timestamp)
368
+ end
369
+ if @metadata_mode.include?(:headers)
370
+ record.headers.each do |header|
371
+ s = String.from_java_bytes(header.value)
372
+ s.force_encoding(Encoding::UTF_8)
373
+ if s.valid_encoding?
374
+ event.set("[@metadata][kafka][headers][" + header.key + "]", s)
375
+ end
376
+ end
377
+ end
378
+ end
379
+
380
+ def maybe_commit_offset(consumer)
381
+ begin
382
+ consumer.commitSync if @enable_auto_commit.eql?(false)
383
+ rescue org.apache.kafka.common.errors.WakeupException => e
384
+ logger.debug("Wake up from commitSync", :kafka_error_message => e)
385
+ raise e unless stop?
386
+ rescue StandardError => e
387
+ # For transient errors, the commit should be successful after the next set of
388
+ # polled records has been processed.
389
+ # But, it might also be worth thinking about adding a configurable retry mechanism
390
+ logger.error("Unable to commit records",
391
+ :kafka_error_message => e,
392
+ :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
393
+ end
394
+ end
395
+
317
396
  private
318
397
  def create_consumer(client_id)
319
398
  begin
@@ -354,13 +433,16 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
354
433
  if schema_registry_url
355
434
  props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroDeserializer.java_class)
356
435
  serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
357
- props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.to_s)
436
+ props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.uri.to_s)
358
437
  if schema_registry_proxy && !schema_registry_proxy.empty?
359
438
  props.put(serdes_config::PROXY_HOST, @schema_registry_proxy_host)
360
439
  props.put(serdes_config::PROXY_PORT, @schema_registry_proxy_port)
361
440
  end
362
441
  if schema_registry_key && !schema_registry_key.empty?
442
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
363
443
  props.put(serdes_config::USER_INFO_CONFIG, schema_registry_key + ":" + schema_registry_secret.value)
444
+ else
445
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'URL')
364
446
  end
365
447
  end
366
448
  if security_protocol == "SSL"
data/lib/logstash/plugin_mixins/common.rb CHANGED
@@ -53,9 +53,8 @@ module LogStash
53
53
  options[:auth] = {:user => schema_registry_key, :password => schema_registry_secret.value}
54
54
  end
55
55
  client = Manticore::Client.new(options)
56
-
57
56
  begin
58
- response = client.get(@schema_registry_url.to_s + '/subjects').body
57
+ response = client.get(@schema_registry_url.uri.to_s + '/subjects').body
59
58
  rescue Manticore::ManticoreException => e
60
59
  raise LogStash::ConfigurationError.new("Schema registry service doesn't respond, error: #{e.message}")
61
60
  end
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-integration-kafka'
- s.version = '10.7.2'
+ s.version = '10.7.7'
  s.licenses = ['Apache-2.0']
  s.summary = "Integration with Kafka - input and output plugins"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
@@ -47,9 +47,11 @@ Gem::Specification.new do |s|
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'stud', '>= 0.0.22', '< 0.1.0'
  s.add_runtime_dependency "manticore", '>= 0.5.4', '< 1.0.0'
+ s.add_runtime_dependency 'logstash-mixin-deprecation_logger_support', '~>1.0'

  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'rspec-wait'
- s.add_development_dependency 'ruby-kafka'
+ s.add_development_dependency 'digest-crc', '~> 0.5.1' # 0.6.0 started using a C-ext
+ s.add_development_dependency 'ruby-kafka' # depends on digest-crc
  s.add_development_dependency 'snappy'
  end
data/spec/check_docs_spec.rb ADDED
@@ -0,0 +1,36 @@
+ # encoding: utf-8
+ require 'logstash-integration-kafka_jars'
+
+ describe "[DOCS]" do
+
+ let(:docs_files) do
+ ['index.asciidoc', 'input-kafka.asciidoc', 'output-kafka.asciidoc'].map { |name| File.join('docs', name) }
+ end
+
+ let(:kafka_version_properties) do
+ loader = java.lang.Thread.currentThread.getContextClassLoader
+ version = loader.getResource('kafka/kafka-version.properties')
+ fail "kafka-version.properties missing" unless version
+ properties = java.util.Properties.new
+ properties.load version.openStream
+ properties
+ end
+
+ it 'is sync-ed with Kafka client version' do
+ version = kafka_version_properties.get('version') # e.g. '2.5.1'
+
+ fails = docs_files.map do |file|
+ if line = File.readlines(file).find { |line| line.index(':kafka_client:') }
+ puts "found #{line.inspect} in #{file}" if $VERBOSE # e.g. ":kafka_client: 2.5\n"
+ if !version.start_with?(line.strip.split[1])
+ "documentation at #{file} is out of sync with kafka-clients version (#{version.inspect}), detected line: #{line.inspect}"
+ else
+ nil
+ end
+ end
+ end
+
+ fail "\n" + fails.join("\n") if fails.flatten.any?
+ end
+
+ end
data/spec/fixtures/jaas.config ADDED
@@ -0,0 +1,5 @@
+ SchemaRegistry-Props {
+ org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+ file="build/confluent_platform/etc/schema-registry/pwd"
+ debug="true";
+ };
data/spec/fixtures/pwd ADDED
@@ -0,0 +1,5 @@
+ fred: OBF:1w8t1tvf1w261w8v1w1c1tvn1w8x,user,admin
+ barney: changeme,user,developer
+ admin:admin,admin
+ betty: MD5:164c88b302622e17050af52c89945d44,user
+ wilma: CRYPT:adpexzg3FUZAk,admin,sr-user
@@ -36,7 +36,15 @@ describe "inputs/kafka", :integration => true do
36
36
  end
37
37
  let(:decorate_config) do
38
38
  { 'topics' => ['logstash_integration_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_3,
39
- 'auto_offset_reset' => 'earliest', 'decorate_events' => true }
39
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'true' }
40
+ end
41
+ let(:decorate_headers_config) do
42
+ { 'topics' => ['logstash_integration_topic_plain_with_headers'], 'codec' => 'plain', 'group_id' => group_id_3,
43
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'extended' }
44
+ end
45
+ let(:decorate_bad_headers_config) do
46
+ { 'topics' => ['logstash_integration_topic_plain_with_headers_badly'], 'codec' => 'plain', 'group_id' => group_id_3,
47
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'extended' }
40
48
  end
41
49
  let(:manual_commit_config) do
42
50
  { 'topics' => ['logstash_integration_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_5,
@@ -45,6 +53,35 @@ describe "inputs/kafka", :integration => true do
45
53
  let(:timeout_seconds) { 30 }
46
54
  let(:num_events) { 103 }
47
55
 
56
+ before(:all) do
57
+ # Prepare message with headers with valid UTF-8 chars
58
+ header = org.apache.kafka.common.header.internals.RecordHeader.new("name", "John ανδρεα €".to_java_bytes)
59
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(
60
+ "logstash_integration_topic_plain_with_headers", 0, "key", "value", [header])
61
+ send_message(record)
62
+
63
+ # Prepare message with headers with invalid UTF-8 chars
64
+ invalid = "日本".encode('Shift_JIS').force_encoding(Encoding::UTF_8).to_java_bytes
65
+ header = org.apache.kafka.common.header.internals.RecordHeader.new("name", invalid)
66
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(
67
+ "logstash_integration_topic_plain_with_headers_badly", 0, "key", "value", [header])
68
+
69
+ send_message(record)
70
+ end
71
+
72
+ def send_message(record)
73
+ props = java.util.Properties.new
74
+ kafka = org.apache.kafka.clients.producer.ProducerConfig
75
+ props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
76
+ props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
77
+ props.put(kafka::VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
78
+
79
+ producer = org.apache.kafka.clients.producer.KafkaProducer.new(props)
80
+
81
+ producer.send(record)
82
+ producer.close
83
+ end
84
+
48
85
  describe "#kafka-topics" do
49
86
 
50
87
  it "should consume all messages from plain 3-partition topic" do
@@ -74,7 +111,7 @@ describe "inputs/kafka", :integration => true do
74
111
 
75
112
  context "#kafka-topics-pattern" do
76
113
  it "should consume all messages from all 3 topics" do
77
- total_events = num_events * 3
114
+ total_events = num_events * 3 + 2
78
115
  queue = consume_messages(pattern_config, timeout: timeout_seconds, event_count: total_events)
79
116
  expect(queue.length).to eq(total_events)
80
117
  end
@@ -91,6 +128,31 @@ describe "inputs/kafka", :integration => true do
91
128
  expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
92
129
  end
93
130
  end
131
+
132
+ it "should show the right topic and group name in and kafka headers decorated kafka section" do
133
+ start = LogStash::Timestamp.now.time.to_i
134
+ consume_messages(decorate_headers_config, timeout: timeout_seconds, event_count: 1) do |queue, _|
135
+ expect(queue.length).to eq(1)
136
+ event = queue.shift
137
+ expect(event.get("[@metadata][kafka][topic]")).to eq("logstash_integration_topic_plain_with_headers")
138
+ expect(event.get("[@metadata][kafka][consumer_group]")).to eq(group_id_3)
139
+ expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
140
+ expect(event.get("[@metadata][kafka][headers][name]")).to eq("John ανδρεα €")
141
+ end
142
+ end
143
+
144
+ it "should skip headers not encoded in UTF-8" do
145
+ start = LogStash::Timestamp.now.time.to_i
146
+ consume_messages(decorate_bad_headers_config, timeout: timeout_seconds, event_count: 1) do |queue, _|
147
+ expect(queue.length).to eq(1)
148
+ event = queue.shift
149
+ expect(event.get("[@metadata][kafka][topic]")).to eq("logstash_integration_topic_plain_with_headers_badly")
150
+ expect(event.get("[@metadata][kafka][consumer_group]")).to eq(group_id_3)
151
+ expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
152
+
153
+ expect(event.include?("[@metadata][kafka][headers][name]")).to eq(false)
154
+ end
155
+ end
94
156
  end
95
157
 
96
158
  context "#kafka-offset-commit" do
@@ -129,19 +191,31 @@ private
129
191
 
130
192
  def consume_messages(config, queue: Queue.new, timeout:, event_count:)
131
193
  kafka_input = LogStash::Inputs::Kafka.new(config)
194
+ kafka_input.register
132
195
  t = Thread.new { kafka_input.run(queue) }
133
196
  begin
134
197
  t.run
135
198
  wait(timeout).for { queue.length }.to eq(event_count) unless timeout.eql?(false)
136
199
  block_given? ? yield(queue, kafka_input) : queue
137
200
  ensure
201
+ kafka_input.do_stop
138
202
  t.kill
139
- t.join(30_000)
203
+ t.join(30)
140
204
  end
141
205
  end
142
206
 
143
207
 
144
208
  describe "schema registry connection options" do
209
+ schema_registry = Manticore::Client.new
210
+ before (:all) do
211
+ shutdown_schema_registry
212
+ startup_schema_registry(schema_registry)
213
+ end
214
+
215
+ after(:all) do
216
+ shutdown_schema_registry
217
+ end
218
+
145
219
  context "remote endpoint validation" do
146
220
  it "should fail if not reachable" do
147
221
  config = {'schema_registry_url' => 'http://localnothost:8081'}
@@ -168,8 +242,7 @@ describe "schema registry connection options" do
168
242
  end
169
243
 
170
244
  after(:each) do
171
- schema_registry_client = Manticore::Client.new
172
- delete_remote_schema(schema_registry_client, SUBJECT_NAME)
245
+ delete_remote_schema(schema_registry, SUBJECT_NAME)
173
246
  end
174
247
 
175
248
  it "should correctly complete registration phase" do
@@ -200,9 +273,25 @@ end
200
273
 
201
274
  # AdminClientConfig = org.alpache.kafka.clients.admin.AdminClientConfig
202
275
 
276
+ def startup_schema_registry(schema_registry, auth=false)
277
+ system('./stop_schema_registry.sh')
278
+ auth ? system('./start_auth_schema_registry.sh') : system('./start_schema_registry.sh')
279
+ url = auth ? "http://barney:changeme@localhost:8081" : "http://localhost:8081"
280
+ Stud.try(20.times, [Manticore::SocketException, StandardError, RSpec::Expectations::ExpectationNotMetError]) do
281
+ expect(schema_registry.get(url).code).to eq(200)
282
+ end
283
+ end
284
+
203
285
  describe "Schema registry API", :integration => true do
286
+ schema_registry = Manticore::Client.new
287
+
288
+ before(:all) do
289
+ startup_schema_registry(schema_registry)
290
+ end
204
291
 
205
- let(:schema_registry) { Manticore::Client.new }
292
+ after(:all) do
293
+ shutdown_schema_registry
294
+ end
206
295
 
207
296
  context 'listing subject on clean instance' do
208
297
  it "should return an empty set" do
@@ -228,37 +317,58 @@ describe "Schema registry API", :integration => true do
228
317
  expect( subjects ).to be_empty
229
318
  end
230
319
  end
320
+ end
321
+
322
+ def shutdown_schema_registry
323
+ system('./stop_schema_registry.sh')
324
+ end
325
+
326
+ describe "Deserializing with the schema registry", :integration => true do
327
+ schema_registry = Manticore::Client.new
328
+
329
+ shared_examples 'it reads from a topic using a schema registry' do |with_auth|
330
+
331
+ before(:all) do
332
+ shutdown_schema_registry
333
+ startup_schema_registry(schema_registry, with_auth)
334
+ end
335
+
336
+ after(:all) do
337
+ shutdown_schema_registry
338
+ end
231
339
 
232
- context 'use the schema to serialize' do
233
340
  after(:each) do
234
- expect( schema_registry.delete('http://localhost:8081/subjects/topic_avro-value').code ).to be(200)
341
+ expect( schema_registry.delete("#{subject_url}/#{avro_topic_name}-value").code ).to be(200)
235
342
  sleep 1
236
- expect( schema_registry.delete('http://localhost:8081/subjects/topic_avro-value?permanent=true').code ).to be(200)
343
+ expect( schema_registry.delete("#{subject_url}/#{avro_topic_name}-value?permanent=true").code ).to be(200)
237
344
 
238
345
  Stud.try(3.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
239
346
  wait(10).for do
240
- subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
347
+ subjects = JSON.parse schema_registry.get(subject_url).body
241
348
  subjects.empty?
242
349
  end.to be_truthy
243
350
  end
244
351
  end
245
352
 
246
- let(:group_id_1) {rand(36**8).to_s(36)}
247
-
248
- let(:avro_topic_name) { "topic_avro" }
249
-
250
- let(:plain_config) do
251
- { 'schema_registry_url' => 'http://localhost:8081',
252
- 'topics' => [avro_topic_name],
253
- 'codec' => 'plain',
254
- 'group_id' => group_id_1,
255
- 'auto_offset_reset' => 'earliest' }
353
+ let(:base_config) do
354
+ {
355
+ 'topics' => [avro_topic_name],
356
+ 'codec' => 'plain',
357
+ 'group_id' => group_id_1,
358
+ 'auto_offset_reset' => 'earliest'
359
+ }
256
360
  end
257
361
 
258
- def delete_topic_if_exists(topic_name)
362
+ let(:group_id_1) {rand(36**8).to_s(36)}
363
+
364
+ def delete_topic_if_exists(topic_name, user = nil, password = nil)
259
365
  props = java.util.Properties.new
260
366
  props.put(Java::org.apache.kafka.clients.admin.AdminClientConfig::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
261
-
367
+ serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
368
+ unless user.nil?
369
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
370
+ props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
371
+ end
262
372
  admin_client = org.apache.kafka.clients.admin.AdminClient.create(props)
263
373
  topics_list = admin_client.listTopics().names().get()
264
374
  if topics_list.contains(topic_name)
@@ -267,7 +377,7 @@ describe "Schema registry API", :integration => true do
267
377
  end
268
378
  end
269
379
 
270
- def write_some_data_to(topic_name)
380
+ def write_some_data_to(topic_name, user = nil, password = nil)
271
381
  props = java.util.Properties.new
272
382
  config = org.apache.kafka.clients.producer.ProducerConfig
273
383
 
@@ -275,6 +385,10 @@ describe "Schema registry API", :integration => true do
275
385
  props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081")
276
386
 
277
387
  props.put(config::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
388
+ unless user.nil?
389
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
390
+ props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
391
+ end
278
392
  props.put(config::KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringSerializer.java_class)
279
393
  props.put(config::VALUE_SERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroSerializer.java_class)
280
394
 
@@ -296,11 +410,11 @@ describe "Schema registry API", :integration => true do
296
410
  end
297
411
 
298
412
  it "stored a new schema using Avro Kafka serdes" do
299
- delete_topic_if_exists avro_topic_name
300
- write_some_data_to avro_topic_name
413
+ auth ? delete_topic_if_exists(avro_topic_name, user, password) : delete_topic_if_exists(avro_topic_name)
414
+ auth ? write_some_data_to(avro_topic_name, user, password) : write_some_data_to(avro_topic_name)
301
415
 
302
- subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
303
- expect( subjects ).to contain_exactly("topic_avro-value")
416
+ subjects = JSON.parse schema_registry.get(subject_url).body
417
+ expect( subjects ).to contain_exactly("#{avro_topic_name}-value")
304
418
 
305
419
  num_events = 1
306
420
  queue = consume_messages(plain_config, timeout: 30, event_count: num_events)
@@ -311,4 +425,43 @@ describe "Schema registry API", :integration => true do
311
425
  expect( elem.get("map_field")["inner_field"] ).to eq("inner value")
312
426
  end
313
427
  end
428
+
429
+ context 'with an unauthed schema registry' do
430
+ let(:auth) { false }
431
+ let(:avro_topic_name) { "topic_avro" }
432
+ let(:subject_url) { "http://localhost:8081/subjects" }
433
+ let(:plain_config) { base_config.merge!({'schema_registry_url' => "http://localhost:8081"}) }
434
+
435
+ it_behaves_like 'it reads from a topic using a schema registry', false
436
+ end
437
+
438
+ context 'with an authed schema registry' do
439
+ let(:auth) { true }
440
+ let(:user) { "barney" }
441
+ let(:password) { "changeme" }
442
+ let(:avro_topic_name) { "topic_avro_auth" }
443
+ let(:subject_url) { "http://#{user}:#{password}@localhost:8081/subjects" }
444
+
445
+ context 'using schema_registry_key' do
446
+ let(:plain_config) do
447
+ base_config.merge!({
448
+ 'schema_registry_url' => "http://localhost:8081",
449
+ 'schema_registry_key' => user,
450
+ 'schema_registry_secret' => password
451
+ })
452
+ end
453
+
454
+ it_behaves_like 'it reads from a topic using a schema registry', true
455
+ end
456
+
457
+ context 'using schema_registry_url' do
458
+ let(:plain_config) do
459
+ base_config.merge!({
460
+ 'schema_registry_url' => "http://#{user}:#{password}@localhost:8081"
461
+ })
462
+ end
463
+
464
+ it_behaves_like 'it reads from a topic using a schema registry', true
465
+ end
466
+ end
314
467
  end
@@ -44,7 +44,7 @@ describe "outputs/kafka", :integration => true do
44
44
  end
45
45
 
46
46
  context 'when outputting messages serialized as Byte Array' do
47
- let(:test_topic) { 'topic1b' }
47
+ let(:test_topic) { 'logstash_integration_topicbytearray' }
48
48
  let(:num_events) { 3 }
49
49
 
50
50
  before :each do
@@ -3,53 +3,210 @@ require "logstash/devutils/rspec/spec_helper"
3
3
  require "logstash/inputs/kafka"
4
4
  require "concurrent"
5
5
 
6
- class MockConsumer
7
- def initialize
8
- @wake = Concurrent::AtomicBoolean.new(false)
9
- end
10
6
 
11
- def subscribe(topics)
12
- end
13
-
14
- def poll(ms)
15
- if @wake.value
16
- raise org.apache.kafka.common.errors.WakeupException.new
17
- else
18
- 10.times.map do
19
- org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
7
+ describe LogStash::Inputs::Kafka do
8
+ let(:common_config) { { 'topics' => ['logstash'] } }
9
+ let(:config) { common_config }
10
+ let(:consumer_double) { double(:consumer) }
11
+ let(:needs_raise) { false }
12
+ let(:payload) {
13
+ 10.times.map do
14
+ org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
15
+ end
16
+ }
17
+ subject { LogStash::Inputs::Kafka.new(config) }
18
+
19
+ describe '#poll' do
20
+ before do
21
+ polled = false
22
+ allow(consumer_double).to receive(:poll) do
23
+ if polled
24
+ []
25
+ else
26
+ polled = true
27
+ payload
28
+ end
20
29
  end
21
30
  end
31
+
32
+ it 'should poll' do
33
+ expect(consumer_double).to receive(:poll)
34
+ expect(subject.do_poll(consumer_double)).to eq(payload)
35
+ end
36
+
37
+ it 'should return nil if Kafka Exception is encountered' do
38
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
39
+ expect(subject.do_poll(consumer_double)).to be_empty
40
+ end
41
+
42
+ it 'should not throw if Kafka Exception is encountered' do
43
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
44
+ expect{subject.do_poll(consumer_double)}.not_to raise_error
45
+ end
46
+
47
+ it 'should return no records if Assertion Error is encountered' do
48
+ expect(consumer_double).to receive(:poll).and_raise(java.lang.AssertionError.new(''))
49
+ expect{subject.do_poll(consumer_double)}.to raise_error(java.lang.AssertionError)
50
+ end
22
51
  end
23
52
 
24
- def close
53
+ describe '#maybe_commit_offset' do
54
+ context 'with auto commit disabled' do
55
+ let(:config) { common_config.merge('enable_auto_commit' => false) }
56
+
57
+ it 'should call commit on the consumer' do
58
+ expect(consumer_double).to receive(:commitSync)
59
+ subject.maybe_commit_offset(consumer_double)
60
+ end
61
+ it 'should not throw if a Kafka Exception is encountered' do
62
+ expect(consumer_double).to receive(:commitSync).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
63
+ expect{subject.maybe_commit_offset(consumer_double)}.not_to raise_error
64
+ end
65
+
66
+ it 'should throw if Assertion Error is encountered' do
67
+ expect(consumer_double).to receive(:commitSync).and_raise(java.lang.AssertionError.new(''))
68
+ expect{subject.maybe_commit_offset(consumer_double)}.to raise_error(java.lang.AssertionError)
69
+ end
70
+ end
71
+
72
+ context 'with auto commit enabled' do
73
+ let(:config) { common_config.merge('enable_auto_commit' => true) }
74
+
75
+ it 'should not call commit on the consumer' do
76
+ expect(consumer_double).not_to receive(:commitSync)
77
+ subject.maybe_commit_offset(consumer_double)
78
+ end
79
+ end
25
80
  end
26
81
 
27
- def wakeup
28
- @wake.make_true
82
+ describe '#register' do
83
+ it "should register" do
84
+ expect { subject.register }.to_not raise_error
85
+ end
29
86
  end
30
- end
31
87
 
32
- describe LogStash::Inputs::Kafka do
33
- let(:config) { { 'topics' => ['logstash'], 'consumer_threads' => 4 } }
34
- subject { LogStash::Inputs::Kafka.new(config) }
88
+ describe '#running' do
89
+ let(:q) { Queue.new }
90
+ let(:config) { common_config.merge('client_id' => 'test') }
91
+
92
+ before do
93
+ expect(subject).to receive(:create_consumer).once.and_return(consumer_double)
94
+ allow(consumer_double).to receive(:wakeup)
95
+ allow(consumer_double).to receive(:close)
96
+ allow(consumer_double).to receive(:subscribe)
97
+ end
35
98
 
36
- it "should register" do
37
- expect { subject.register }.to_not raise_error
99
+ context 'when running' do
100
+ before do
101
+ polled = false
102
+ allow(consumer_double).to receive(:poll) do
103
+ if polled
104
+ []
105
+ else
106
+ polled = true
107
+ payload
108
+ end
109
+ end
110
+
111
+ subject.register
112
+ t = Thread.new do
113
+ sleep(1)
114
+ subject.do_stop
115
+ end
116
+ subject.run(q)
117
+ t.join
118
+ end
119
+
120
+ it 'should process the correct number of events' do
121
+ expect(q.size).to eq(10)
122
+ end
123
+
124
+ it 'should set the consumer thread name' do
125
+ expect(subject.instance_variable_get('@runner_threads').first.get_name).to eq("kafka-input-worker-test-0")
126
+ end
127
+ end
128
+
129
+ context 'when errors are encountered during poll' do
130
+ before do
131
+ raised, polled = false
132
+ allow(consumer_double).to receive(:poll) do
133
+ unless raised
134
+ raised = true
135
+ raise exception
136
+ end
137
+ if polled
138
+ []
139
+ else
140
+ polled = true
141
+ payload
142
+ end
143
+ end
144
+
145
+ subject.register
146
+ t = Thread.new do
147
+ sleep 2
148
+ subject.do_stop
149
+ end
150
+ subject.run(q)
151
+ t.join
152
+ end
153
+
154
+ context "when a Kafka exception is raised" do
155
+ let(:exception) { org.apache.kafka.common.errors.TopicAuthorizationException.new('Invalid topic') }
156
+
157
+ it 'should poll successfully' do
158
+ expect(q.size).to eq(10)
159
+ end
160
+ end
161
+
162
+ context "when a StandardError is raised" do
163
+ let(:exception) { StandardError.new('Standard Error') }
164
+
165
+ it 'should retry and poll successfully' do
166
+ expect(q.size).to eq(10)
167
+ end
168
+ end
169
+
170
+ context "when a java error is raised" do
171
+ let(:exception) { java.lang.AssertionError.new('Fatal assertion') }
172
+
173
+ it "should not retry" do
174
+ expect(q.size).to eq(0)
175
+ end
176
+ end
177
+ end
38
178
  end
39
179
 
40
180
  context "register parameter verification" do
41
- let(:config) do
42
- { 'schema_registry_url' => 'http://localhost:8081', 'topics' => ['logstash'], 'consumer_threads' => 4 }
43
- end
181
+ context "schema_registry_url" do
182
+ let(:config) do
183
+ { 'schema_registry_url' => 'http://localhost:8081', 'topics' => ['logstash'], 'consumer_threads' => 4 }
184
+ end
44
185
 
45
- it "schema_registry_url conflict with value_deserializer_class should fail" do
46
- config['value_deserializer_class'] = 'my.fantasy.Deserializer'
47
- expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of value_deserializer_class/
186
+ it "conflict with value_deserializer_class should fail" do
187
+ config['value_deserializer_class'] = 'my.fantasy.Deserializer'
188
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of value_deserializer_class/
189
+ end
190
+
191
+ it "conflict with topics_pattern should fail" do
192
+ config['topics_pattern'] = 'topic_.*'
193
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of topics_pattern/
194
+ end
48
195
  end
49
196
 
50
- it "schema_registry_url conflict with topics_pattern should fail" do
51
- config['topics_pattern'] = 'topic_.*'
52
- expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of topics_pattern/
197
+ context "decorate_events" do
198
+ let(:config) { { 'decorate_events' => 'extended'} }
199
+
200
+ it "should raise error for invalid value" do
201
+ config['decorate_events'] = 'avoid'
202
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Something is wrong with your configuration./
203
+ end
204
+
205
+ it "should map old true boolean value to :record_props mode" do
206
+ config['decorate_events'] = "true"
207
+ subject.register
208
+ expect(subject.metadata_mode).to include(:record_props)
209
+ end
53
210
  end
54
211
  end
55
212
 
@@ -8,6 +8,8 @@ describe "outputs/kafka" do
8
8
  let (:event) { LogStash::Event.new({'message' => 'hello', 'topic_name' => 'my_topic', 'host' => '172.0.0.1',
9
9
  '@timestamp' => LogStash::Timestamp.now}) }
10
10
 
11
+ let(:future) { double('kafka producer future') }
12
+
11
13
  context 'when initializing' do
12
14
  it "should register" do
13
15
  output = LogStash::Plugin.lookup("output", "kafka").new(simple_kafka_config)
@@ -24,8 +26,8 @@ describe "outputs/kafka" do
24
26
 
25
27
  context 'when outputting messages' do
26
28
  it 'should send logstash event to kafka broker' do
27
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
28
- .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord)).and_call_original
29
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).
30
+ with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
29
31
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
30
32
  kafka.register
31
33
  kafka.multi_receive([event])
@@ -33,18 +35,18 @@ describe "outputs/kafka" do
33
35
 
34
36
  it 'should support Event#sprintf placeholders in topic_id' do
35
37
  topic_field = 'topic_name'
36
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
37
- .with("my_topic", event.to_s).and_call_original
38
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
38
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
39
+ with("my_topic", event.to_s).and_call_original
40
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
39
41
  kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
40
42
  kafka.register
41
43
  kafka.multi_receive([event])
42
44
  end
43
45
 
44
46
  it 'should support field referenced message_keys' do
45
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
46
- .with("test", "172.0.0.1", event.to_s).and_call_original
47
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
47
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
48
+ with("test", "172.0.0.1", event.to_s).and_call_original
49
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
48
50
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
49
51
  kafka.register
50
52
  kafka.multi_receive([event])
@@ -71,22 +73,24 @@ describe "outputs/kafka" do
71
73
  before do
72
74
  count = 0
73
75
  expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
74
- .exactly(sendcount).times
75
- .and_wrap_original do |m, *args|
76
+ .exactly(sendcount).times do
76
77
  if count < failcount # fail 'failcount' times in a row.
77
78
  count += 1
78
79
  # Pick an exception at random
79
80
  raise exception_classes.shuffle.first.new("injected exception for testing")
80
81
  else
81
- m.call(*args) # call original
82
+ count = :done
83
+ future # return future
82
84
  end
83
85
  end
86
+ expect(future).to receive :get
84
87
  end
85
88
 
86
89
  it "should retry until successful" do
87
90
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
88
91
  kafka.register
89
92
  kafka.multi_receive([event])
93
+ sleep(1.0) # allow for future.get call
90
94
  end
91
95
  end
92
96
 
@@ -101,15 +105,13 @@ describe "outputs/kafka" do
101
105
 
102
106
  before do
103
107
  count = 0
104
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
105
- .exactly(1).times
106
- .and_wrap_original do |m, *args|
108
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(1).times do
107
109
  if count < failcount # fail 'failcount' times in a row.
108
110
  count += 1
109
111
  # Pick an exception at random
110
112
  raise exception_classes.shuffle.first.new("injected exception for testing")
111
113
  else
112
- m.call(*args) # call original
114
+ fail 'unexpected producer#send invocation'
113
115
  end
114
116
  end
115
117
  end
@@ -131,25 +133,24 @@ describe "outputs/kafka" do
131
133
 
132
134
  it "should retry until successful" do
133
135
  count = 0
134
-
135
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
136
- .exactly(sendcount).times
137
- .and_wrap_original do |m, *args|
136
+ success = nil
137
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(sendcount).times do
138
138
  if count < failcount
139
139
  count += 1
140
140
  # inject some failures.
141
141
 
142
142
  # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
143
143
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
144
- future.run
145
- future
146
144
  else
147
- m.call(*args)
145
+ success = true
146
+ future = java.util.concurrent.FutureTask.new { nil } # return no-op future
148
147
  end
148
+ future.tap { Thread.start { future.run } }
149
149
  end
150
150
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
151
151
  kafka.register
152
152
  kafka.multi_receive([event])
153
+ expect( success ).to be true
153
154
  end
154
155
  end
155
156
 
@@ -158,9 +159,7 @@ describe "outputs/kafka" do
158
159
  let(:max_sends) { 1 }
159
160
 
160
161
  it "should should only send once" do
161
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
162
- .once
163
- .and_wrap_original do |m, *args|
162
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
164
163
  # Always fail.
165
164
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
166
165
  future.run
@@ -172,9 +171,7 @@ describe "outputs/kafka" do
172
171
  end
173
172
 
174
173
  it 'should not sleep' do
175
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
176
- .once
177
- .and_wrap_original do |m, *args|
174
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
178
175
  # Always fail.
179
176
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
180
177
  future.run
@@ -193,13 +190,10 @@ describe "outputs/kafka" do
193
190
  let(:max_sends) { retries + 1 }
194
191
 
195
192
  it "should give up after retries are exhausted" do
196
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
197
- .at_most(max_sends).times
198
- .and_wrap_original do |m, *args|
193
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
199
194
  # Always fail.
200
195
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
201
- future.run
202
- future
196
+ future.tap { Thread.start { future.run } }
203
197
  end
204
198
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
205
199
  kafka.register
@@ -207,9 +201,7 @@ describe "outputs/kafka" do
207
201
  end
208
202
 
209
203
  it 'should only sleep retries number of times' do
210
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
211
- .at_most(max_sends).times
212
- .and_wrap_original do |m, *args|
204
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
213
205
  # Always fail.
214
206
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
215
207
  future.run
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-integration-kafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 10.7.2
4
+ version: 10.7.7
5
5
  platform: java
6
6
  authors:
7
7
  - Elastic
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2021-03-05 00:00:00.000000000 Z
11
+ date: 2021-07-06 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  requirement: !ruby/object:Gem::Requirement
@@ -126,6 +126,20 @@ dependencies:
126
126
  - - "<"
127
127
  - !ruby/object:Gem::Version
128
128
  version: 1.0.0
129
+ - !ruby/object:Gem::Dependency
130
+ requirement: !ruby/object:Gem::Requirement
131
+ requirements:
132
+ - - "~>"
133
+ - !ruby/object:Gem::Version
134
+ version: '1.0'
135
+ name: logstash-mixin-deprecation_logger_support
136
+ prerelease: false
137
+ type: :runtime
138
+ version_requirements: !ruby/object:Gem::Requirement
139
+ requirements:
140
+ - - "~>"
141
+ - !ruby/object:Gem::Version
142
+ version: '1.0'
129
143
  - !ruby/object:Gem::Dependency
130
144
  requirement: !ruby/object:Gem::Requirement
131
145
  requirements:
@@ -154,6 +168,20 @@ dependencies:
154
168
  - - ">="
155
169
  - !ruby/object:Gem::Version
156
170
  version: '0'
171
+ - !ruby/object:Gem::Dependency
172
+ requirement: !ruby/object:Gem::Requirement
173
+ requirements:
174
+ - - "~>"
175
+ - !ruby/object:Gem::Version
176
+ version: 0.5.1
177
+ name: digest-crc
178
+ prerelease: false
179
+ type: :development
180
+ version_requirements: !ruby/object:Gem::Requirement
181
+ requirements:
182
+ - - "~>"
183
+ - !ruby/object:Gem::Version
184
+ version: 0.5.1
157
185
  - !ruby/object:Gem::Dependency
158
186
  requirement: !ruby/object:Gem::Requirement
159
187
  requirements:
@@ -206,6 +234,9 @@ files:
206
234
  - lib/logstash/plugin_mixins/common.rb
207
235
  - lib/logstash/plugin_mixins/kafka_support.rb
208
236
  - logstash-integration-kafka.gemspec
237
+ - spec/check_docs_spec.rb
238
+ - spec/fixtures/jaas.config
239
+ - spec/fixtures/pwd
209
240
  - spec/fixtures/trust-store_stub.jks
210
241
  - spec/integration/inputs/kafka_spec.rb
211
242
  - spec/integration/outputs/kafka_spec.rb
@@ -255,6 +286,9 @@ signing_key:
255
286
  specification_version: 4
256
287
  summary: Integration with Kafka - input and output plugins
257
288
  test_files:
289
+ - spec/check_docs_spec.rb
290
+ - spec/fixtures/jaas.config
291
+ - spec/fixtures/pwd
258
292
  - spec/fixtures/trust-store_stub.jks
259
293
  - spec/integration/inputs/kafka_spec.rb
260
294
  - spec/integration/outputs/kafka_spec.rb