logstash-integration-kafka 10.7.1-java → 10.7.6-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d68bf1e5dada6ec2bea00b759962a1cf0db3b1555c38ffd2f5f4a84178a79d9e
- data.tar.gz: d41de9544b85313e5f064d68304be2044a77837e3a55778836cb0f04da085b76
+ metadata.gz: fc9c66beb8b3a0074d0fa5090dc8896eedd796428e4b7644d8d96e159c7bed2f
+ data.tar.gz: 780f7aeac690ee7a23e954e94c2359d8e9527bcd1464dde007ce7e55c4a023f8
  SHA512:
- metadata.gz: 7b25d38f7e6014889a03060e38f04bdfde157dcec9a7bb5567936cdae636c83448594cc53941980363b463521561ab27bc4d2bc1ad9c4d3f2a1f4f83cca11878
- data.tar.gz: 88244076005fa307881cf3b5108fb0c2f274b8e03f9238fda2f629799ab4751c9d1c7ab09148d67894191045e676686d479c1612ce112d597654d576b5652b0f
+ metadata.gz: db2e1a6eefc076887de8b42944fe5e0d4ddf025915faf55a4f150710772b08c170e0b4bd629871ca89f4b45fa36b572eb15191c284dec5ad62409c228343e9c5
+ data.tar.gz: b1ae9060e3cf83f6b7c6ca8d949ab2f78696b7a983c7154d8db01e2004875ece63f202ac5778095112adf9597b2d54cedac0ed79682b59bb0bd1b9002265fd36
data/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
+ ## 10.7.6
+ - Test: specify development dependency version [#91](https://github.com/logstash-plugins/logstash-integration-kafka/pull/91)
+
+ ## 10.7.5
+ - Improved error handling in the input plugin to avoid errors 'escaping' from the plugin, and crashing the logstash
+   process [#87](https://github.com/logstash-plugins/logstash-integration-kafka/pull/87)
+
+ ## 10.7.4
+ - Docs: make sure Kafka clients version is updated in docs [#83](https://github.com/logstash-plugins/logstash-integration-kafka/pull/83)
+   Since **10.6.0** Kafka client was updated to **2.5.1**
+
+ ## 10.7.3
+ - Changed `decorate_events` to add also Kafka headers [#78](https://github.com/logstash-plugins/logstash-integration-kafka/pull/78)
+
+ ## 10.7.2
+ - Update Jersey dependency to version 2.33 [#75](https://github.com/logstash-plugins/logstash-integration-kafka/pull/75)
+
  ## 10.7.1
- - Fix: dropped usage of SHUTDOWN event deprecated since Logstash 5.0 [#71](https://github.com/logstash-plugins/logstash-integration-kafka/issue/71)
+ - Fix: dropped usage of SHUTDOWN event deprecated since Logstash 5.0 [#71](https://github.com/logstash-plugins/logstash-integration-kafka/pull/71)

  ## 10.7.0
  - Switched use from Faraday to Manticore as HTTP client library to access Schema Registry service
data/docs/index.asciidoc CHANGED
@@ -1,7 +1,7 @@
  :plugin: kafka
  :type: integration
  :no_codec:
- :kafka_client: 2.4
+ :kafka_client: 2.5.1

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
data/docs/input-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: input
  :default_codec: plain
- :kafka_client: 2.4
- :kafka_client_doc: 24
+ :kafka_client: 2.5
+ :kafka_client_doc: 25

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
@@ -73,7 +73,7 @@ either when the record was created (default) or when it was received by the
  broker. See more about property log.message.timestamp.type at
  https://kafka.apache.org/{kafka_client_doc}/documentation.html#brokerconfigs

- Metadata is only added to the event if the `decorate_events` option is set to true (it defaults to false).
+ Metadata is only added to the event if the `decorate_events` option is set to `basic` or `extended` (it defaults to `none`).

  Please note that `@metadata` fields are not part of any of your events at output time. If you need these information to be
  inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`.
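
Note: a minimal sketch of the behavior this paragraph describes, assuming only the standard `LogStash::Event` API (field values are illustrative): `@metadata` is readable by filters and conditionals inside the pipeline, but it is omitted when the event is serialized by an output, which is why the `mutate` copy is needed.

```ruby
# Minimal sketch (standard LogStash::Event API; values are illustrative only).
event = LogStash::Event.new('message' => 'hello')
event.set('[@metadata][kafka][topic]', 'logstash') # what decorate_events would populate
event.get('[@metadata][kafka][topic]')  # => "logstash" -- visible to filters/conditionals
event.to_json                           # => {"message":"hello",...} -- @metadata is not serialized
```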
@@ -99,7 +99,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
  | <<plugins-{type}s-{plugin}-client_rack>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-connections_max_idle_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-consumer_threads>> |<<number,number>>|No
- | <<plugins-{type}s-{plugin}-decorate_events>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-decorate_events>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-enable_auto_commit>> |<<boolean,boolean>>|No
  | <<plugins-{type}s-{plugin}-exclude_internal_topics>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-fetch_max_bytes>> |<<number,number>>|No
@@ -246,10 +246,16 @@ balance — more threads than partitions means that some threads will be idl
  [id="plugins-{type}s-{plugin}-decorate_events"]
  ===== `decorate_events`

- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
- Option to add Kafka metadata like topic, message size to the event.
+ * Value type is <<string,string>>
+ * Accepted values are:
+ - `none`: no metadata is added
+ - `basic`: record's attributes are added
+ - `extended`: record's attributes, headers are added
+ - `false`: deprecated alias for `none`
+ - `true`: deprecated alias for `basic`
+ * Default value is `none`
+
+ Option to add Kafka metadata like topic, message size and header key values to the event.
  This will add a field named `kafka` to the logstash event containing the following attributes:

  * `topic`: The topic this message is associated with
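
Note: a minimal sketch of the new option in use, assuming the input plugin API introduced later in this diff (`register` resolves `decorate_events` into the `metadata_mode` reader); the topic name is illustrative only.

```ruby
# Minimal sketch; 'decorate_events' now takes a string enum instead of a boolean.
require "logstash/inputs/kafka"

input = LogStash::Inputs::Kafka.new(
  'topics'          => ['logstash'],
  'decorate_events' => 'extended' # 'none' | 'basic' | 'extended'; 'true'/'false' are deprecated aliases
)
input.register
input.metadata_mode # => Set[:record_props, :headers] ('basic' => Set[:record_props], 'none' => empty set)
```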
data/docs/output-kafka.asciidoc CHANGED
@@ -2,8 +2,8 @@
  :plugin: kafka
  :type: output
  :default_codec: plain
- :kafka_client: 2.4
- :kafka_client_doc: 24
+ :kafka_client: 2.5
+ :kafka_client_doc: 25

  ///////////////////////////////////////////
  START - GENERATED VARIABLES, DO NOT EDIT!
data/lib/logstash-integration-kafka_jars.rb CHANGED
@@ -9,7 +9,7 @@ require_jar('io.confluent', 'kafka-schema-registry-client', '5.5.1')
  require_jar('org.apache.kafka', 'kafka_2.12', '2.5.1')
  require_jar('io.confluent', 'common-utils', '5.5.1')
  require_jar('javax.ws.rs', 'javax.ws.rs-api', '2.1.1')
- require_jar('org.glassfish.jersey.core', 'jersey-common', '2.30')
+ require_jar('org.glassfish.jersey.core', 'jersey-common', '2.33')
  require_jar('org.apache.kafka', 'kafka-clients', '2.5.1')
  require_jar('com.github.luben', 'zstd-jni', '1.4.4-7')
  require_jar('org.slf4j', 'slf4j-api', '1.7.30')
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -8,6 +8,7 @@ require 'manticore'
  require "json"
  require "logstash/json"
  require_relative '../plugin_mixins/common'
+ require 'logstash/plugin_mixins/deprecation_logger_support'

  # This input will read events from a Kafka topic. It uses the 0.10 version of
  # the consumer API provided by Kafka to read messages from the broker.
@@ -58,6 +59,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base

  include LogStash::PluginMixins::KafkaSupport
  include ::LogStash::PluginMixins::KafkaAvroSchemaRegistry
+ include LogStash::PluginMixins::DeprecationLoggerSupport

  config_name 'kafka'

@@ -233,27 +235,57 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  config :sasl_jaas_config, :validate => :string
  # Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
  config :kerberos_config, :validate => :path
- # Option to add Kafka metadata like topic, message size to the event.
- # This will add a field named `kafka` to the logstash event containing the following attributes:
+ # Option to add Kafka metadata like topic, message size and header key values to the event.
+ # With `basic` this will add a field named `kafka` to the logstash event containing the following attributes:
  # `topic`: The topic this message is associated with
  # `consumer_group`: The consumer group used to read in this event
  # `partition`: The partition this message is associated with
  # `offset`: The offset from the partition this message is associated with
  # `key`: A ByteBuffer containing the message key
  # `timestamp`: The timestamp of this message
- config :decorate_events, :validate => :boolean, :default => false
+ # While with `extended` it adds also all the key values present in the Kafka header if the key is valid UTF-8 else
+ # silently skip it.
+ config :decorate_events, :validate => %w(none basic extended false true), :default => "none"
+
+ attr_reader :metadata_mode

  public
  def register
  @runner_threads = []
+ @metadata_mode = extract_metadata_level(@decorate_events)
+ @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
  check_schema_registry_parameters
  end

+ METADATA_NONE = Set[].freeze
+ METADATA_BASIC = Set[:record_props].freeze
+ METADATA_EXTENDED = Set[:record_props, :headers].freeze
+ METADATA_DEPRECATION_MAP = { 'true' => 'basic', 'false' => 'none' }
+
+ private
+ def extract_metadata_level(decorate_events_setting)
+ metadata_enabled = decorate_events_setting
+
+ if METADATA_DEPRECATION_MAP.include?(metadata_enabled)
+ canonical_value = METADATA_DEPRECATION_MAP[metadata_enabled]
+ deprecation_logger.deprecated("Deprecated value `#{decorate_events_setting}` for `decorate_events` option; use `#{canonical_value}` instead.")
+ metadata_enabled = canonical_value
+ end
+
+ case metadata_enabled
+ when 'none' then METADATA_NONE
+ when 'basic' then METADATA_BASIC
+ when 'extended' then METADATA_EXTENDED
+ end
+ end
+
  public
  def run(logstash_queue)
- @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
- @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
- @runner_threads.each { |t| t.join }
+ @runner_consumers = consumer_threads.times.map { |i| subscribe(create_consumer("#{client_id}-#{i}")) }
+ @runner_threads = @runner_consumers.map.with_index { |consumer, i| thread_runner(logstash_queue, consumer,
+ "kafka-input-worker-#{client_id}-#{i}") }
+ @runner_threads.each(&:start)
+ @runner_threads.each(&:join)
  end # def run

  public
@@ -267,53 +299,100 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  @runner_consumers
  end

- private
- def thread_runner(logstash_queue, consumer)
- Thread.new do
+ def subscribe(consumer)
+ @pattern.nil? ? consumer.subscribe(topics) : consumer.subscribe(@pattern)
+ consumer
+ end
+
+ def thread_runner(logstash_queue, consumer, name)
+ java.lang.Thread.new do
+ LogStash::Util::set_thread_name(name)
  begin
- unless @topics_pattern.nil?
- nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
- pattern = java.util.regex.Pattern.compile(@topics_pattern)
- consumer.subscribe(pattern, nooplistener)
- else
- consumer.subscribe(topics);
- end
  codec_instance = @codec.clone
- while !stop?
- records = consumer.poll(poll_timeout_ms)
- next unless records.count > 0
- for record in records do
- codec_instance.decode(record.value.to_s) do |event|
- decorate(event)
- if schema_registry_url
- json = LogStash::Json.load(record.value.to_s)
- json.each do |k, v|
- event.set(k, v)
- end
- event.remove("message")
- end
- if @decorate_events
- event.set("[@metadata][kafka][topic]", record.topic)
- event.set("[@metadata][kafka][consumer_group]", @group_id)
- event.set("[@metadata][kafka][partition]", record.partition)
- event.set("[@metadata][kafka][offset]", record.offset)
- event.set("[@metadata][kafka][key]", record.key)
- event.set("[@metadata][kafka][timestamp]", record.timestamp)
- end
- logstash_queue << event
- end
+ until stop?
+ records = do_poll(consumer)
+ unless records.empty?
+ records.each { |record| handle_record(record, codec_instance, logstash_queue) }
+ maybe_commit_offset(consumer)
  end
- # Manual offset commit
- consumer.commitSync if @enable_auto_commit.eql?(false)
  end
- rescue org.apache.kafka.common.errors.WakeupException => e
- raise e if !stop?
  ensure
  consumer.close
  end
  end
  end

+ def do_poll(consumer)
+ records = []
+ begin
+ records = consumer.poll(poll_timeout_ms)
+ rescue org.apache.kafka.common.errors.WakeupException => e
+ logger.debug("Wake up from poll", :kafka_error_message => e)
+ raise e unless stop?
+ rescue => e
+ logger.error("Unable to poll Kafka consumer",
+ :kafka_error_message => e,
+ :cause => e.respond_to?(:getCause) ? e.getCause : nil)
+ Stud.stoppable_sleep(1) { stop? }
+ end
+ records
+ end
+
+ def handle_record(record, codec_instance, queue)
+ codec_instance.decode(record.value.to_s) do |event|
+ decorate(event)
+ maybe_apply_schema(event, record)
+ maybe_set_metadata(event, record)
+ queue << event
+ end
+ end
+
+ def maybe_apply_schema(event, record)
+ if schema_registry_url
+ json = LogStash::Json.load(record.value.to_s)
+ json.each do |k, v|
+ event.set(k, v)
+ end
+ event.remove("message")
+ end
+ end
+
+ def maybe_set_metadata(event, record)
+ if @metadata_mode.include?(:record_props)
+ event.set("[@metadata][kafka][topic]", record.topic)
+ event.set("[@metadata][kafka][consumer_group]", @group_id)
+ event.set("[@metadata][kafka][partition]", record.partition)
+ event.set("[@metadata][kafka][offset]", record.offset)
+ event.set("[@metadata][kafka][key]", record.key)
+ event.set("[@metadata][kafka][timestamp]", record.timestamp)
+ end
+ if @metadata_mode.include?(:headers)
+ record.headers.each do |header|
+ s = String.from_java_bytes(header.value)
+ s.force_encoding(Encoding::UTF_8)
+ if s.valid_encoding?
+ event.set("[@metadata][kafka][headers][" + header.key + "]", s)
+ end
+ end
+ end
+ end
+
+ def maybe_commit_offset(consumer)
+ begin
+ consumer.commitSync if @enable_auto_commit.eql?(false)
+ rescue org.apache.kafka.common.errors.WakeupException => e
+ logger.debug("Wake up from commitSync", :kafka_error_message => e)
+ raise e unless stop?
+ rescue StandardError => e
+ # For transient errors, the commit should be successful after the next set of
+ # polled records has been processed.
+ # But, it might also be worth thinking about adding a configurable retry mechanism
+ logger.error("Unable to commit records",
+ :kafka_error_message => e,
+ :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
+ end
+ end
+
  private
  def create_consumer(client_id)
  begin
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-integration-kafka'
- s.version = '10.7.1'
+ s.version = '10.7.6'
  s.licenses = ['Apache-2.0']
  s.summary = "Integration with Kafka - input and output plugins"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
@@ -47,9 +47,11 @@ Gem::Specification.new do |s|
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'stud', '>= 0.0.22', '< 0.1.0'
  s.add_runtime_dependency "manticore", '>= 0.5.4', '< 1.0.0'
+ s.add_runtime_dependency 'logstash-mixin-deprecation_logger_support', '~>1.0'

  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'rspec-wait'
- s.add_development_dependency 'ruby-kafka'
+ s.add_development_dependency 'digest-crc', '~> 0.5.1' # 0.6.0 started using a C-ext
+ s.add_development_dependency 'ruby-kafka' # depends on digest-crc
  s.add_development_dependency 'snappy'
  end
data/spec/check_docs_spec.rb ADDED
@@ -0,0 +1,36 @@
+ # encoding: utf-8
+ require 'logstash-integration-kafka_jars'
+
+ describe "[DOCS]" do
+
+ let(:docs_files) do
+ ['index.asciidoc', 'input-kafka.asciidoc', 'output-kafka.asciidoc'].map { |name| File.join('docs', name) }
+ end
+
+ let(:kafka_version_properties) do
+ loader = java.lang.Thread.currentThread.getContextClassLoader
+ version = loader.getResource('kafka/kafka-version.properties')
+ fail "kafka-version.properties missing" unless version
+ properties = java.util.Properties.new
+ properties.load version.openStream
+ properties
+ end
+
+ it 'is sync-ed with Kafka client version' do
+ version = kafka_version_properties.get('version') # e.g. '2.5.1'
+
+ fails = docs_files.map do |file|
+ if line = File.readlines(file).find { |line| line.index(':kafka_client:') }
+ puts "found #{line.inspect} in #{file}" if $VERBOSE # e.g. ":kafka_client: 2.5\n"
+ if !version.start_with?(line.strip.split[1])
+ "documentation at #{file} is out of sync with kafka-clients version (#{version.inspect}), detected line: #{line.inspect}"
+ else
+ nil
+ end
+ end
+ end
+
+ fail "\n" + fails.join("\n") if fails.flatten.any?
+ end
+
+ end
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -36,7 +36,15 @@ describe "inputs/kafka", :integration => true do
  end
  let(:decorate_config) do
  { 'topics' => ['logstash_integration_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_3,
- 'auto_offset_reset' => 'earliest', 'decorate_events' => true }
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'true' }
+ end
+ let(:decorate_headers_config) do
+ { 'topics' => ['logstash_integration_topic_plain_with_headers'], 'codec' => 'plain', 'group_id' => group_id_3,
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'extended' }
+ end
+ let(:decorate_bad_headers_config) do
+ { 'topics' => ['logstash_integration_topic_plain_with_headers_badly'], 'codec' => 'plain', 'group_id' => group_id_3,
+ 'auto_offset_reset' => 'earliest', 'decorate_events' => 'extended' }
  end
  let(:manual_commit_config) do
  { 'topics' => ['logstash_integration_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_5,
@@ -45,6 +53,35 @@ describe "inputs/kafka", :integration => true do
  let(:timeout_seconds) { 30 }
  let(:num_events) { 103 }

+ before(:all) do
+ # Prepare message with headers with valid UTF-8 chars
+ header = org.apache.kafka.common.header.internals.RecordHeader.new("name", "John ανδρεα €".to_java_bytes)
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(
+ "logstash_integration_topic_plain_with_headers", 0, "key", "value", [header])
+ send_message(record)
+
+ # Prepare message with headers with invalid UTF-8 chars
+ invalid = "日本".encode('Shift_JIS').force_encoding(Encoding::UTF_8).to_java_bytes
+ header = org.apache.kafka.common.header.internals.RecordHeader.new("name", invalid)
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(
+ "logstash_integration_topic_plain_with_headers_badly", 0, "key", "value", [header])
+
+ send_message(record)
+ end
+
+ def send_message(record)
+ props = java.util.Properties.new
+ kafka = org.apache.kafka.clients.producer.ProducerConfig
+ props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
+ props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
+ props.put(kafka::VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
+
+ producer = org.apache.kafka.clients.producer.KafkaProducer.new(props)
+
+ producer.send(record)
+ producer.close
+ end
+
  describe "#kafka-topics" do

  it "should consume all messages from plain 3-partition topic" do
@@ -74,7 +111,7 @@ describe "inputs/kafka", :integration => true do

  context "#kafka-topics-pattern" do
  it "should consume all messages from all 3 topics" do
- total_events = num_events * 3
+ total_events = num_events * 3 + 2
  queue = consume_messages(pattern_config, timeout: timeout_seconds, event_count: total_events)
  expect(queue.length).to eq(total_events)
  end
@@ -91,6 +128,31 @@ describe "inputs/kafka", :integration => true do
  expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
  end
  end
+
+ it "should show the right topic and group name in and kafka headers decorated kafka section" do
+ start = LogStash::Timestamp.now.time.to_i
+ consume_messages(decorate_headers_config, timeout: timeout_seconds, event_count: 1) do |queue, _|
+ expect(queue.length).to eq(1)
+ event = queue.shift
+ expect(event.get("[@metadata][kafka][topic]")).to eq("logstash_integration_topic_plain_with_headers")
+ expect(event.get("[@metadata][kafka][consumer_group]")).to eq(group_id_3)
+ expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
+ expect(event.get("[@metadata][kafka][headers][name]")).to eq("John ανδρεα €")
+ end
+ end
+
+ it "should skip headers not encoded in UTF-8" do
+ start = LogStash::Timestamp.now.time.to_i
+ consume_messages(decorate_bad_headers_config, timeout: timeout_seconds, event_count: 1) do |queue, _|
+ expect(queue.length).to eq(1)
+ event = queue.shift
+ expect(event.get("[@metadata][kafka][topic]")).to eq("logstash_integration_topic_plain_with_headers_badly")
+ expect(event.get("[@metadata][kafka][consumer_group]")).to eq(group_id_3)
+ expect(event.get("[@metadata][kafka][timestamp]")).to be >= start
+
+ expect(event.include?("[@metadata][kafka][headers][name]")).to eq(false)
+ end
+ end
  end

  context "#kafka-offset-commit" do
@@ -129,14 +191,16 @@ private

  def consume_messages(config, queue: Queue.new, timeout:, event_count:)
  kafka_input = LogStash::Inputs::Kafka.new(config)
+ kafka_input.register
  t = Thread.new { kafka_input.run(queue) }
  begin
  t.run
  wait(timeout).for { queue.length }.to eq(event_count) unless timeout.eql?(false)
  block_given? ? yield(queue, kafka_input) : queue
  ensure
+ kafka_input.do_stop
  t.kill
- t.join(30_000)
+ t.join(30)
  end
  end

data/spec/integration/outputs/kafka_spec.rb CHANGED
@@ -44,7 +44,7 @@ describe "outputs/kafka", :integration => true do
  end

  context 'when outputting messages serialized as Byte Array' do
- let(:test_topic) { 'topic1b' }
+ let(:test_topic) { 'logstash_integration_topicbytearray' }
  let(:num_events) { 3 }

  before :each do
data/spec/unit/inputs/kafka_spec.rb CHANGED
@@ -3,58 +3,215 @@ require "logstash/devutils/rspec/spec_helper"
  require "logstash/inputs/kafka"
  require "concurrent"

- class MockConsumer
- def initialize
- @wake = Concurrent::AtomicBoolean.new(false)
- end

- def subscribe(topics)
- end
-
- def poll(ms)
- if @wake.value
- raise org.apache.kafka.common.errors.WakeupException.new
- else
- 10.times.map do
- org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
+ describe LogStash::Inputs::Kafka do
+ let(:common_config) { { 'topics' => ['logstash'] } }
+ let(:config) { common_config }
+ let(:consumer_double) { double(:consumer) }
+ let(:needs_raise) { false }
+ let(:payload) {
+ 10.times.map do
+ org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
+ end
+ }
+ subject { LogStash::Inputs::Kafka.new(config) }
+
+ describe '#poll' do
+ before do
+ polled = false
+ allow(consumer_double).to receive(:poll) do
+ if polled
+ []
+ else
+ polled = true
+ payload
+ end
  end
  end
+
+ it 'should poll' do
+ expect(consumer_double).to receive(:poll)
+ expect(subject.do_poll(consumer_double)).to eq(payload)
+ end
+
+ it 'should return nil if Kafka Exception is encountered' do
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+ expect(subject.do_poll(consumer_double)).to be_empty
+ end
+
+ it 'should not throw if Kafka Exception is encountered' do
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+ expect{subject.do_poll(consumer_double)}.not_to raise_error
+ end
+
+ it 'should return no records if Assertion Error is encountered' do
+ expect(consumer_double).to receive(:poll).and_raise(java.lang.AssertionError.new(''))
+ expect{subject.do_poll(consumer_double)}.to raise_error(java.lang.AssertionError)
+ end
  end

- def close
+ describe '#maybe_commit_offset' do
+ context 'with auto commit disabled' do
+ let(:config) { common_config.merge('enable_auto_commit' => false) }
+
+ it 'should call commit on the consumer' do
+ expect(consumer_double).to receive(:commitSync)
+ subject.maybe_commit_offset(consumer_double)
+ end
+ it 'should not throw if a Kafka Exception is encountered' do
+ expect(consumer_double).to receive(:commitSync).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
+ expect{subject.maybe_commit_offset(consumer_double)}.not_to raise_error
+ end
+
+ it 'should throw if Assertion Error is encountered' do
+ expect(consumer_double).to receive(:commitSync).and_raise(java.lang.AssertionError.new(''))
+ expect{subject.maybe_commit_offset(consumer_double)}.to raise_error(java.lang.AssertionError)
+ end
+ end
+
+ context 'with auto commit enabled' do
+ let(:config) { common_config.merge('enable_auto_commit' => true) }
+
+ it 'should not call commit on the consumer' do
+ expect(consumer_double).not_to receive(:commitSync)
+ subject.maybe_commit_offset(consumer_double)
+ end
+ end
  end

- def wakeup
- @wake.make_true
+ describe '#register' do
+ it "should register" do
+ expect { subject.register }.to_not raise_error
+ end
  end
- end

- describe LogStash::Inputs::Kafka do
- let(:config) { { 'topics' => ['logstash'], 'consumer_threads' => 4 } }
- subject { LogStash::Inputs::Kafka.new(config) }
+ describe '#running' do
+ let(:q) { Queue.new }
+ let(:config) { common_config.merge('client_id' => 'test') }
+
+ before do
+ expect(subject).to receive(:create_consumer).once.and_return(consumer_double)
+ allow(consumer_double).to receive(:wakeup)
+ allow(consumer_double).to receive(:close)
+ allow(consumer_double).to receive(:subscribe)
+ end

- it "should register" do
- expect { subject.register }.to_not raise_error
+ context 'when running' do
+ before do
+ polled = false
+ allow(consumer_double).to receive(:poll) do
+ if polled
+ []
+ else
+ polled = true
+ payload
+ end
+ end
+
+ subject.register
+ t = Thread.new do
+ sleep(1)
+ subject.do_stop
+ end
+ subject.run(q)
+ t.join
+ end
+
+ it 'should process the correct number of events' do
+ expect(q.size).to eq(10)
+ end
+
+ it 'should set the consumer thread name' do
+ expect(subject.instance_variable_get('@runner_threads').first.get_name).to eq("kafka-input-worker-test-0")
+ end
+ end
+
+ context 'when errors are encountered during poll' do
+ before do
+ raised, polled = false
+ allow(consumer_double).to receive(:poll) do
+ unless raised
+ raised = true
+ raise exception
+ end
+ if polled
+ []
+ else
+ polled = true
+ payload
+ end
+ end
+
+ subject.register
+ t = Thread.new do
+ sleep 2
+ subject.do_stop
+ end
+ subject.run(q)
+ t.join
+ end
+
+ context "when a Kafka exception is raised" do
+ let(:exception) { org.apache.kafka.common.errors.TopicAuthorizationException.new('Invalid topic') }
+
+ it 'should poll successfully' do
+ expect(q.size).to eq(10)
+ end
+ end
+
+ context "when a StandardError is raised" do
+ let(:exception) { StandardError.new('Standard Error') }
+
+ it 'should retry and poll successfully' do
+ expect(q.size).to eq(10)
+ end
+ end
+
+ context "when a java error is raised" do
+ let(:exception) { java.lang.AssertionError.new('Fatal assertion') }
+
+ it "should not retry" do
+ expect(q.size).to eq(0)
+ end
+ end
+ end
  end

  context "register parameter verification" do
- let(:config) do
- { 'schema_registry_url' => 'http://localhost:8081', 'topics' => ['logstash'], 'consumer_threads' => 4 }
- end
+ context "schema_registry_url" do
+ let(:config) do
+ { 'schema_registry_url' => 'http://localhost:8081', 'topics' => ['logstash'], 'consumer_threads' => 4 }
+ end

- it "schema_registry_url conflict with value_deserializer_class should fail" do
- config['value_deserializer_class'] = 'my.fantasy.Deserializer'
- expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of value_deserializer_class/
+ it "conflict with value_deserializer_class should fail" do
+ config['value_deserializer_class'] = 'my.fantasy.Deserializer'
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of value_deserializer_class/
+ end
+
+ it "conflict with topics_pattern should fail" do
+ config['topics_pattern'] = 'topic_.*'
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of topics_pattern/
+ end
  end

- it "schema_registry_url conflict with topics_pattern should fail" do
- config['topics_pattern'] = 'topic_.*'
- expect { subject.register }.to raise_error LogStash::ConfigurationError, /Option schema_registry_url prohibit the customization of topics_pattern/
+ context "decorate_events" do
+ let(:config) { { 'decorate_events' => 'extended'} }
+
+ it "should raise error for invalid value" do
+ config['decorate_events'] = 'avoid'
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Something is wrong with your configuration./
+ end
+
+ it "should map old true boolean value to :record_props mode" do
+ config['decorate_events'] = "true"
+ subject.register
+ expect(subject.metadata_mode).to include(:record_props)
+ end
  end
  end

  context 'with client_rack' do
- let(:config) { super.merge('client_rack' => 'EU-R1') }
+ let(:config) { super().merge('client_rack' => 'EU-R1') }

  it "sets broker rack parameter" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
@@ -66,7 +223,7 @@ describe LogStash::Inputs::Kafka do
  end

  context 'string integer config' do
- let(:config) { super.merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }
+ let(:config) { super().merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }

  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
@@ -78,7 +235,7 @@ describe LogStash::Inputs::Kafka do
  end

  context 'integer config' do
- let(:config) { super.merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }
+ let(:config) { super().merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }

  it "sets integer values" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
@@ -90,7 +247,7 @@ describe LogStash::Inputs::Kafka do
  end

  context 'string boolean config' do
- let(:config) { super.merge('enable_auto_commit' => 'false', 'check_crcs' => 'true') }
+ let(:config) { super().merge('enable_auto_commit' => 'false', 'check_crcs' => 'true') }

  it "sets parameters" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
@@ -103,7 +260,7 @@ describe LogStash::Inputs::Kafka do
  end

  context 'boolean config' do
- let(:config) { super.merge('enable_auto_commit' => true, 'check_crcs' => false) }
+ let(:config) { super().merge('enable_auto_commit' => true, 'check_crcs' => false) }

  it "sets parameters" do
  expect(org.apache.kafka.clients.consumer.KafkaConsumer).
data/spec/unit/outputs/kafka_spec.rb CHANGED
@@ -8,6 +8,8 @@ describe "outputs/kafka" do
  let (:event) { LogStash::Event.new({'message' => 'hello', 'topic_name' => 'my_topic', 'host' => '172.0.0.1',
  '@timestamp' => LogStash::Timestamp.now}) }

+ let(:future) { double('kafka producer future') }
+
  context 'when initializing' do
  it "should register" do
  output = LogStash::Plugin.lookup("output", "kafka").new(simple_kafka_config)
@@ -24,8 +26,8 @@ describe "outputs/kafka" do

  context 'when outputting messages' do
  it 'should send logstash event to kafka broker' do
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord)).and_call_original
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).
+ with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
  kafka.register
  kafka.multi_receive([event])
@@ -33,18 +35,18 @@ describe "outputs/kafka" do

  it 'should support Event#sprintf placeholders in topic_id' do
  topic_field = 'topic_name'
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
- .with("my_topic", event.to_s).and_call_original
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
+ with("my_topic", event.to_s).and_call_original
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
  kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
  kafka.register
  kafka.multi_receive([event])
  end

  it 'should support field referenced message_keys' do
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
- .with("test", "172.0.0.1", event.to_s).and_call_original
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
+ with("test", "172.0.0.1", event.to_s).and_call_original
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
  kafka.register
  kafka.multi_receive([event])
@@ -71,22 +73,24 @@ describe "outputs/kafka" do
  before do
  count = 0
  expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .exactly(sendcount).times
- .and_wrap_original do |m, *args|
+ .exactly(sendcount).times do
  if count < failcount # fail 'failcount' times in a row.
  count += 1
  # Pick an exception at random
  raise exception_classes.shuffle.first.new("injected exception for testing")
  else
- m.call(*args) # call original
+ count = :done
+ future # return future
  end
  end
+ expect(future).to receive :get
  end

  it "should retry until successful" do
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
  kafka.register
  kafka.multi_receive([event])
+ sleep(1.0) # allow for future.get call
  end
  end

@@ -101,15 +105,13 @@ describe "outputs/kafka" do

  before do
  count = 0
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .exactly(1).times
- .and_wrap_original do |m, *args|
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(1).times do
  if count < failcount # fail 'failcount' times in a row.
  count += 1
  # Pick an exception at random
  raise exception_classes.shuffle.first.new("injected exception for testing")
  else
- m.call(*args) # call original
+ fail 'unexpected producer#send invocation'
  end
  end
  end
@@ -131,25 +133,24 @@ describe "outputs/kafka" do

  it "should retry until successful" do
  count = 0
-
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .exactly(sendcount).times
- .and_wrap_original do |m, *args|
+ success = nil
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(sendcount).times do
  if count < failcount
  count += 1
  # inject some failures.

  # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
- future.run
- future
  else
- m.call(*args)
+ success = true
+ future = java.util.concurrent.FutureTask.new { nil } # return no-op future
  end
+ future.tap { Thread.start { future.run } }
  end
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
  kafka.register
  kafka.multi_receive([event])
+ expect( success ).to be true
  end
  end

@@ -158,9 +159,7 @@ describe "outputs/kafka" do
  let(:max_sends) { 1 }

  it "should should only send once" do
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .once
- .and_wrap_original do |m, *args|
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
  # Always fail.
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
  future.run
@@ -172,9 +171,7 @@ describe "outputs/kafka" do
  end

  it 'should not sleep' do
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .once
- .and_wrap_original do |m, *args|
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
  # Always fail.
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
  future.run
@@ -193,13 +190,10 @@ describe "outputs/kafka" do
  let(:max_sends) { retries + 1 }

  it "should give up after retries are exhausted" do
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .at_most(max_sends).times
- .and_wrap_original do |m, *args|
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
  # Always fail.
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
- future.run
- future
+ future.tap { Thread.start { future.run } }
  end
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
  kafka.register
@@ -207,9 +201,7 @@ describe "outputs/kafka" do
  end

  it 'should only sleep retries number of times' do
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
- .at_most(max_sends).times
- .and_wrap_original do |m, *args|
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
  # Always fail.
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
  future.run
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
- version: 10.7.1
+ version: 10.7.6
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-02-11 00:00:00.000000000 Z
+ date: 2021-05-31 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -126,6 +126,20 @@ dependencies:
  - - "<"
  - !ruby/object:Gem::Version
  version: 1.0.0
+ - !ruby/object:Gem::Dependency
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: '1.0'
+ name: logstash-mixin-deprecation_logger_support
+ prerelease: false
+ type: :runtime
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: '1.0'
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
@@ -154,6 +168,20 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: 0.5.1
+ name: digest-crc
+ prerelease: false
+ type: :development
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: 0.5.1
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
@@ -206,6 +234,7 @@ files:
  - lib/logstash/plugin_mixins/common.rb
  - lib/logstash/plugin_mixins/kafka_support.rb
  - logstash-integration-kafka.gemspec
+ - spec/check_docs_spec.rb
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb
  - spec/integration/outputs/kafka_spec.rb
@@ -222,7 +251,7 @@ files:
  - vendor/jar-dependencies/org/apache/avro/avro/1.9.2/avro-1.9.2.jar
  - vendor/jar-dependencies/org/apache/kafka/kafka-clients/2.5.1/kafka-clients-2.5.1.jar
  - vendor/jar-dependencies/org/apache/kafka/kafka_2.12/2.5.1/kafka_2.12-2.5.1.jar
- - vendor/jar-dependencies/org/glassfish/jersey/core/jersey-common/2.30/jersey-common-2.30.jar
+ - vendor/jar-dependencies/org/glassfish/jersey/core/jersey-common/2.33/jersey-common-2.33.jar
  - vendor/jar-dependencies/org/lz4/lz4-java/1.7.1/lz4-java-1.7.1.jar
  - vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.30/slf4j-api-1.7.30.jar
  - vendor/jar-dependencies/org/xerial/snappy/snappy-java/1.1.7.3/snappy-java-1.1.7.3.jar
@@ -255,6 +284,7 @@ signing_key:
  specification_version: 4
  summary: Integration with Kafka - input and output plugins
  test_files:
+ - spec/check_docs_spec.rb
  - spec/fixtures/trust-store_stub.jks
  - spec/integration/inputs/kafka_spec.rb
  - spec/integration/outputs/kafka_spec.rb