logstash-integration-kafka 10.7.4-java → 10.8.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: efc6c33cf871ecd41fc07468d3d6e47dc1a71c4dbd1800fe99127da703547dd2
-  data.tar.gz: 644f506705807c95e15fc035aac7f5d57233dd5809ae37f2c71939324ab9c3e7
+  metadata.gz: a322e757007ef9c740f48a93ebe97aa50511a29b17450219ea4f2d08a49934e1
+  data.tar.gz: 111a1d72591d14876f33dba65cfc29e6c9ae397397f295e3a94e5078b413eb3a
 SHA512:
-  metadata.gz: 28341e37050a860c8e87d0b74a4da4d1fdd37eff3e9c95bd5596dff4d9a613149f4dd09052a949f6002fb6da835601df12ca6d37d03ea25819ff6517833da37c
-  data.tar.gz: eb196288b02dd30b92bf3a4e083be0b0d8f005307203e20e8892262d46e48041e4d4212182be685ecae668970530c7050c7132f4c2aaa3be50f3f00e46a04122
+  metadata.gz: bac61cfb956bb284df97746df68afc90b12b1d04186eba0cf27fc68886858dad965aefcf46566dc8e27e8a484b524c7ce8d3185d24d9412798e06defc9f78ab8
+  data.tar.gz: f706aaad143f31fdd31ea47c5d1604f490f505f08329ecd07dff75201c51d1983701e79e2f74a52f484c860eb026a26880447e3b89a985dffeb7367728cab1ef
data/CHANGELOG.md CHANGED
@@ -1,3 +1,18 @@
+## 10.8.0
+  - Added config setting to enable schema registry validation to be skipped when an authentication scheme unsupported
+    by the validator is used [#97](https://github.com/logstash-plugins/logstash-integration-kafka/pull/97)
+
+## 10.7.7
+  - Fix: Correct the settings to allow basic auth to work properly, either by setting `schema_registry_key/secret` or embedding username/password in the
+    url [#94](https://github.com/logstash-plugins/logstash-integration-kafka/pull/94)
+
+## 10.7.6
+  - Test: specify development dependency version [#91](https://github.com/logstash-plugins/logstash-integration-kafka/pull/91)
+
+## 10.7.5
+  - Improved error handling in the input plugin to avoid errors 'escaping' from the plugin, and crashing the logstash
+    process [#87](https://github.com/logstash-plugins/logstash-integration-kafka/pull/87)
+
 ## 10.7.4
   - Docs: make sure Kafka clients version is updated in docs [#83](https://github.com/logstash-plugins/logstash-integration-kafka/pull/83)
     Since **10.6.0** Kafka client was updated to **2.5.1**
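
For context on the 10.8.0 entry above: the new `schema_registry_validation` option is passed like any other plugin setting. A minimal, hypothetical sketch in the constructor style this gem's own specs use (the registry URL is a placeholder):

    require "logstash/inputs/kafka"

    # 'skip' bypasses the connectivity/subjects check normally performed at registration time.
    input = LogStash::Inputs::Kafka.new(
      'topics'                     => ['logstash'],
      'schema_registry_url'        => 'https://registry.example.com:8081',
      'schema_registry_validation' => 'skip'
    )
    input.register  # with the default 'auto', this call would also probe the registry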
data/docs/input-kafka.asciidoc CHANGED
@@ -128,6 +128,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more detai
 | <<plugins-{type}s-{plugin}-schema_registry_proxy>> |<<uri,uri>>|No
 | <<plugins-{type}s-{plugin}-schema_registry_secret>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-schema_registry_url>> |<<uri,uri>>|No
+| <<plugins-{type}s-{plugin}-schema_registry_validation>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
 | <<plugins-{type}s-{plugin}-send_buffer_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-session_timeout_ms>> |<<number,number>>|No
@@ -576,6 +577,18 @@ The schemas must follow a naming convention with the pattern <topic name>-value.
 Use either the Schema Registry config option or the
 <<plugins-{type}s-{plugin}-value_deserializer_class>> config option, but not both.
 
+[id="plugins-{type}s-{plugin}-schema_registry_validation"]
+===== `schema_registry_validation`
+
+* Value can be either of: `auto`, `skip`
+* Default value is `"auto"`
+
+NOTE: Under most circumstances, the default setting of `auto` should not need to be changed.
+
+When using the schema registry, by default the plugin checks connectivity and validates the schema registry during plugin registration, before events are processed.
+In some circumstances, this process may fail when it tries to validate an authenticated schema registry, causing the plugin to crash.
+This setting allows the plugin to skip validation during registration, which allows the plugin to continue and events to be processed. Note that an incorrectly configured schema registry will still stop the plugin from processing events.
+
 [id="plugins-{type}s-{plugin}-security_protocol"]
 ===== `security_protocol`
 
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -253,6 +253,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   def register
     @runner_threads = []
     @metadata_mode = extract_metadata_level(@decorate_events)
+    @pattern ||= java.util.regex.Pattern.compile(@topics_pattern) unless @topics_pattern.nil?
     check_schema_registry_parameters
   end
 
@@ -280,9 +281,11 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 
   public
   def run(logstash_queue)
-    @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
-    @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
-    @runner_threads.each { |t| t.join }
+    @runner_consumers = consumer_threads.times.map { |i| subscribe(create_consumer("#{client_id}-#{i}")) }
+    @runner_threads = @runner_consumers.map.with_index { |consumer, i| thread_runner(logstash_queue, consumer,
+                                                                                     "kafka-input-worker-#{client_id}-#{i}") }
+    @runner_threads.each(&:start)
+    @runner_threads.each(&:join)
   end # def run
 
   public
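
The reworked `run` above builds `java.lang.Thread` workers that are named via `LogStash::Util::set_thread_name` and started and joined explicitly, which is what lets the unit specs added later in this diff assert on `get_name`. A small JRuby sketch of that pattern (the thread name and body are illustrative only):

    worker = java.lang.Thread.new do
      LogStash::Util::set_thread_name("kafka-input-worker-demo-0")  # renames the underlying Java thread
      # the consumer poll loop would run here
    end
    worker.start
    worker.join
    worker.get_name  # => "kafka-input-worker-demo-0"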
@@ -296,62 +299,100 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     @runner_consumers
   end
 
-  private
-  def thread_runner(logstash_queue, consumer)
-    Thread.new do
+  def subscribe(consumer)
+    @pattern.nil? ? consumer.subscribe(topics) : consumer.subscribe(@pattern)
+    consumer
+  end
+
+  def thread_runner(logstash_queue, consumer, name)
+    java.lang.Thread.new do
+      LogStash::Util::set_thread_name(name)
       begin
-        unless @topics_pattern.nil?
-          nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
-          pattern = java.util.regex.Pattern.compile(@topics_pattern)
-          consumer.subscribe(pattern, nooplistener)
-        else
-          consumer.subscribe(topics);
-        end
         codec_instance = @codec.clone
-        while !stop?
-          records = consumer.poll(poll_timeout_ms)
-          next unless records.count > 0
-          for record in records do
-            codec_instance.decode(record.value.to_s) do |event|
-              decorate(event)
-              if schema_registry_url
-                json = LogStash::Json.load(record.value.to_s)
-                json.each do |k, v|
-                  event.set(k, v)
-                end
-                event.remove("message")
-              end
-              if @metadata_mode.include?(:record_props)
-                event.set("[@metadata][kafka][topic]", record.topic)
-                event.set("[@metadata][kafka][consumer_group]", @group_id)
-                event.set("[@metadata][kafka][partition]", record.partition)
-                event.set("[@metadata][kafka][offset]", record.offset)
-                event.set("[@metadata][kafka][key]", record.key)
-                event.set("[@metadata][kafka][timestamp]", record.timestamp)
-              end
-              if @metadata_mode.include?(:headers)
-                record.headers.each do |header|
-                  s = String.from_java_bytes(header.value)
-                  s.force_encoding(Encoding::UTF_8)
-                  if s.valid_encoding?
-                    event.set("[@metadata][kafka][headers]["+header.key+"]", s)
-                  end
-                end
-              end
-              logstash_queue << event
-            end
+        until stop?
+          records = do_poll(consumer)
+          unless records.empty?
+            records.each { |record| handle_record(record, codec_instance, logstash_queue) }
+            maybe_commit_offset(consumer)
           end
-          # Manual offset commit
-          consumer.commitSync if @enable_auto_commit.eql?(false)
         end
-      rescue org.apache.kafka.common.errors.WakeupException => e
-        raise e if !stop?
       ensure
         consumer.close
       end
     end
   end
 
+  def do_poll(consumer)
+    records = []
+    begin
+      records = consumer.poll(poll_timeout_ms)
+    rescue org.apache.kafka.common.errors.WakeupException => e
+      logger.debug("Wake up from poll", :kafka_error_message => e)
+      raise e unless stop?
+    rescue => e
+      logger.error("Unable to poll Kafka consumer",
+                   :kafka_error_message => e,
+                   :cause => e.respond_to?(:getCause) ? e.getCause : nil)
+      Stud.stoppable_sleep(1) { stop? }
+    end
+    records
+  end
+
+  def handle_record(record, codec_instance, queue)
+    codec_instance.decode(record.value.to_s) do |event|
+      decorate(event)
+      maybe_apply_schema(event, record)
+      maybe_set_metadata(event, record)
+      queue << event
+    end
+  end
+
+  def maybe_apply_schema(event, record)
+    if schema_registry_url
+      json = LogStash::Json.load(record.value.to_s)
+      json.each do |k, v|
+        event.set(k, v)
+      end
+      event.remove("message")
+    end
+  end
+
+  def maybe_set_metadata(event, record)
+    if @metadata_mode.include?(:record_props)
+      event.set("[@metadata][kafka][topic]", record.topic)
+      event.set("[@metadata][kafka][consumer_group]", @group_id)
+      event.set("[@metadata][kafka][partition]", record.partition)
+      event.set("[@metadata][kafka][offset]", record.offset)
+      event.set("[@metadata][kafka][key]", record.key)
+      event.set("[@metadata][kafka][timestamp]", record.timestamp)
+    end
+    if @metadata_mode.include?(:headers)
+      record.headers.each do |header|
+        s = String.from_java_bytes(header.value)
+        s.force_encoding(Encoding::UTF_8)
+        if s.valid_encoding?
+          event.set("[@metadata][kafka][headers][" + header.key + "]", s)
+        end
+      end
+    end
+  end
+
+  def maybe_commit_offset(consumer)
+    begin
+      consumer.commitSync if @enable_auto_commit.eql?(false)
+    rescue org.apache.kafka.common.errors.WakeupException => e
+      logger.debug("Wake up from commitSync", :kafka_error_message => e)
+      raise e unless stop?
+    rescue StandardError => e
+      # For transient errors, the commit should be successful after the next set of
+      # polled records has been processed.
+      # But, it might also be worth thinking about adding a configurable retry mechanism
+      logger.error("Unable to commit records",
+                   :kafka_error_message => e,
+                   :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
+    end
+  end
+
   private
   def create_consumer(client_id)
     begin
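
A quick illustration of how the extracted `do_poll` behaves, mirroring the unit specs added later in this diff: Kafka client exceptions are logged and swallowed so the worker keeps running, while a fatal `java.lang.AssertionError` still propagates. Here `consumer` is an RSpec double and `input` stands for a plugin instance built as in those specs:

    consumer = double(:consumer)

    allow(consumer).to receive(:poll)
      .and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new('denied'))
    expect(input.do_poll(consumer)).to be_empty   # error is logged; an empty batch is returned

    allow(consumer).to receive(:poll).and_raise(java.lang.AssertionError.new('fatal'))
    expect { input.do_poll(consumer) }.to raise_error(java.lang.AssertionError)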
@@ -392,13 +433,16 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     if schema_registry_url
       props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroDeserializer.java_class)
       serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
-      props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.to_s)
+      props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, schema_registry_url.uri.to_s)
       if schema_registry_proxy && !schema_registry_proxy.empty?
         props.put(serdes_config::PROXY_HOST, @schema_registry_proxy_host)
         props.put(serdes_config::PROXY_PORT, @schema_registry_proxy_port)
       end
       if schema_registry_key && !schema_registry_key.empty?
+        props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
         props.put(serdes_config::USER_INFO_CONFIG, schema_registry_key + ":" + schema_registry_secret.value)
+      else
+        props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'URL')
       end
     end
     if security_protocol == "SSL"
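
The `BASIC_AUTH_CREDENTIALS_SOURCE` change above is what makes both credential styles from the 10.7.7 changelog entry work. A hedged sketch of the two equivalent plugin configurations (host, user and password are placeholders):

    # 1) Explicit key/secret: the Avro deserializer is configured with USER_INFO credentials.
    LogStash::Inputs::Kafka.new(
      'topics'                 => ['logstash'],
      'schema_registry_url'    => 'http://registry.example.com:8081',
      'schema_registry_key'    => 'barney',
      'schema_registry_secret' => 'changeme'
    )

    # 2) Credentials embedded in the URL: the deserializer reads them from the URL itself.
    LogStash::Inputs::Kafka.new(
      'topics'              => ['logstash'],
      'schema_registry_url' => 'http://barney:changeme@registry.example.com:8081'
    )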
data/lib/logstash/plugin_mixins/common.rb CHANGED
@@ -22,6 +22,10 @@ module LogStash
       # Option to set the proxy of the Schema Registry.
       # This option permits to define a proxy to be used to reach the schema registry service instance.
       config :schema_registry_proxy, :validate => :uri
+
+      # Option to skip validating the schema registry during registration. This can be useful when using
+      # certificate based auth
+      config :schema_registry_validation, :validate => ['auto', 'skip'], :default => 'auto'
     end
 
     def check_schema_registry_parameters
@@ -29,10 +33,21 @@ module LogStash
         check_for_schema_registry_conflicts
         @schema_registry_proxy_host, @schema_registry_proxy_port = split_proxy_into_host_and_port(schema_registry_proxy)
         check_for_key_and_secret
-        check_for_schema_registry_connectivity_and_subjects
+        check_for_schema_registry_connectivity_and_subjects if schema_registry_validation?
       end
     end
 
+    def schema_registry_validation?
+      return false if schema_registry_validation.to_s == 'skip'
+      return false if using_kerberos? # pre-validation doesn't support kerberos
+
+      true
+    end
+
+    def using_kerberos?
+      security_protocol == "SASL_PLAINTEXT" || security_protocol == "SASL_SSL"
+    end
+
     private
     def check_for_schema_registry_conflicts
       if @value_deserializer_class != LogStash::Inputs::Kafka::DEFAULT_DESERIALIZER_CLASS
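
Summarising the predicate added above: pre-registration validation runs only when `schema_registry_validation` is `auto` (the default) and no Kerberos-style security protocol is configured, since the probe does not support Kerberos. An illustrative sketch of the resulting behaviour, based on the unit specs later in this diff:

    base = { 'topics' => ['logstash'], 'schema_registry_url' => 'http://localhost:8081' }

    # SASL_PLAINTEXT / SASL_SSL imply Kerberos for the probe, so registration skips it.
    LogStash::Inputs::Kafka.new(base.merge('security_protocol' => 'SASL_SSL')).register

    # Explicit opt-out behaves the same way regardless of protocol.
    LogStash::Inputs::Kafka.new(base.merge('schema_registry_validation' => 'skip')).register

    # The default 'auto' still probes the registry and raises a
    # LogStash::ConfigurationError if it cannot be reached.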
@@ -53,9 +68,8 @@ module LogStash
         options[:auth] = {:user => schema_registry_key, :password => schema_registry_secret.value}
       end
       client = Manticore::Client.new(options)
-
       begin
-        response = client.get(@schema_registry_url.to_s + '/subjects').body
+        response = client.get(@schema_registry_url.uri.to_s + '/subjects').body
       rescue Manticore::ManticoreException => e
         raise LogStash::ConfigurationError.new("Schema registry service doesn't respond, error: #{e.message}")
       end
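
For completeness, what the `auto` validation actually does at registration time is essentially the HTTP probe shown in the hunk above: a GET against the registry's `/subjects` endpoint, optionally with basic-auth credentials. A standalone, hypothetical version of that check:

    require 'manticore'
    require 'json'

    # Rough sketch of check_for_schema_registry_connectivity_and_subjects, not the plugin's actual method.
    def probe_schema_registry(url, user: nil, password: nil)
      options = {}
      options[:auth] = { :user => user, :password => password } if user
      client = Manticore::Client.new(options)
      begin
        JSON.parse(client.get("#{url}/subjects").body)  # e.g. ["topic_avro-value"]
      rescue Manticore::ManticoreException => e
        raise "Schema registry service doesn't respond, error: #{e.message}"
      end
    end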
data/logstash-integration-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name            = 'logstash-integration-kafka'
-  s.version         = '10.7.4'
+  s.version         = '10.8.0'
   s.licenses        = ['Apache-2.0']
   s.summary         = "Integration with Kafka - input and output plugins"
   s.description     = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
@@ -51,6 +51,7 @@ Gem::Specification.new do |s|
 
   s.add_development_dependency 'logstash-devutils'
   s.add_development_dependency 'rspec-wait'
-  s.add_development_dependency 'ruby-kafka'
+  s.add_development_dependency 'digest-crc', '~> 0.5.1' # 0.6.0 started using a C-ext
+  s.add_development_dependency 'ruby-kafka' # depends on digest-crc
   s.add_development_dependency 'snappy'
 end
data/spec/check_docs_spec.rb ADDED
@@ -0,0 +1,36 @@
+# encoding: utf-8
+require 'logstash-integration-kafka_jars'
+
+describe "[DOCS]" do
+
+  let(:docs_files) do
+    ['index.asciidoc', 'input-kafka.asciidoc', 'output-kafka.asciidoc'].map { |name| File.join('docs', name) }
+  end
+
+  let(:kafka_version_properties) do
+    loader = java.lang.Thread.currentThread.getContextClassLoader
+    version = loader.getResource('kafka/kafka-version.properties')
+    fail "kafka-version.properties missing" unless version
+    properties = java.util.Properties.new
+    properties.load version.openStream
+    properties
+  end
+
+  it 'is sync-ed with Kafka client version' do
+    version = kafka_version_properties.get('version') # e.g. '2.5.1'
+
+    fails = docs_files.map do |file|
+      if line = File.readlines(file).find { |line| line.index(':kafka_client:') }
+        puts "found #{line.inspect} in #{file}" if $VERBOSE # e.g. ":kafka_client: 2.5\n"
+        if !version.start_with?(line.strip.split[1])
+          "documentation at #{file} is out of sync with kafka-clients version (#{version.inspect}), detected line: #{line.inspect}"
+        else
+          nil
+        end
+      end
+    end
+
+    fail "\n" + fails.join("\n") if fails.flatten.any?
+  end
+
+end
data/spec/fixtures/jaas.config ADDED
@@ -0,0 +1,5 @@
+SchemaRegistry-Props {
+  org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
+  file="build/confluent_platform/etc/schema-registry/pwd"
+  debug="true";
+};
data/spec/fixtures/pwd ADDED
@@ -0,0 +1,5 @@
+fred: OBF:1w8t1tvf1w261w8v1w1c1tvn1w8x,user,admin
+barney: changeme,user,developer
+admin:admin,admin
+betty: MD5:164c88b302622e17050af52c89945d44,user
+wilma: CRYPT:adpexzg3FUZAk,admin,sr-user
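
The two fixtures above wire the test schema registry up for HTTP basic auth through Jetty's JAAS `PropertyFileLoginModule`: each `pwd` line is `user: password,role1,role2,...`, where the password may be plain text or carry an `OBF:`, `MD5:` or `CRYPT:` prefix. As a hedged aside, obfuscated or hashed values of this shape can be generated from JRuby with Jetty's own utilities, assuming jetty-util is already on the JVM classpath (an assumption here, not something this gem sets up itself):

    require 'digest'
    # Assumes jetty-util (e.g. from the Confluent schema registry distribution) is on the classpath.
    java_import 'org.eclipse.jetty.util.security.Password'

    Password.obfuscate('changeme')     # => an "OBF:..." value usable in the pwd file
    Digest::MD5.hexdigest('changeme')  # => hex digest usable after the "MD5:" prefix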
@@ -198,13 +198,24 @@ def consume_messages(config, queue: Queue.new, timeout:, event_count:)
198
198
  wait(timeout).for { queue.length }.to eq(event_count) unless timeout.eql?(false)
199
199
  block_given? ? yield(queue, kafka_input) : queue
200
200
  ensure
201
+ kafka_input.do_stop
201
202
  t.kill
202
- t.join(30_000)
203
+ t.join(30)
203
204
  end
204
205
  end
205
206
 
206
207
 
207
208
  describe "schema registry connection options" do
209
+ schema_registry = Manticore::Client.new
210
+ before (:all) do
211
+ shutdown_schema_registry
212
+ startup_schema_registry(schema_registry)
213
+ end
214
+
215
+ after(:all) do
216
+ shutdown_schema_registry
217
+ end
218
+
208
219
  context "remote endpoint validation" do
209
220
  it "should fail if not reachable" do
210
221
  config = {'schema_registry_url' => 'http://localnothost:8081'}
@@ -231,8 +242,7 @@ describe "schema registry connection options" do
231
242
  end
232
243
 
233
244
  after(:each) do
234
- schema_registry_client = Manticore::Client.new
235
- delete_remote_schema(schema_registry_client, SUBJECT_NAME)
245
+ delete_remote_schema(schema_registry, SUBJECT_NAME)
236
246
  end
237
247
 
238
248
  it "should correctly complete registration phase" do
@@ -263,9 +273,25 @@ end
263
273
 
264
274
  # AdminClientConfig = org.alpache.kafka.clients.admin.AdminClientConfig
265
275
 
276
+ def startup_schema_registry(schema_registry, auth=false)
277
+ system('./stop_schema_registry.sh')
278
+ auth ? system('./start_auth_schema_registry.sh') : system('./start_schema_registry.sh')
279
+ url = auth ? "http://barney:changeme@localhost:8081" : "http://localhost:8081"
280
+ Stud.try(20.times, [Manticore::SocketException, StandardError, RSpec::Expectations::ExpectationNotMetError]) do
281
+ expect(schema_registry.get(url).code).to eq(200)
282
+ end
283
+ end
284
+
266
285
  describe "Schema registry API", :integration => true do
286
+ schema_registry = Manticore::Client.new
287
+
288
+ before(:all) do
289
+ startup_schema_registry(schema_registry)
290
+ end
267
291
 
268
- let(:schema_registry) { Manticore::Client.new }
292
+ after(:all) do
293
+ shutdown_schema_registry
294
+ end
269
295
 
270
296
  context 'listing subject on clean instance' do
271
297
  it "should return an empty set" do
@@ -291,37 +317,58 @@ describe "Schema registry API", :integration => true do
291
317
  expect( subjects ).to be_empty
292
318
  end
293
319
  end
320
+ end
321
+
322
+ def shutdown_schema_registry
323
+ system('./stop_schema_registry.sh')
324
+ end
325
+
326
+ describe "Deserializing with the schema registry", :integration => true do
327
+ schema_registry = Manticore::Client.new
328
+
329
+ shared_examples 'it reads from a topic using a schema registry' do |with_auth|
330
+
331
+ before(:all) do
332
+ shutdown_schema_registry
333
+ startup_schema_registry(schema_registry, with_auth)
334
+ end
335
+
336
+ after(:all) do
337
+ shutdown_schema_registry
338
+ end
294
339
 
295
- context 'use the schema to serialize' do
296
340
  after(:each) do
297
- expect( schema_registry.delete('http://localhost:8081/subjects/topic_avro-value').code ).to be(200)
341
+ expect( schema_registry.delete("#{subject_url}/#{avro_topic_name}-value").code ).to be(200)
298
342
  sleep 1
299
- expect( schema_registry.delete('http://localhost:8081/subjects/topic_avro-value?permanent=true').code ).to be(200)
343
+ expect( schema_registry.delete("#{subject_url}/#{avro_topic_name}-value?permanent=true").code ).to be(200)
300
344
 
301
345
  Stud.try(3.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
302
346
  wait(10).for do
303
- subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
347
+ subjects = JSON.parse schema_registry.get(subject_url).body
304
348
  subjects.empty?
305
349
  end.to be_truthy
306
350
  end
307
351
  end
308
352
 
309
- let(:group_id_1) {rand(36**8).to_s(36)}
310
-
311
- let(:avro_topic_name) { "topic_avro" }
312
-
313
- let(:plain_config) do
314
- { 'schema_registry_url' => 'http://localhost:8081',
315
- 'topics' => [avro_topic_name],
316
- 'codec' => 'plain',
317
- 'group_id' => group_id_1,
318
- 'auto_offset_reset' => 'earliest' }
353
+ let(:base_config) do
354
+ {
355
+ 'topics' => [avro_topic_name],
356
+ 'codec' => 'plain',
357
+ 'group_id' => group_id_1,
358
+ 'auto_offset_reset' => 'earliest'
359
+ }
319
360
  end
320
361
 
321
- def delete_topic_if_exists(topic_name)
362
+ let(:group_id_1) {rand(36**8).to_s(36)}
363
+
364
+ def delete_topic_if_exists(topic_name, user = nil, password = nil)
322
365
  props = java.util.Properties.new
323
366
  props.put(Java::org.apache.kafka.clients.admin.AdminClientConfig::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
324
-
367
+ serdes_config = Java::io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig
368
+ unless user.nil?
369
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
370
+ props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
371
+ end
325
372
  admin_client = org.apache.kafka.clients.admin.AdminClient.create(props)
326
373
  topics_list = admin_client.listTopics().names().get()
327
374
  if topics_list.contains(topic_name)
@@ -330,7 +377,7 @@ describe "Schema registry API", :integration => true do
330
377
  end
331
378
  end
332
379
 
333
- def write_some_data_to(topic_name)
380
+ def write_some_data_to(topic_name, user = nil, password = nil)
334
381
  props = java.util.Properties.new
335
382
  config = org.apache.kafka.clients.producer.ProducerConfig
336
383
 
@@ -338,6 +385,10 @@ describe "Schema registry API", :integration => true do
338
385
  props.put(serdes_config::SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081")
339
386
 
340
387
  props.put(config::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
388
+ unless user.nil?
389
+ props.put(serdes_config::BASIC_AUTH_CREDENTIALS_SOURCE, 'USER_INFO')
390
+ props.put(serdes_config::USER_INFO_CONFIG, "#{user}:#{password}")
391
+ end
341
392
  props.put(config::KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringSerializer.java_class)
342
393
  props.put(config::VALUE_SERIALIZER_CLASS_CONFIG, Java::io.confluent.kafka.serializers.KafkaAvroSerializer.java_class)
343
394
 
@@ -359,11 +410,11 @@ describe "Schema registry API", :integration => true do
359
410
  end
360
411
 
361
412
  it "stored a new schema using Avro Kafka serdes" do
362
- delete_topic_if_exists avro_topic_name
363
- write_some_data_to avro_topic_name
413
+ auth ? delete_topic_if_exists(avro_topic_name, user, password) : delete_topic_if_exists(avro_topic_name)
414
+ auth ? write_some_data_to(avro_topic_name, user, password) : write_some_data_to(avro_topic_name)
364
415
 
365
- subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
366
- expect( subjects ).to contain_exactly("topic_avro-value")
416
+ subjects = JSON.parse schema_registry.get(subject_url).body
417
+ expect( subjects ).to contain_exactly("#{avro_topic_name}-value")
367
418
 
368
419
  num_events = 1
369
420
  queue = consume_messages(plain_config, timeout: 30, event_count: num_events)
@@ -374,4 +425,43 @@ describe "Schema registry API", :integration => true do
374
425
  expect( elem.get("map_field")["inner_field"] ).to eq("inner value")
375
426
  end
376
427
  end
428
+
429
+ context 'with an unauthed schema registry' do
430
+ let(:auth) { false }
431
+ let(:avro_topic_name) { "topic_avro" }
432
+ let(:subject_url) { "http://localhost:8081/subjects" }
433
+ let(:plain_config) { base_config.merge!({'schema_registry_url' => "http://localhost:8081"}) }
434
+
435
+ it_behaves_like 'it reads from a topic using a schema registry', false
436
+ end
437
+
438
+ context 'with an authed schema registry' do
439
+ let(:auth) { true }
440
+ let(:user) { "barney" }
441
+ let(:password) { "changeme" }
442
+ let(:avro_topic_name) { "topic_avro_auth" }
443
+ let(:subject_url) { "http://#{user}:#{password}@localhost:8081/subjects" }
444
+
445
+ context 'using schema_registry_key' do
446
+ let(:plain_config) do
447
+ base_config.merge!({
448
+ 'schema_registry_url' => "http://localhost:8081",
449
+ 'schema_registry_key' => user,
450
+ 'schema_registry_secret' => password
451
+ })
452
+ end
453
+
454
+ it_behaves_like 'it reads from a topic using a schema registry', true
455
+ end
456
+
457
+ context 'using schema_registry_url' do
458
+ let(:plain_config) do
459
+ base_config.merge!({
460
+ 'schema_registry_url' => "http://#{user}:#{password}@localhost:8081"
461
+ })
462
+ end
463
+
464
+ it_behaves_like 'it reads from a topic using a schema registry', true
465
+ end
466
+ end
377
467
  end
@@ -44,7 +44,7 @@ describe "outputs/kafka", :integration => true do
44
44
  end
45
45
 
46
46
  context 'when outputting messages serialized as Byte Array' do
47
- let(:test_topic) { 'topic1b' }
47
+ let(:test_topic) { 'logstash_integration_topicbytearray' }
48
48
  let(:num_events) { 3 }
49
49
 
50
50
  before :each do
@@ -3,45 +3,190 @@ require "logstash/devutils/rspec/spec_helper"
3
3
  require "logstash/inputs/kafka"
4
4
  require "concurrent"
5
5
 
6
- class MockConsumer
7
- def initialize
8
- @wake = Concurrent::AtomicBoolean.new(false)
9
- end
10
6
 
11
- def subscribe(topics)
12
- end
13
-
14
- def poll(ms)
15
- if @wake.value
16
- raise org.apache.kafka.common.errors.WakeupException.new
17
- else
18
- 10.times.map do
19
- org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
7
+ describe LogStash::Inputs::Kafka do
8
+ let(:common_config) { { 'topics' => ['logstash'] } }
9
+ let(:config) { common_config }
10
+ let(:consumer_double) { double(:consumer) }
11
+ let(:needs_raise) { false }
12
+ let(:payload) {
13
+ 10.times.map do
14
+ org.apache.kafka.clients.consumer.ConsumerRecord.new("logstash", 0, 0, "key", "value")
15
+ end
16
+ }
17
+ subject { LogStash::Inputs::Kafka.new(config) }
18
+
19
+ describe '#poll' do
20
+ before do
21
+ polled = false
22
+ allow(consumer_double).to receive(:poll) do
23
+ if polled
24
+ []
25
+ else
26
+ polled = true
27
+ payload
28
+ end
20
29
  end
21
30
  end
31
+
32
+ it 'should poll' do
33
+ expect(consumer_double).to receive(:poll)
34
+ expect(subject.do_poll(consumer_double)).to eq(payload)
35
+ end
36
+
37
+ it 'should return nil if Kafka Exception is encountered' do
38
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
39
+ expect(subject.do_poll(consumer_double)).to be_empty
40
+ end
41
+
42
+ it 'should not throw if Kafka Exception is encountered' do
43
+ expect(consumer_double).to receive(:poll).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
44
+ expect{subject.do_poll(consumer_double)}.not_to raise_error
45
+ end
46
+
47
+ it 'should return no records if Assertion Error is encountered' do
48
+ expect(consumer_double).to receive(:poll).and_raise(java.lang.AssertionError.new(''))
49
+ expect{subject.do_poll(consumer_double)}.to raise_error(java.lang.AssertionError)
50
+ end
22
51
  end
23
52
 
24
- def close
53
+ describe '#maybe_commit_offset' do
54
+ context 'with auto commit disabled' do
55
+ let(:config) { common_config.merge('enable_auto_commit' => false) }
56
+
57
+ it 'should call commit on the consumer' do
58
+ expect(consumer_double).to receive(:commitSync)
59
+ subject.maybe_commit_offset(consumer_double)
60
+ end
61
+ it 'should not throw if a Kafka Exception is encountered' do
62
+ expect(consumer_double).to receive(:commitSync).and_raise(org.apache.kafka.common.errors.TopicAuthorizationException.new(''))
63
+ expect{subject.maybe_commit_offset(consumer_double)}.not_to raise_error
64
+ end
65
+
66
+ it 'should throw if Assertion Error is encountered' do
67
+ expect(consumer_double).to receive(:commitSync).and_raise(java.lang.AssertionError.new(''))
68
+ expect{subject.maybe_commit_offset(consumer_double)}.to raise_error(java.lang.AssertionError)
69
+ end
70
+ end
71
+
72
+ context 'with auto commit enabled' do
73
+ let(:config) { common_config.merge('enable_auto_commit' => true) }
74
+
75
+ it 'should not call commit on the consumer' do
76
+ expect(consumer_double).not_to receive(:commitSync)
77
+ subject.maybe_commit_offset(consumer_double)
78
+ end
79
+ end
25
80
  end
26
81
 
27
- def wakeup
28
- @wake.make_true
82
+ describe '#register' do
83
+ it "should register" do
84
+ expect { subject.register }.to_not raise_error
85
+ end
29
86
  end
30
- end
31
87
 
32
- describe LogStash::Inputs::Kafka do
33
- let(:config) { { 'topics' => ['logstash'], 'consumer_threads' => 4 } }
34
- subject { LogStash::Inputs::Kafka.new(config) }
88
+ describe '#running' do
89
+ let(:q) { Queue.new }
90
+ let(:config) { common_config.merge('client_id' => 'test') }
91
+
92
+ before do
93
+ expect(subject).to receive(:create_consumer).once.and_return(consumer_double)
94
+ allow(consumer_double).to receive(:wakeup)
95
+ allow(consumer_double).to receive(:close)
96
+ allow(consumer_double).to receive(:subscribe)
97
+ end
98
+
99
+ context 'when running' do
100
+ before do
101
+ polled = false
102
+ allow(consumer_double).to receive(:poll) do
103
+ if polled
104
+ []
105
+ else
106
+ polled = true
107
+ payload
108
+ end
109
+ end
110
+
111
+ subject.register
112
+ t = Thread.new do
113
+ sleep(1)
114
+ subject.do_stop
115
+ end
116
+ subject.run(q)
117
+ t.join
118
+ end
119
+
120
+ it 'should process the correct number of events' do
121
+ expect(q.size).to eq(10)
122
+ end
123
+
124
+ it 'should set the consumer thread name' do
125
+ expect(subject.instance_variable_get('@runner_threads').first.get_name).to eq("kafka-input-worker-test-0")
126
+ end
127
+ end
128
+
129
+ context 'when errors are encountered during poll' do
130
+ before do
131
+ raised, polled = false
132
+ allow(consumer_double).to receive(:poll) do
133
+ unless raised
134
+ raised = true
135
+ raise exception
136
+ end
137
+ if polled
138
+ []
139
+ else
140
+ polled = true
141
+ payload
142
+ end
143
+ end
144
+
145
+ subject.register
146
+ t = Thread.new do
147
+ sleep 2
148
+ subject.do_stop
149
+ end
150
+ subject.run(q)
151
+ t.join
152
+ end
153
+
154
+ context "when a Kafka exception is raised" do
155
+ let(:exception) { org.apache.kafka.common.errors.TopicAuthorizationException.new('Invalid topic') }
156
+
157
+ it 'should poll successfully' do
158
+ expect(q.size).to eq(10)
159
+ end
160
+ end
161
+
162
+ context "when a StandardError is raised" do
163
+ let(:exception) { StandardError.new('Standard Error') }
164
+
165
+ it 'should retry and poll successfully' do
166
+ expect(q.size).to eq(10)
167
+ end
168
+ end
169
+
170
+ context "when a java error is raised" do
171
+ let(:exception) { java.lang.AssertionError.new('Fatal assertion') }
35
172
 
36
- it "should register" do
37
- expect { subject.register }.to_not raise_error
173
+ it "should not retry" do
174
+ expect(q.size).to eq(0)
175
+ end
176
+ end
177
+ end
38
178
  end
39
179
 
40
- context "register parameter verification" do
180
+ describe "schema registry parameter verification" do
181
+ let(:base_config) do {
182
+ 'schema_registry_url' => 'http://localhost:8081',
183
+ 'topics' => ['logstash'],
184
+ 'consumer_threads' => 4
185
+ }
186
+ end
187
+
41
188
  context "schema_registry_url" do
42
- let(:config) do
43
- { 'schema_registry_url' => 'http://localhost:8081', 'topics' => ['logstash'], 'consumer_threads' => 4 }
44
- end
189
+ let(:config) { base_config }
45
190
 
46
191
  it "conflict with value_deserializer_class should fail" do
47
192
  config['value_deserializer_class'] = 'my.fantasy.Deserializer'
@@ -54,19 +199,63 @@ describe LogStash::Inputs::Kafka do
54
199
  end
55
200
  end
56
201
 
57
- context "decorate_events" do
58
- let(:config) { { 'decorate_events' => 'extended'} }
202
+ context 'when kerberos auth is used' do
203
+ ['SASL_SSL', 'SASL_PLAINTEXT'].each do |protocol|
204
+ context "with #{protocol}" do
205
+ ['auto', 'skip'].each do |vsr|
206
+ context "when validata_schema_registry is #{vsr}" do
207
+ let(:config) { base_config.merge({'security_protocol' => protocol,
208
+ 'schema_registry_validation' => vsr})
209
+ }
210
+ it 'skips verification' do
211
+ expect(subject).not_to receive(:check_for_schema_registry_connectivity_and_subjects)
212
+ expect { subject.register }.not_to raise_error
213
+ end
214
+ end
215
+ end
216
+ end
217
+ end
218
+ end
59
219
 
60
- it "should raise error for invalid value" do
61
- config['decorate_events'] = 'avoid'
62
- expect { subject.register }.to raise_error LogStash::ConfigurationError, /Something is wrong with your configuration./
220
+ context 'when kerberos auth is not used' do
221
+ context "when skip_verify is set to auto" do
222
+ let(:config) { base_config.merge({'schema_registry_validation' => 'auto'})}
223
+ it 'performs verification' do
224
+ expect(subject).to receive(:check_for_schema_registry_connectivity_and_subjects)
225
+ expect { subject.register }.not_to raise_error
226
+ end
63
227
  end
64
228
 
65
- it "should map old true boolean value to :record_props mode" do
66
- config['decorate_events'] = "true"
67
- subject.register
68
- expect(subject.metadata_mode).to include(:record_props)
229
+ context "when skip_verify is set to default" do
230
+ let(:config) { base_config }
231
+ it 'performs verification' do
232
+ expect(subject).to receive(:check_for_schema_registry_connectivity_and_subjects)
233
+ expect { subject.register }.not_to raise_error
234
+ end
69
235
  end
236
+
237
+ context "when skip_verify is set to skip" do
238
+ let(:config) { base_config.merge({'schema_registry_validation' => 'skip'})}
239
+ it 'should skip verification' do
240
+ expect(subject).not_to receive(:check_for_schema_registry_connectivity_and_subjects)
241
+ expect { subject.register }.not_to raise_error
242
+ end
243
+ end
244
+ end
245
+ end
246
+
247
+ context "decorate_events" do
248
+ let(:config) { { 'decorate_events' => 'extended'} }
249
+
250
+ it "should raise error for invalid value" do
251
+ config['decorate_events'] = 'avoid'
252
+ expect { subject.register }.to raise_error LogStash::ConfigurationError, /Something is wrong with your configuration./
253
+ end
254
+
255
+ it "should map old true boolean value to :record_props mode" do
256
+ config['decorate_events'] = "true"
257
+ subject.register
258
+ expect(subject.metadata_mode).to include(:record_props)
70
259
  end
71
260
  end
72
261
 
@@ -8,6 +8,8 @@ describe "outputs/kafka" do
8
8
  let (:event) { LogStash::Event.new({'message' => 'hello', 'topic_name' => 'my_topic', 'host' => '172.0.0.1',
9
9
  '@timestamp' => LogStash::Timestamp.now}) }
10
10
 
11
+ let(:future) { double('kafka producer future') }
12
+
11
13
  context 'when initializing' do
12
14
  it "should register" do
13
15
  output = LogStash::Plugin.lookup("output", "kafka").new(simple_kafka_config)
@@ -24,8 +26,8 @@ describe "outputs/kafka" do
24
26
 
25
27
  context 'when outputting messages' do
26
28
  it 'should send logstash event to kafka broker' do
27
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
28
- .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord)).and_call_original
29
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).
30
+ with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
29
31
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
30
32
  kafka.register
31
33
  kafka.multi_receive([event])
@@ -33,18 +35,18 @@ describe "outputs/kafka" do
33
35
 
34
36
  it 'should support Event#sprintf placeholders in topic_id' do
35
37
  topic_field = 'topic_name'
36
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
37
- .with("my_topic", event.to_s).and_call_original
38
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
38
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
39
+ with("my_topic", event.to_s).and_call_original
40
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
39
41
  kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
40
42
  kafka.register
41
43
  kafka.multi_receive([event])
42
44
  end
43
45
 
44
46
  it 'should support field referenced message_keys' do
45
- expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
46
- .with("test", "172.0.0.1", event.to_s).and_call_original
47
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).and_call_original
47
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new).
48
+ with("test", "172.0.0.1", event.to_s).and_call_original
49
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
48
50
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
49
51
  kafka.register
50
52
  kafka.multi_receive([event])
@@ -71,22 +73,24 @@ describe "outputs/kafka" do
71
73
  before do
72
74
  count = 0
73
75
  expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
74
- .exactly(sendcount).times
75
- .and_wrap_original do |m, *args|
76
+ .exactly(sendcount).times do
76
77
  if count < failcount # fail 'failcount' times in a row.
77
78
  count += 1
78
79
  # Pick an exception at random
79
80
  raise exception_classes.shuffle.first.new("injected exception for testing")
80
81
  else
81
- m.call(*args) # call original
82
+ count = :done
83
+ future # return future
82
84
  end
83
85
  end
86
+ expect(future).to receive :get
84
87
  end
85
88
 
86
89
  it "should retry until successful" do
87
90
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
88
91
  kafka.register
89
92
  kafka.multi_receive([event])
93
+ sleep(1.0) # allow for future.get call
90
94
  end
91
95
  end
92
96
 
@@ -101,15 +105,13 @@ describe "outputs/kafka" do
101
105
 
102
106
  before do
103
107
  count = 0
104
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
105
- .exactly(1).times
106
- .and_wrap_original do |m, *args|
108
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(1).times do
107
109
  if count < failcount # fail 'failcount' times in a row.
108
110
  count += 1
109
111
  # Pick an exception at random
110
112
  raise exception_classes.shuffle.first.new("injected exception for testing")
111
113
  else
112
- m.call(*args) # call original
114
+ fail 'unexpected producer#send invocation'
113
115
  end
114
116
  end
115
117
  end
@@ -131,25 +133,24 @@ describe "outputs/kafka" do
131
133
 
132
134
  it "should retry until successful" do
133
135
  count = 0
134
-
135
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
136
- .exactly(sendcount).times
137
- .and_wrap_original do |m, *args|
136
+ success = nil
137
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).exactly(sendcount).times do
138
138
  if count < failcount
139
139
  count += 1
140
140
  # inject some failures.
141
141
 
142
142
  # Return a custom Future that will raise an exception to simulate a Kafka send() problem.
143
143
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
144
- future.run
145
- future
146
144
  else
147
- m.call(*args)
145
+ success = true
146
+ future = java.util.concurrent.FutureTask.new { nil } # return no-op future
148
147
  end
148
+ future.tap { Thread.start { future.run } }
149
149
  end
150
150
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
151
151
  kafka.register
152
152
  kafka.multi_receive([event])
153
+ expect( success ).to be true
153
154
  end
154
155
  end
155
156
 
@@ -158,9 +159,7 @@ describe "outputs/kafka" do
158
159
  let(:max_sends) { 1 }
159
160
 
160
161
  it "should should only send once" do
161
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
162
- .once
163
- .and_wrap_original do |m, *args|
162
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
164
163
  # Always fail.
165
164
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
166
165
  future.run
@@ -172,9 +171,7 @@ describe "outputs/kafka" do
172
171
  end
173
172
 
174
173
  it 'should not sleep' do
175
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
176
- .once
177
- .and_wrap_original do |m, *args|
174
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).once do
178
175
  # Always fail.
179
176
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
180
177
  future.run
@@ -193,13 +190,10 @@ describe "outputs/kafka" do
193
190
  let(:max_sends) { retries + 1 }
194
191
 
195
192
  it "should give up after retries are exhausted" do
196
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
197
- .at_most(max_sends).times
198
- .and_wrap_original do |m, *args|
193
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
199
194
  # Always fail.
200
195
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
201
- future.run
202
- future
196
+ future.tap { Thread.start { future.run } }
203
197
  end
204
198
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge("retries" => retries))
205
199
  kafka.register
@@ -207,9 +201,7 @@ describe "outputs/kafka" do
207
201
  end
208
202
 
209
203
  it 'should only sleep retries number of times' do
210
- expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
211
- .at_most(max_sends).times
212
- .and_wrap_original do |m, *args|
204
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send).at_most(max_sends).times do
213
205
  # Always fail.
214
206
  future = java.util.concurrent.FutureTask.new { raise org.apache.kafka.common.errors.TimeoutException.new("Failed") }
215
207
  future.run
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-  version: 10.7.4
+  version: 10.8.0
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-04-14 00:00:00.000000000 Z
+date: 2021-07-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -168,6 +168,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.5.1
+  name: digest-crc
+  prerelease: false
+  type: :development
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.5.1
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
@@ -220,6 +234,9 @@ files:
 - lib/logstash/plugin_mixins/common.rb
 - lib/logstash/plugin_mixins/kafka_support.rb
 - logstash-integration-kafka.gemspec
+- spec/check_docs_spec.rb
+- spec/fixtures/jaas.config
+- spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb
 - spec/integration/outputs/kafka_spec.rb
@@ -269,6 +286,9 @@ signing_key:
 specification_version: 4
 summary: Integration with Kafka - input and output plugins
 test_files:
+- spec/check_docs_spec.rb
+- spec/fixtures/jaas.config
+- spec/fixtures/pwd
 - spec/fixtures/trust-store_stub.jks
 - spec/integration/inputs/kafka_spec.rb
 - spec/integration/outputs/kafka_spec.rb