rdkafka 0.12.0 → 0.15.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (86) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +57 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +155 -93
  10. data/Gemfile +2 -0
  11. data/{LICENSE → MIT-LICENSE} +2 -1
  12. data/README.md +76 -29
  13. data/Rakefile +2 -0
  14. data/certs/cert_chain.pem +26 -0
  15. data/docker-compose.yml +18 -15
  16. data/ext/README.md +1 -1
  17. data/ext/Rakefile +46 -27
  18. data/lib/rdkafka/abstract_handle.rb +41 -25
  19. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  20. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  21. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  22. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  23. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  24. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  25. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  26. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  29. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  30. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  31. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  32. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  33. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  34. data/lib/rdkafka/admin.rb +494 -35
  35. data/lib/rdkafka/bindings.rb +180 -41
  36. data/lib/rdkafka/callbacks.rb +202 -1
  37. data/lib/rdkafka/config.rb +62 -25
  38. data/lib/rdkafka/consumer/headers.rb +24 -9
  39. data/lib/rdkafka/consumer/message.rb +3 -1
  40. data/lib/rdkafka/consumer/partition.rb +2 -0
  41. data/lib/rdkafka/consumer/topic_partition_list.rb +13 -8
  42. data/lib/rdkafka/consumer.rb +243 -111
  43. data/lib/rdkafka/error.rb +15 -0
  44. data/lib/rdkafka/helpers/time.rb +14 -0
  45. data/lib/rdkafka/metadata.rb +25 -2
  46. data/lib/rdkafka/native_kafka.rb +120 -0
  47. data/lib/rdkafka/producer/delivery_handle.rb +16 -2
  48. data/lib/rdkafka/producer/delivery_report.rb +22 -2
  49. data/lib/rdkafka/producer.rb +151 -21
  50. data/lib/rdkafka/version.rb +5 -3
  51. data/lib/rdkafka.rb +24 -2
  52. data/rdkafka.gemspec +21 -5
  53. data/renovate.json +6 -0
  54. data/spec/rdkafka/abstract_handle_spec.rb +1 -1
  55. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  56. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  57. data/spec/rdkafka/admin/create_topic_handle_spec.rb +1 -1
  58. data/spec/rdkafka/admin/create_topic_report_spec.rb +1 -1
  59. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  60. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  61. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +1 -1
  62. data/spec/rdkafka/admin/delete_topic_report_spec.rb +1 -1
  63. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  64. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  65. data/spec/rdkafka/admin_spec.rb +209 -5
  66. data/spec/rdkafka/bindings_spec.rb +2 -1
  67. data/spec/rdkafka/callbacks_spec.rb +1 -1
  68. data/spec/rdkafka/config_spec.rb +24 -3
  69. data/spec/rdkafka/consumer/headers_spec.rb +60 -0
  70. data/spec/rdkafka/consumer/message_spec.rb +1 -1
  71. data/spec/rdkafka/consumer/partition_spec.rb +1 -1
  72. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +20 -1
  73. data/spec/rdkafka/consumer_spec.rb +352 -61
  74. data/spec/rdkafka/error_spec.rb +1 -1
  75. data/spec/rdkafka/metadata_spec.rb +4 -3
  76. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -35
  77. data/spec/rdkafka/producer/delivery_handle_spec.rb +4 -1
  78. data/spec/rdkafka/producer/delivery_report_spec.rb +11 -3
  79. data/spec/rdkafka/producer_spec.rb +234 -22
  80. data/spec/spec_helper.rb +20 -2
  81. data.tar.gz.sig +0 -0
  82. metadata +81 -17
  83. metadata.gz.sig +0 -0
  84. data/.semaphore/semaphore.yml +0 -23
  85. data/bin/console +0 -11
  86. data/lib/rdkafka/producer/client.rb +0 -47
@@ -0,0 +1,60 @@
1
+ # frozen_string_literal: true
2
+
3
+ describe Rdkafka::Consumer::Headers do
4
+ let(:headers) do
5
+ { # Note String keys!
6
+ "version" => "2.1.3",
7
+ "type" => "String"
8
+ }
9
+ end
10
+ let(:native_message) { double('native message') }
11
+ let(:headers_ptr) { double('headers pointer') }
12
+
13
+ describe '.from_native' do
14
+ before do
15
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(native_message, anything) do |_, headers_ptrptr|
16
+ expect(headers_ptrptr).to receive(:read_pointer).and_return(headers_ptr)
17
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
18
+ end
19
+
20
+ expect(Rdkafka::Bindings).to \
21
+ receive(:rd_kafka_header_get_all)
22
+ .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
23
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: headers.keys[0]))
24
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[0].size)
25
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers.values[0]))
26
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
27
+ end
28
+
29
+ expect(Rdkafka::Bindings).to \
30
+ receive(:rd_kafka_header_get_all)
31
+ .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
32
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: headers.keys[1]))
33
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[1].size)
34
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers.values[1]))
35
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
36
+ end
37
+
38
+ expect(Rdkafka::Bindings).to \
39
+ receive(:rd_kafka_header_get_all)
40
+ .with(headers_ptr, 2, anything, anything, anything)
41
+ .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
42
+ end
43
+
44
+ subject { described_class.from_native(native_message) }
45
+
46
+ it { is_expected.to eq(headers) }
47
+ it { is_expected.to be_frozen }
48
+
49
+ it 'allows String key' do
50
+ expect(subject['version']).to eq("2.1.3")
51
+ end
52
+
53
+ it 'allows Symbol key, but warns' do
54
+ expect(Kernel).to \
55
+ receive(:warn).with("rdkafka deprecation warning: header access with Symbol key :version treated as a String. " \
56
+ "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
57
+ expect(subject[:version]).to eq("2.1.3")
58
+ end
59
+ end
60
+ end
@@ -1,4 +1,4 @@
1
- require "spec_helper"
1
+ # frozen_string_literal: true
2
2
 
3
3
  describe Rdkafka::Consumer::Message do
4
4
  let(:native_client) { new_native_client }
@@ -1,4 +1,4 @@
1
- require "spec_helper"
1
+ # frozen_string_literal: true
2
2
 
3
3
  describe Rdkafka::Consumer::Partition do
4
4
  let(:offset) { 100 }
@@ -1,4 +1,4 @@
1
- require "spec_helper"
1
+ # frozen_string_literal: true
2
2
 
3
3
  describe Rdkafka::Consumer::TopicPartitionList do
4
4
  it "should create a new list and add unassigned topics" do
@@ -219,5 +219,24 @@ describe Rdkafka::Consumer::TopicPartitionList do
219
219
 
220
220
  expect(list).to eq other
221
221
  end
222
+
223
+ it "should create a native list with timestamp offsets if offsets are Time" do
224
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
225
+ list.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000))
226
+ end
227
+
228
+ tpl = list.to_native_tpl
229
+
230
+ compare_list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
231
+ list.add_topic_and_partitions_with_offsets(
232
+ "topic",
233
+ 0 => (Time.at(1505069646, 250_000).to_f * 1000).floor
234
+ )
235
+ end
236
+
237
+ native_list = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
238
+
239
+ expect(native_list).to eq compare_list
240
+ end
222
241
  end
223
242
  end
@@ -1,4 +1,5 @@
1
- require "spec_helper"
1
+ # frozen_string_literal: true
2
+
2
3
  require "ostruct"
3
4
  require 'securerandom'
4
5
 
@@ -9,6 +10,10 @@ describe Rdkafka::Consumer do
9
10
  after { consumer.close }
10
11
  after { producer.close }
11
12
 
13
+ describe '#name' do
14
+ it { expect(consumer.name).to include('rdkafka#consumer-') }
15
+ end
16
+
12
17
  describe "#subscribe, #unsubscribe and #subscription" do
13
18
  it "should subscribe, unsubscribe and return the subscription" do
14
19
  expect(consumer.subscription).to be_empty
@@ -49,11 +54,35 @@ describe Rdkafka::Consumer do
49
54
  consumer.subscription
50
55
  }.to raise_error(Rdkafka::RdkafkaError)
51
56
  end
57
+
58
+ context "when using consumer without the poll set" do
59
+ let(:consumer) do
60
+ config = rdkafka_consumer_config
61
+ config.consumer_poll_set = false
62
+ config.consumer
63
+ end
64
+
65
+ it "should subscribe, unsubscribe and return the subscription" do
66
+ expect(consumer.subscription).to be_empty
67
+
68
+ consumer.subscribe("consume_test_topic")
69
+
70
+ expect(consumer.subscription).not_to be_empty
71
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
72
+ list.add_topic("consume_test_topic")
73
+ end
74
+ expect(consumer.subscription).to eq expected_subscription
75
+
76
+ consumer.unsubscribe
77
+
78
+ expect(consumer.subscription).to be_empty
79
+ end
80
+ end
52
81
  end
53
82
 
54
83
  describe "#pause and #resume" do
55
84
  context "subscription" do
56
- let(:timeout) { 1000 }
85
+ let(:timeout) { 2000 }
57
86
 
58
87
  before { consumer.subscribe("consume_test_topic") }
59
88
  after { consumer.unsubscribe }
@@ -268,6 +297,28 @@ describe Rdkafka::Consumer do
268
297
  end
269
298
  end
270
299
 
300
+ describe '#assignment_lost?' do
301
+ it "should not return true as we do have an assignment" do
302
+ consumer.subscribe("consume_test_topic")
303
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
304
+ list.add_topic("consume_test_topic")
305
+ end
306
+
307
+ expect(consumer.assignment_lost?).to eq false
308
+ consumer.unsubscribe
309
+ end
310
+
311
+ it "should not return true after voluntary unsubscribing" do
312
+ consumer.subscribe("consume_test_topic")
313
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
314
+ list.add_topic("consume_test_topic")
315
+ end
316
+
317
+ consumer.unsubscribe
318
+ expect(consumer.assignment_lost?).to eq false
319
+ end
320
+ end
321
+
271
322
  describe "#close" do
272
323
  it "should close a consumer" do
273
324
  consumer.subscribe("consume_test_topic")
@@ -284,10 +335,34 @@ describe Rdkafka::Consumer do
284
335
  consumer.poll(100)
285
336
  }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
286
337
  end
338
+
339
+ context 'when there are outgoing operations in other threads' do
340
+ it 'should wait and not crash' do
341
+ times = []
342
+
343
+ # Run a long running poll
344
+ thread = Thread.new do
345
+ times << Time.now
346
+ consumer.subscribe("empty_test_topic")
347
+ times << Time.now
348
+ consumer.poll(1_000)
349
+ times << Time.now
350
+ end
351
+
352
+ # Make sure it starts before we close
353
+ sleep(0.1)
354
+ consumer.close
355
+ close_time = Time.now
356
+ thread.join
357
+
358
+ times.each { |op_time| expect(op_time).to be < close_time }
359
+ end
360
+ end
287
361
  end
288
362
 
289
- describe "#commit, #committed and #store_offset" do
290
- # Make sure there's a stored offset
363
+
364
+ describe "#position, #commit, #committed and #store_offset" do
365
+ # Make sure there are messages to work with
291
366
  let!(:report) do
292
367
  producer.produce(
293
368
  topic: "consume_test_topic",
@@ -305,29 +380,33 @@ describe Rdkafka::Consumer do
305
380
  )
306
381
  end
307
382
 
308
- it "should only accept a topic partition list in committed" do
309
- expect {
310
- consumer.committed("list")
311
- }.to raise_error TypeError
383
+ describe "#position" do
384
+ it "should only accept a topic partition list in position if not nil" do
385
+ expect {
386
+ consumer.position("list")
387
+ }.to raise_error TypeError
388
+ end
312
389
  end
313
390
 
314
- it "should commit in sync mode" do
315
- expect {
316
- consumer.commit(nil, true)
317
- }.not_to raise_error
318
- end
391
+ describe "#committed" do
392
+ it "should only accept a topic partition list in commit if not nil" do
393
+ expect {
394
+ consumer.commit("list")
395
+ }.to raise_error TypeError
396
+ end
319
397
 
320
- it "should only accept a topic partition list in commit if not nil" do
321
- expect {
322
- consumer.commit("list")
323
- }.to raise_error TypeError
398
+ it "should commit in sync mode" do
399
+ expect {
400
+ consumer.commit(nil, true)
401
+ }.not_to raise_error
402
+ end
324
403
  end
325
404
 
326
405
  context "with a committed consumer" do
327
406
  before :all do
328
407
  # Make sure there are some messages.
329
408
  handles = []
330
- producer = rdkafka_producer_config.producer
409
+ producer = rdkafka_config.producer
331
410
  10.times do
332
411
  (0..2).each do |i|
333
412
  handles << producer.produce(
@@ -371,34 +450,38 @@ describe Rdkafka::Consumer do
371
450
  }.to raise_error(Rdkafka::RdkafkaError)
372
451
  end
373
452
 
374
- it "should fetch the committed offsets for the current assignment" do
375
- partitions = consumer.committed.to_h["consume_test_topic"]
376
- expect(partitions).not_to be_nil
377
- expect(partitions[0].offset).to eq 1
378
- end
453
+ describe "#committed" do
454
+ it "should fetch the committed offsets for the current assignment" do
455
+ partitions = consumer.committed.to_h["consume_test_topic"]
456
+ expect(partitions).not_to be_nil
457
+ expect(partitions[0].offset).to eq 1
458
+ end
379
459
 
380
- it "should fetch the committed offsets for a specified topic partition list" do
381
- list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
382
- list.add_topic("consume_test_topic", [0, 1, 2])
460
+ it "should fetch the committed offsets for a specified topic partition list" do
461
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
462
+ list.add_topic("consume_test_topic", [0, 1, 2])
463
+ end
464
+ partitions = consumer.committed(list).to_h["consume_test_topic"]
465
+ expect(partitions).not_to be_nil
466
+ expect(partitions[0].offset).to eq 1
467
+ expect(partitions[1].offset).to eq 1
468
+ expect(partitions[2].offset).to eq 1
383
469
  end
384
- partitions = consumer.committed(list).to_h["consume_test_topic"]
385
- expect(partitions).not_to be_nil
386
- expect(partitions[0].offset).to eq 1
387
- expect(partitions[1].offset).to eq 1
388
- expect(partitions[2].offset).to eq 1
389
- end
390
470
 
391
- it "should raise an error when getting committed fails" do
392
- expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
393
- list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
394
- list.add_topic("consume_test_topic", [0, 1, 2])
471
+ it "should raise an error when getting committed fails" do
472
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
473
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
474
+ list.add_topic("consume_test_topic", [0, 1, 2])
475
+ end
476
+ expect {
477
+ consumer.committed(list)
478
+ }.to raise_error Rdkafka::RdkafkaError
395
479
  end
396
- expect {
397
- consumer.committed(list)
398
- }.to raise_error Rdkafka::RdkafkaError
399
480
  end
400
481
 
401
482
  describe "#store_offset" do
483
+ let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': false).consumer }
484
+
402
485
  before do
403
486
  config = {}
404
487
  config[:'enable.auto.offset.store'] = false
@@ -416,6 +499,8 @@ describe Rdkafka::Consumer do
416
499
  @new_consumer.store_offset(message)
417
500
  @new_consumer.commit
418
501
 
502
+ # TODO use position here, should be at offset
503
+
419
504
  list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
420
505
  list.add_topic("consume_test_topic", [0, 1, 2])
421
506
  end
@@ -430,6 +515,43 @@ describe Rdkafka::Consumer do
430
515
  @new_consumer.store_offset(message)
431
516
  }.to raise_error Rdkafka::RdkafkaError
432
517
  end
518
+
519
+ describe "#position" do
520
+ it "should fetch the positions for the current assignment" do
521
+ consumer.store_offset(message)
522
+
523
+ partitions = consumer.position.to_h["consume_test_topic"]
524
+ expect(partitions).not_to be_nil
525
+ expect(partitions[0].offset).to eq message.offset + 1
526
+ end
527
+
528
+ it "should fetch the positions for a specified assignment" do
529
+ consumer.store_offset(message)
530
+
531
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
532
+ list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => nil, 1 => nil, 2 => nil)
533
+ end
534
+ partitions = consumer.position(list).to_h["consume_test_topic"]
535
+ expect(partitions).not_to be_nil
536
+ expect(partitions[0].offset).to eq message.offset + 1
537
+ end
538
+
539
+ it "should raise an error when getting the position fails" do
540
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_position).and_return(20)
541
+
542
+ expect {
543
+ consumer.position
544
+ }.to raise_error(Rdkafka::RdkafkaError)
545
+ end
546
+ end
547
+
548
+ context "when trying to use with enable.auto.offset.store set to true" do
549
+ let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': true).consumer }
550
+
551
+ it "expect to raise invalid configuration error" do
552
+ expect { consumer.store_offset(message) }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/)
553
+ end
554
+ end
433
555
  end
434
556
  end
435
557
  end
@@ -593,7 +715,7 @@ describe Rdkafka::Consumer do
593
715
  end
594
716
 
595
717
  describe "#poll with headers" do
596
- it "should return message with headers" do
718
+ it "should return message with headers using string keys (when produced with symbol keys)" do
597
719
  report = producer.produce(
598
720
  topic: "consume_test_topic",
599
721
  key: "key headers",
@@ -603,7 +725,20 @@ describe Rdkafka::Consumer do
603
725
  message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
604
726
  expect(message).to be
605
727
  expect(message.key).to eq('key headers')
606
- expect(message.headers).to include(foo: 'bar')
728
+ expect(message.headers).to include('foo' => 'bar')
729
+ end
730
+
731
+ it "should return message with headers using string keys (when produced with string keys)" do
732
+ report = producer.produce(
733
+ topic: "consume_test_topic",
734
+ key: "key headers",
735
+ headers: { 'foo' => 'bar' }
736
+ ).wait
737
+
738
+ message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
739
+ expect(message).to be
740
+ expect(message.key).to eq('key headers')
741
+ expect(message.headers).to include('foo' => 'bar')
607
742
  end
608
743
 
609
744
  it "should return message with no headers" do
@@ -698,7 +833,7 @@ describe Rdkafka::Consumer do
698
833
  n.times do |i|
699
834
  handles << producer.produce(
700
835
  topic: topic_name,
701
- payload: Time.new.to_f.to_s,
836
+ payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
702
837
  key: i.to_s,
703
838
  partition: 0
704
839
  )
@@ -723,7 +858,8 @@ describe Rdkafka::Consumer do
723
858
  #
724
859
  # This is, in effect, an integration test and the subsequent specs are
725
860
  # unit tests.
726
- create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
861
+ admin = rdkafka_config.admin
862
+ create_topic_handle = admin.create_topic(topic_name, 1, 1)
727
863
  create_topic_handle.wait(max_wait_timeout: 15.0)
728
864
  consumer.subscribe(topic_name)
729
865
  produce_n 42
@@ -736,6 +872,7 @@ describe Rdkafka::Consumer do
736
872
  expect(all_yields.flatten.size).to eq 42
737
873
  expect(all_yields.size).to be > 4
738
874
  expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
875
+ admin.close
739
876
  end
740
877
 
741
878
  it "should batch poll results and yield arrays of messages" do
@@ -778,13 +915,15 @@ describe Rdkafka::Consumer do
778
915
  end
779
916
 
780
917
  it "should yield [] if nothing is received before the timeout" do
781
- create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
918
+ admin = rdkafka_config.admin
919
+ create_topic_handle = admin.create_topic(topic_name, 1, 1)
782
920
  create_topic_handle.wait(max_wait_timeout: 15.0)
783
921
  consumer.subscribe(topic_name)
784
922
  consumer.each_batch do |batch|
785
923
  expect(batch).to eq([])
786
924
  break
787
925
  end
926
+ admin.close
788
927
  end
789
928
 
790
929
  it "should yield batches of max_items in size if messages are already fetched" do
@@ -861,6 +1000,7 @@ describe Rdkafka::Consumer do
861
1000
  expect(batches_yielded.first.size).to eq 2
862
1001
  expect(exceptions_yielded.flatten.size).to eq 1
863
1002
  expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
1003
+ consumer.close
864
1004
  end
865
1005
  end
866
1006
 
@@ -902,10 +1042,107 @@ describe Rdkafka::Consumer do
902
1042
  expect(each_batch_iterations).to eq 0
903
1043
  expect(batches_yielded.size).to eq 0
904
1044
  expect(exceptions_yielded.size).to eq 0
1045
+ consumer.close
1046
+ end
1047
+ end
1048
+ end
1049
+
1050
+ describe "#offsets_for_times" do
1051
+ it "should raise when not TopicPartitionList" do
1052
+ expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
1053
+ end
1054
+
1055
+ it "should raise an error when offsets_for_times fails" do
1056
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
1057
+
1058
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)
1059
+
1060
+ expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
1061
+ end
1062
+
1063
+ context "when subscribed" do
1064
+ let(:timeout) { 1000 }
1065
+
1066
+ before do
1067
+ consumer.subscribe("consume_test_topic")
1068
+
1069
+ # 1. partitions are assigned
1070
+ wait_for_assignment(consumer)
1071
+ expect(consumer.assignment).not_to be_empty
1072
+
1073
+ # 2. eat unrelated messages
1074
+ while(consumer.poll(timeout)) do; end
1075
+ end
1076
+
1077
+ after { consumer.unsubscribe }
1078
+
1079
+ def send_one_message(val)
1080
+ producer.produce(
1081
+ topic: "consume_test_topic",
1082
+ payload: "payload #{val}",
1083
+ key: "key 0",
1084
+ partition: 0
1085
+ ).wait
1086
+ end
1087
+
1088
+ it "returns a TopicPartitionList with updated offsets" do
1089
+ send_one_message("a")
1090
+ send_one_message("b")
1091
+ send_one_message("c")
1092
+
1093
+ consumer.poll(timeout)
1094
+ message = consumer.poll(timeout)
1095
+ consumer.poll(timeout)
1096
+
1097
+ tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
1098
+ list.add_topic_and_partitions_with_offsets(
1099
+ "consume_test_topic",
1100
+ [
1101
+ [0, message.timestamp]
1102
+ ]
1103
+ )
1104
+ end
1105
+
1106
+ tpl_response = consumer.offsets_for_times(tpl)
1107
+
1108
+ expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
905
1109
  end
906
1110
  end
907
1111
  end
908
1112
 
1113
+ # Only relevant in case of a consumer with separate queues
1114
+ describe '#events_poll' do
1115
+ let(:stats) { [] }
1116
+
1117
+ before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } }
1118
+
1119
+ after { Rdkafka::Config.statistics_callback = nil }
1120
+
1121
+ let(:consumer) do
1122
+ config = rdkafka_consumer_config('statistics.interval.ms': 100)
1123
+ config.consumer_poll_set = false
1124
+ config.consumer
1125
+ end
1126
+
1127
+ it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do
1128
+ consumer.subscribe("consume_test_topic")
1129
+ consumer.poll(1_000)
1130
+ expect(stats).to be_empty
1131
+ consumer.events_poll(-1)
1132
+ expect(stats).not_to be_empty
1133
+ end
1134
+ end
1135
+
1136
+ describe '#consumer_group_metadata_pointer' do
1137
+ let(:pointer) { consumer.consumer_group_metadata_pointer }
1138
+
1139
+ after { Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) }
1140
+
1141
+ it 'expect to return a pointer' do
1142
+ expect(pointer).to be_a(FFI::Pointer)
1143
+ end
1144
+ end
1145
+
909
1146
  describe "a rebalance listener" do
910
1147
  let(:consumer) do
911
1148
  config = rdkafka_consumer_config
@@ -916,11 +1153,11 @@ describe Rdkafka::Consumer do
916
1153
  context "with a working listener" do
917
1154
  let(:listener) do
918
1155
  Struct.new(:queue) do
919
- def on_partitions_assigned(consumer, list)
1156
+ def on_partitions_assigned(list)
920
1157
  collect(:assign, list)
921
1158
  end
922
1159
 
923
- def on_partitions_revoked(consumer, list)
1160
+ def on_partitions_revoked(list)
924
1161
  collect(:revoke, list)
925
1162
  end
926
1163
 
@@ -944,12 +1181,12 @@ describe Rdkafka::Consumer do
944
1181
  context "with a broken listener" do
945
1182
  let(:listener) do
946
1183
  Struct.new(:queue) do
947
- def on_partitions_assigned(consumer, list)
1184
+ def on_partitions_assigned(list)
948
1185
  queue << :assigned
949
1186
  raise 'boom'
950
1187
  end
951
1188
 
952
- def on_partitions_revoked(consumer, list)
1189
+ def on_partitions_revoked(list)
953
1190
  queue << :revoked
954
1191
  raise 'boom'
955
1192
  end
@@ -962,18 +1199,6 @@ describe Rdkafka::Consumer do
962
1199
  expect(listener.queue).to eq([:assigned, :revoked])
963
1200
  end
964
1201
  end
965
-
966
- def notify_listener(listener)
967
- # 1. subscribe and poll
968
- consumer.subscribe("consume_test_topic")
969
- wait_for_assignment(consumer)
970
- consumer.poll(100)
971
-
972
- # 2. unsubscribe
973
- consumer.unsubscribe
974
- wait_for_unassignment(consumer)
975
- consumer.close
976
- end
977
1202
  end
978
1203
 
979
1204
  context "methods that should not be called after a consumer has been closed" do
@@ -992,7 +1217,7 @@ describe Rdkafka::Consumer do
992
1217
  :assign => [ nil ],
993
1218
  :assignment => nil,
994
1219
  :committed => [],
995
- :query_watermark_offsets => [ nil, nil ],
1220
+ :query_watermark_offsets => [ nil, nil ]
996
1221
  }.each do |method, args|
997
1222
  it "raises an exception if #{method} is called" do
998
1223
  expect {
@@ -1005,4 +1230,70 @@ describe Rdkafka::Consumer do
1005
1230
  end
1006
1231
  end
1007
1232
  end
1233
+
1234
+ it "provides a finalizer that closes the native kafka client" do
1235
+ expect(consumer.closed?).to eq(false)
1236
+
1237
+ consumer.finalizer.call("some-ignored-object-id")
1238
+
1239
+ expect(consumer.closed?).to eq(true)
1240
+ end
1241
+
1242
+ context "when the rebalance protocol is cooperative" do
1243
+ let(:consumer) do
1244
+ config = rdkafka_consumer_config(
1245
+ {
1246
+ :"partition.assignment.strategy" => "cooperative-sticky",
1247
+ :"debug" => "consumer",
1248
+ }
1249
+ )
1250
+ config.consumer_rebalance_listener = listener
1251
+ config.consumer
1252
+ end
1253
+
1254
+ let(:listener) do
1255
+ Struct.new(:queue) do
1256
+ def on_partitions_assigned(list)
1257
+ collect(:assign, list)
1258
+ end
1259
+
1260
+ def on_partitions_revoked(list)
1261
+ collect(:revoke, list)
1262
+ end
1263
+
1264
+ def collect(name, list)
1265
+ partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
1266
+ queue << ([name] + partitions)
1267
+ end
1268
+ end.new([])
1269
+ end
1270
+
1271
+ it "should be able to assign and unassign partitions using the cooperative partition assignment APIs" do
1272
+ notify_listener(listener) do
1273
+ handles = []
1274
+ 10.times do
1275
+ handles << producer.produce(
1276
+ topic: "consume_test_topic",
1277
+ payload: "payload 1",
1278
+ key: "key 1",
1279
+ partition: 0
1280
+ )
1281
+ end
1282
+ handles.each(&:wait)
1283
+
1284
+ consumer.subscribe("consume_test_topic")
1285
+ # Check the first 10 messages. Then close the consumer, which
1286
+ # should break the each loop.
1287
+ consumer.each_with_index do |message, i|
1288
+ expect(message).to be_a Rdkafka::Consumer::Message
1289
+ break if i == 10
1290
+ end
1291
+ end
1292
+
1293
+ expect(listener.queue).to eq([
1294
+ [:assign, "consume_test_topic", 0, 1, 2],
1295
+ [:revoke, "consume_test_topic", 0, 1, 2]
1296
+ ])
1297
+ end
1298
+ end
1008
1299
  end