rdkafka 0.6.0 → 0.9.0

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +23 -0
  3. data/CHANGELOG.md +27 -0
  4. data/README.md +9 -9
  5. data/docker-compose.yml +17 -11
  6. data/ext/README.md +10 -15
  7. data/ext/Rakefile +24 -3
  8. data/lib/rdkafka.rb +8 -0
  9. data/lib/rdkafka/abstract_handle.rb +82 -0
  10. data/lib/rdkafka/admin.rb +155 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  15. data/lib/rdkafka/bindings.rb +64 -18
  16. data/lib/rdkafka/callbacks.rb +106 -0
  17. data/lib/rdkafka/config.rb +38 -9
  18. data/lib/rdkafka/consumer.rb +221 -46
  19. data/lib/rdkafka/consumer/headers.rb +7 -5
  20. data/lib/rdkafka/consumer/partition.rb +1 -1
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +6 -16
  22. data/lib/rdkafka/error.rb +35 -4
  23. data/lib/rdkafka/metadata.rb +92 -0
  24. data/lib/rdkafka/producer.rb +50 -24
  25. data/lib/rdkafka/producer/delivery_handle.rb +7 -49
  26. data/lib/rdkafka/producer/delivery_report.rb +7 -2
  27. data/lib/rdkafka/version.rb +3 -3
  28. data/rdkafka.gemspec +3 -3
  29. data/spec/rdkafka/abstract_handle_spec.rb +114 -0
  30. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  31. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  32. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  33. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  34. data/spec/rdkafka/admin_spec.rb +203 -0
  35. data/spec/rdkafka/bindings_spec.rb +28 -10
  36. data/spec/rdkafka/callbacks_spec.rb +20 -0
  37. data/spec/rdkafka/config_spec.rb +51 -9
  38. data/spec/rdkafka/consumer/message_spec.rb +6 -1
  39. data/spec/rdkafka/consumer_spec.rb +287 -20
  40. data/spec/rdkafka/error_spec.rb +7 -3
  41. data/spec/rdkafka/metadata_spec.rb +78 -0
  42. data/spec/rdkafka/producer/delivery_handle_spec.rb +3 -43
  43. data/spec/rdkafka/producer/delivery_report_spec.rb +5 -1
  44. data/spec/rdkafka/producer_spec.rb +220 -100
  45. data/spec/spec_helper.rb +34 -6
  46. metadata +37 -13
  47. data/.travis.yml +0 -34
data/spec/rdkafka/bindings_spec.rb

@@ -1,4 +1,5 @@
  require "spec_helper"
+ require 'zlib'
 
  describe Rdkafka::Bindings do
    it "should load librdkafka" do
@@ -7,12 +8,12 @@ describe Rdkafka::Bindings do
 
    describe ".lib_extension" do
      it "should know the lib extension for darwin" do
-       expect(Gem::Platform.local).to receive(:os).and_return("darwin-aaa")
+       stub_const('RbConfig::CONFIG', 'host_os' => 'darwin')
        expect(Rdkafka::Bindings.lib_extension).to eq "dylib"
      end
 
      it "should know the lib extension for linux" do
-       expect(Gem::Platform.local).to receive(:os).and_return("linux")
+       stub_const('RbConfig::CONFIG', 'host_os' => 'linux')
        expect(Rdkafka::Bindings.lib_extension).to eq "so"
      end
    end
@@ -24,39 +25,56 @@ describe Rdkafka::Bindings do
    end
 
    describe "log callback" do
-     let(:log) { StringIO.new }
+     let(:log_queue) { Rdkafka::Config.log_queue }
      before do
-       Rdkafka::Config.logger = Logger.new(log)
+       allow(log_queue).to receive(:<<)
      end
 
      it "should log fatal messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 0, nil, "log line")
-       expect(log.string).to include "FATAL -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::FATAL, "rdkafka: log line"])
      end
 
      it "should log error messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 3, nil, "log line")
-       expect(log.string).to include "ERROR -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::ERROR, "rdkafka: log line"])
      end
 
      it "should log warning messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 4, nil, "log line")
-       expect(log.string).to include "WARN -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::WARN, "rdkafka: log line"])
      end
 
      it "should log info messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 5, nil, "log line")
-       expect(log.string).to include "INFO -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::INFO, "rdkafka: log line"])
      end
 
      it "should log debug messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 7, nil, "log line")
-       expect(log.string).to include "DEBUG -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::DEBUG, "rdkafka: log line"])
      end
 
      it "should log unknown messages" do
        Rdkafka::Bindings::LogCallback.call(nil, 100, nil, "log line")
-       expect(log.string).to include "ANY -- : rdkafka: log line"
+       expect(log_queue).to have_received(:<<).with([Logger::UNKNOWN, "rdkafka: log line"])
+     end
+   end
+
+   describe "partitioner" do
+     let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
+     let(:partition_count) { rand(50) + 1 }
+
+     it "should return the same partition for a similar string and the same partition count" do
+       result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+       result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+       expect(result_1).to eq(result_2)
+     end
+
+     it "should match the old partitioner" do
+       result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+       result_2 = (Zlib.crc32(partition_key) % partition_count)
+       expect(result_1).to eq(result_2)
     end
   end
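
Note on the new partitioner binding: the spec above pins `Rdkafka::Bindings.partitioner` to the old `Zlib.crc32(key) % partition_count` scheme, so existing keys keep routing to the same partitions across the upgrade. A minimal sketch of that equivalence, runnable on its own against a working librdkafka build (the key and partition count are illustrative; no broker is needed):

    require "rdkafka"
    require "zlib"

    key = "user-42"      # illustrative partition key
    partition_count = 8  # illustrative partition count

    # The new binding and the legacy CRC32-modulo scheme should agree,
    # which is exactly what the "should match the old partitioner" spec asserts.
    native = Rdkafka::Bindings.partitioner(key, partition_count)
    legacy = Zlib.crc32(key) % partition_count
    puts native == legacy # => true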
data/spec/rdkafka/callbacks_spec.rb

@@ -0,0 +1,20 @@
+ require "spec_helper"
+
+ describe Rdkafka::Callbacks do
+
+   # The code in the call back functions is 100% covered by other specs. Due to
+   # the large number of collaborators, and the fact that FFI does not play
+   # nicely with doubles, it was very difficult to construct tests that were
+   # not over-mocked.
+
+   # For debugging purposes, if you suspect that you are running into trouble in
+   # one of the callback functions, it may be helpful to surround the inner body
+   # of the method with something like:
+   #
+   #   begin
+   #     <method body>
+   #   rescue => ex; puts ex.inspect; puts ex.backtrace; end;
+   #
+   # This will output to STDOUT any exceptions that are being raised in the callback.
+
+ end
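
The comment block above gives the debugging wrapper only as pseudocode. One concrete way to write it, in plain Ruby (illustrative only, not part of the gem), is to funnel the callback body through a guard that prints instead of letting the exception vanish at the FFI boundary:

    # Wrap a callback body so exceptions surface on STDOUT rather than being
    # silently swallowed when raised inside an FFI callback.
    def guarded_callback
      yield
    rescue => ex
      puts ex.inspect
      puts ex.backtrace
    end

    guarded_callback { raise "boom inside a callback" } # prints the error and backtrace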
data/spec/rdkafka/config_spec.rb

@@ -18,19 +18,46 @@ describe Rdkafka::Config do
        Rdkafka::Config.logger = nil
      }.to raise_error(Rdkafka::Config::NoLoggerError)
    end
+
+   it "supports logging queue" do
+     log = StringIO.new
+     Rdkafka::Config.logger = Logger.new(log)
+
+     Rdkafka::Config.log_queue << [Logger::FATAL, "I love testing"]
+     20.times do
+       break if log.string != ""
+       sleep 0.05
+     end
+
+     expect(log.string).to include "FATAL -- : I love testing"
+   end
  end
 
  context "statistics callback" do
-   it "should set the callback" do
-     expect {
-       Rdkafka::Config.statistics_callback = lambda do |stats|
-         puts stats
+   context "with a proc/lambda" do
+     it "should set the callback" do
+       expect {
+         Rdkafka::Config.statistics_callback = lambda do |stats|
+           puts stats
+         end
+       }.not_to raise_error
+       expect(Rdkafka::Config.statistics_callback).to respond_to :call
+     end
+   end
+
+   context "with a callable object" do
+     it "should set the callback" do
+       callback = Class.new do
+         def call(stats); end
        end
-     }.not_to raise_error
-     expect(Rdkafka::Config.statistics_callback).to be_a Proc
+       expect {
+         Rdkafka::Config.statistics_callback = callback.new
+       }.not_to raise_error
+       expect(Rdkafka::Config.statistics_callback).to respond_to :call
+     end
    end
 
-   it "should not accept a callback that's not a proc" do
+   it "should not accept a callback that's not callable" do
      expect {
        Rdkafka::Config.statistics_callback = 'a string'
      }.to raise_error(TypeError)
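
As the statistics-callback specs above show, the setter now accepts any object responding to `call`, not just a `Proc`. A minimal sketch of wiring one up (the `StatsReporter` class, broker address, and interval are illustrative choices, not gem defaults):

    require "rdkafka"

    # Any callable works now; the callback receives the statistics payload.
    class StatsReporter
      def call(stats)
        puts "stats keys: #{stats.keys.take(5).inspect}"
      end
    end

    Rdkafka::Config.statistics_callback = StatsReporter.new

    # librdkafka only emits statistics when an interval is configured.
    config = Rdkafka::Config.new(
      "bootstrap.servers"      => "localhost:9092",
      "statistics.interval.ms" => 5_000
    )
    producer = config.producer
    # ... produce as usual; the reporter is invoked on each stats emission.
    producer.close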
@@ -50,7 +77,9 @@ describe Rdkafka::Config do
  end
 
  it "should create a consumer with valid config" do
-   expect(rdkafka_config.consumer).to be_a Rdkafka::Consumer
+   consumer = rdkafka_config.consumer
+   expect(consumer).to be_a Rdkafka::Consumer
+   consumer.close
  end
 
  it "should raise an error when creating a consumer with invalid config" do
@@ -76,7 +105,9 @@ describe Rdkafka::Config do
  end
 
  it "should create a producer with valid config" do
-   expect(rdkafka_config.producer).to be_a Rdkafka::Producer
+   producer = rdkafka_config.producer
+   expect(producer).to be_a Rdkafka::Producer
+   producer.close
  end
 
  it "should raise an error when creating a producer with invalid config" do
@@ -86,6 +117,17 @@ describe Rdkafka::Config do
    }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"")
  end
 
+ it "should allow configuring zstd compression" do
+   config = Rdkafka::Config.new('compression.codec' => 'zstd')
+   begin
+     expect(config.producer).to be_a Rdkafka::Producer
+     config.producer.close
+   rescue Rdkafka::Config::ConfigError => ex
+     pending "Zstd compression not supported on this machine"
+     raise ex
+   end
+ end
+
  it "should raise an error when client creation fails for a consumer" do
    config = Rdkafka::Config.new(
      "security.protocol" => "SSL",
data/spec/rdkafka/consumer/message_spec.rb

@@ -1,7 +1,8 @@
  require "spec_helper"
 
  describe Rdkafka::Consumer::Message do
-   let(:native_topic) { new_native_topic }
+   let(:native_client) { new_native_client }
+   let(:native_topic) { new_native_topic(native_client: native_client) }
    let(:payload) { nil }
    let(:key) { nil }
    let(:native_message) do
@@ -24,6 +25,10 @@ describe Rdkafka::Consumer::Message do
      end
    end
 
+   after(:each) do
+     Rdkafka::Bindings.rd_kafka_destroy(native_client)
+   end
+
    subject { Rdkafka::Consumer::Message.new(native_message) }
 
    before do
data/spec/rdkafka/consumer_spec.rb

@@ -1,12 +1,16 @@
  require "spec_helper"
  require "ostruct"
+ require 'securerandom'
 
  describe Rdkafka::Consumer do
    let(:config) { rdkafka_config }
    let(:consumer) { config.consumer }
    let(:producer) { config.producer }
 
-   describe "#subscripe, #unsubscribe and #subscription" do
+   after { consumer.close }
+   after { producer.close }
+
+   describe "#subscribe, #unsubscribe and #subscription" do
      it "should subscribe, unsubscribe and return the subscription" do
        expect(consumer.subscription).to be_empty
 
@@ -85,10 +89,9 @@ describe Rdkafka::Consumer do
        tpl.add_topic("consume_test_topic", (0..2))
        consumer.resume(tpl)
 
-       # 8. ensure that message is successfuly consumed
+       # 8. ensure that message is successfully consumed
        records = consumer.poll(timeout)
        expect(records).not_to be_nil
-       consumer.commit
      end
    end
 
@@ -205,8 +208,6 @@ describe Rdkafka::Consumer do
        expect(records&.payload).to eq "payload c"
        records = consumer.poll(timeout)
        expect(records).to be_nil
-
-       consumer.commit
      end
    end
  end
@@ -271,8 +272,18 @@ describe Rdkafka::Consumer do
  describe "#close" do
    it "should close a consumer" do
      consumer.subscribe("consume_test_topic")
+     100.times do |i|
+       report = producer.produce(
+         topic: "consume_test_topic",
+         payload: "payload #{i}",
+         key: "key #{i}",
+         partition: 0
+       ).wait
+     end
      consumer.close
-     expect(consumer.poll(100)).to be_nil
+     expect {
+       consumer.poll(100)
+     }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
    end
  end
 
@@ -313,11 +324,11 @@ describe Rdkafka::Consumer do
      }.to raise_error TypeError
    end
 
-   context "with a commited consumer" do
+   context "with a committed consumer" do
      before :all do
-       # Make sure there are some message
-       producer = rdkafka_config.producer
+       # Make sure there are some messages.
        handles = []
+       producer = rdkafka_config.producer
        10.times do
          (0..2).each do |i|
            handles << producer.produce(
@@ -329,6 +340,7 @@ describe Rdkafka::Consumer do
          end
        end
        handles.each(&:wait)
+       producer.close
      end
 
      before do
@@ -389,20 +401,26 @@ describe Rdkafka::Consumer do
 
  describe "#store_offset" do
    before do
+     config = {}
      config[:'enable.auto.offset.store'] = false
      config[:'enable.auto.commit'] = false
-     consumer.subscribe("consume_test_topic")
-     wait_for_assignment(consumer)
+     @new_consumer = rdkafka_config(config).consumer
+     @new_consumer.subscribe("consume_test_topic")
+     wait_for_assignment(@new_consumer)
+   end
+
+   after do
+     @new_consumer.close
    end
 
    it "should store the offset for a message" do
-     consumer.store_offset(message)
-     consumer.commit
+     @new_consumer.store_offset(message)
+     @new_consumer.commit
 
      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
        list.add_topic("consume_test_topic", [0, 1, 2])
      end
-     partitions = consumer.committed(list).to_h["consume_test_topic"]
+     partitions = @new_consumer.committed(list).to_h["consume_test_topic"]
      expect(partitions).not_to be_nil
      expect(partitions[message.partition].offset).to eq(message.offset + 1)
    end
@@ -410,7 +428,7 @@ describe Rdkafka::Consumer do
    it "should raise an error with invalid input" do
      allow(message).to receive(:partition).and_return(9999)
      expect {
-       consumer.store_offset(message)
+       @new_consumer.store_offset(message)
      }.to raise_error Rdkafka::RdkafkaError
    end
  end
@@ -554,12 +572,12 @@ describe Rdkafka::Consumer do
        payload: "payload 1",
        key: "key 1"
      ).wait
-
      consumer.subscribe("consume_test_topic")
-     message = consumer.poll(5000)
-     expect(message).to be_a Rdkafka::Consumer::Message
+     message = consumer.each {|m| break m}
 
-     # Message content is tested in producer spec
+     expect(message).to be_a Rdkafka::Consumer::Message
+     expect(message.payload).to eq('payload 1')
+     expect(message.key).to eq('key 1')
    end
 
    it "should raise an error when polling fails" do
@@ -655,7 +673,226 @@ describe Rdkafka::Consumer do
      # should break the each loop.
      consumer.each_with_index do |message, i|
        expect(message).to be_a Rdkafka::Consumer::Message
-       consumer.close if i == 10
+       break if i == 10
+     end
+     consumer.close
+   end
+ end
+
+ describe "#each_batch" do
+   let(:message_payload) { 'a' * 10 }
+
+   before do
+     @topic = SecureRandom.base64(10).tr('+=/', '')
+   end
+
+   after do
+     @topic = nil
+   end
+
+   def topic_name
+     @topic
+   end
+
+   def produce_n(n)
+     handles = []
+     n.times do |i|
+       handles << producer.produce(
+         topic: topic_name,
+         payload: Time.new.to_f.to_s,
+         key: i.to_s,
+         partition: 0
+       )
+     end
+     handles.each(&:wait)
+   end
+
+   def new_message
+     instance_double("Rdkafka::Consumer::Message").tap do |message|
+       allow(message).to receive(:payload).and_return(message_payload)
+     end
+   end
+
+   it "retrieves messages produced into a topic" do
+     # This is the only each_batch test that actually produces real messages
+     # into a topic in the real kafka of the container.
+     #
+     # The other tests stub 'poll' which makes them faster and more reliable,
+     # but it makes sense to keep a single test with a fully integrated flow.
+     # This will help to catch breaking changes in the behavior of 'poll',
+     # libdrkafka, or Kafka.
+     #
+     # This is, in effect, an integration test and the subsequent specs are
+     # unit tests.
+     consumer.subscribe(topic_name)
+     produce_n 42
+     all_yields = []
+     consumer.each_batch(max_items: 10) do |batch|
+       all_yields << batch
+       break if all_yields.flatten.size >= 42
+     end
+     expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
+     expect(all_yields.flatten.size).to eq 42
+     expect(all_yields.size).to be > 4
+     expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
+   end
+
+   it "should batch poll results and yield arrays of messages" do
+     consumer.subscribe(topic_name)
+     all_yields = []
+     expect(consumer)
+       .to receive(:poll)
+       .exactly(10).times
+       .and_return(new_message)
+     consumer.each_batch(max_items: 10) do |batch|
+       all_yields << batch
+       break if all_yields.flatten.size >= 10
+     end
+     expect(all_yields.first).to be_instance_of(Array)
+     expect(all_yields.flatten.size).to eq 10
+     non_empty_yields = all_yields.reject { |batch| batch.empty? }
+     expect(non_empty_yields.size).to be < 10
+   end
+
+   it "should yield a partial batch if the timeout is hit with some messages" do
+     consumer.subscribe(topic_name)
+     poll_count = 0
+     expect(consumer)
+       .to receive(:poll)
+       .at_least(3).times do
+         poll_count = poll_count + 1
+         if poll_count > 2
+           sleep 0.1
+           nil
+         else
+           new_message
+         end
+       end
+     all_yields = []
+     consumer.each_batch(max_items: 10) do |batch|
+       all_yields << batch
+       break if all_yields.flatten.size >= 2
+     end
+     expect(all_yields.flatten.size).to eq 2
+   end
+
+   it "should yield [] if nothing is received before the timeout" do
+     consumer.subscribe(topic_name)
+     consumer.each_batch do |batch|
+       expect(batch).to eq([])
+       break
+     end
+   end
+
+   it "should yield batchs of max_items in size if messages are already fetched" do
+     yielded_batches = []
+     expect(consumer)
+       .to receive(:poll)
+       .with(anything)
+       .exactly(20).times
+       .and_return(new_message)
+
+     consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
+       yielded_batches << batch
+       break if yielded_batches.flatten.size >= 20
+       break if yielded_batches.size >= 20 # so failure doesn't hang
+     end
+     expect(yielded_batches.size).to eq 2
+     expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
+   end
+
+   it "should yield batchs as soon as bytes_threshold is hit" do
+     yielded_batches = []
+     expect(consumer)
+       .to receive(:poll)
+       .with(anything)
+       .exactly(20).times
+       .and_return(new_message)
+
+     consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
+       yielded_batches << batch
+       break if yielded_batches.flatten.size >= 20
+       break if yielded_batches.size >= 20 # so failure doesn't hang
+     end
+     expect(yielded_batches.size).to eq 5
+     expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
+   end
+
+   context "error raised from poll and yield_on_error is true" do
+     it "should yield buffered exceptions on rebalance, then break" do
+       config = rdkafka_config({:"enable.auto.commit" => false,
+                                :"enable.auto.offset.store" => false })
+       consumer = config.consumer
+       consumer.subscribe(topic_name)
+       loop_count = 0
+       batches_yielded = []
+       exceptions_yielded = []
+       each_batch_iterations = 0
+       poll_count = 0
+       expect(consumer)
+         .to receive(:poll)
+         .with(anything)
+         .exactly(3).times
+         .and_wrap_original do |method, *args|
+           poll_count = poll_count + 1
+           if poll_count == 3
+             raise Rdkafka::RdkafkaError.new(27,
+                   "partitions ... too ... heavy ... must ... rebalance")
+           else
+             new_message
+           end
+         end
+       expect {
+         consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
+           batches_yielded << batch
+           exceptions_yielded << pending_error
+           each_batch_iterations = each_batch_iterations + 1
+         end
+       }.to raise_error(Rdkafka::RdkafkaError)
+       expect(poll_count).to eq 3
+       expect(each_batch_iterations).to eq 1
+       expect(batches_yielded.size).to eq 1
+       expect(batches_yielded.first.size).to eq 2
+       expect(exceptions_yielded.flatten.size).to eq 1
+       expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
+     end
+   end
+
+   context "error raised from poll and yield_on_error is false" do
+     it "should yield buffered exceptions on rebalance, then break" do
+       config = rdkafka_config({:"enable.auto.commit" => false,
+                                :"enable.auto.offset.store" => false })
+       consumer = config.consumer
+       consumer.subscribe(topic_name)
+       loop_count = 0
+       batches_yielded = []
+       exceptions_yielded = []
+       each_batch_iterations = 0
+       poll_count = 0
+       expect(consumer)
+         .to receive(:poll)
+         .with(anything)
+         .exactly(3).times
+         .and_wrap_original do |method, *args|
+           poll_count = poll_count + 1
+           if poll_count == 3
+             raise Rdkafka::RdkafkaError.new(27,
+                   "partitions ... too ... heavy ... must ... rebalance")
+           else
+             new_message
+           end
+         end
+       expect {
+         consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
+           batches_yielded << batch
+           exceptions_yielded << pending_error
+           each_batch_iterations = each_batch_iterations + 1
+         end
+       }.to raise_error(Rdkafka::RdkafkaError)
+       expect(poll_count).to eq 3
+       expect(each_batch_iterations).to eq 0
+       expect(batches_yielded.size).to eq 0
+       expect(exceptions_yielded.size).to eq 0
      end
    end
  end
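
For reference, the `each_batch` API these specs exercise takes the keyword arguments shown above (`max_items`, `bytes_threshold`, `timeout_ms`, `yield_on_error`). A short usage sketch under assumed settings (local broker and an existing `events` topic are illustrative):

    require "rdkafka"

    consumer = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "batch-example"
    ).consumer
    consumer.subscribe("events")

    # Yields arrays of up to 100 messages; per the specs above, a smaller
    # (possibly empty) batch is yielded when timeout_ms elapses, and a batch
    # is flushed early once the accumulated payload bytes cross bytes_threshold.
    consumer.each_batch(max_items: 100, bytes_threshold: 64_000, timeout_ms: 250) do |batch|
      batch.each { |message| puts "#{message.partition}/#{message.offset}" }
    end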
@@ -716,4 +953,34 @@ describe Rdkafka::Consumer do
      consumer.close
    end
  end
+
+ context "methods that should not be called after a consumer has been closed" do
+   before do
+     consumer.close
+   end
+
+   # Affected methods and a non-invalid set of parameters for the method
+   {
+     :subscribe => [ nil ],
+     :unsubscribe => nil,
+     :each_batch => nil,
+     :pause => [ nil ],
+     :resume => [ nil ],
+     :subscription => nil,
+     :assign => [ nil ],
+     :assignment => nil,
+     :committed => [],
+     :query_watermark_offsets => [ nil, nil ],
+   }.each do |method, args|
+     it "raises an exception if #{method} is called" do
+       expect {
+         if args.nil?
+           consumer.public_send(method)
+         else
+           consumer.public_send(method, *args)
+         end
+       }.to raise_exception(Rdkafka::ClosedConsumerError, /#{method.to_s}/)
+     end
+   end
+ end
  end
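
The table of guarded methods above means use-after-close now fails loudly instead of returning nil. What calling code sees, sketched with an illustrative broker address:

    require "rdkafka"

    consumer = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "example"
    ).consumer
    consumer.close

    begin
      consumer.subscription
    rescue Rdkafka::ClosedConsumerError => e
      # The error message names the offending method, as the /#{method}/
      # matcher in the spec above asserts.
      warn e.message
    end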