rdkafka 0.8.0 → 0.11.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (45) hide show
  1. checksums.yaml +4 -4
  2. data/.rspec +1 -0
  3. data/.semaphore/semaphore.yml +23 -0
  4. data/CHANGELOG.md +24 -1
  5. data/Guardfile +19 -0
  6. data/README.md +8 -3
  7. data/bin/console +11 -0
  8. data/docker-compose.yml +5 -3
  9. data/ext/README.md +8 -1
  10. data/ext/Rakefile +5 -20
  11. data/lib/rdkafka/abstract_handle.rb +82 -0
  12. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  13. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  14. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  15. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  16. data/lib/rdkafka/admin.rb +155 -0
  17. data/lib/rdkafka/bindings.rb +57 -18
  18. data/lib/rdkafka/callbacks.rb +106 -0
  19. data/lib/rdkafka/config.rb +59 -3
  20. data/lib/rdkafka/consumer.rb +125 -5
  21. data/lib/rdkafka/error.rb +29 -3
  22. data/lib/rdkafka/metadata.rb +6 -5
  23. data/lib/rdkafka/producer/delivery_handle.rb +7 -53
  24. data/lib/rdkafka/producer/delivery_report.rb +1 -1
  25. data/lib/rdkafka/producer.rb +27 -12
  26. data/lib/rdkafka/version.rb +3 -3
  27. data/lib/rdkafka.rb +7 -0
  28. data/rdkafka.gemspec +9 -7
  29. data/spec/rdkafka/abstract_handle_spec.rb +113 -0
  30. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  31. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  32. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  33. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  34. data/spec/rdkafka/admin_spec.rb +203 -0
  35. data/spec/rdkafka/bindings_spec.rb +32 -8
  36. data/spec/rdkafka/callbacks_spec.rb +20 -0
  37. data/spec/rdkafka/config_spec.rb +78 -9
  38. data/spec/rdkafka/consumer_spec.rb +326 -42
  39. data/spec/rdkafka/error_spec.rb +4 -0
  40. data/spec/rdkafka/metadata_spec.rb +78 -0
  41. data/spec/rdkafka/producer/delivery_handle_spec.rb +1 -41
  42. data/spec/rdkafka/producer_spec.rb +102 -34
  43. data/spec/spec_helper.rb +78 -20
  44. metadata +84 -29
  45. data/.travis.yml +0 -48
@@ -25,39 +25,39 @@ describe Rdkafka::Bindings do
25
25
  end
26
26
 
27
27
  describe "log callback" do
28
- let(:log) { StringIO.new }
28
+ let(:log_queue) { Rdkafka::Config.log_queue }
29
29
  before do
30
- Rdkafka::Config.logger = Logger.new(log)
30
+ allow(log_queue).to receive(:<<)
31
31
  end
32
32
 
33
33
  it "should log fatal messages" do
34
34
  Rdkafka::Bindings::LogCallback.call(nil, 0, nil, "log line")
35
- expect(log.string).to include "FATAL -- : rdkafka: log line"
35
+ expect(log_queue).to have_received(:<<).with([Logger::FATAL, "rdkafka: log line"])
36
36
  end
37
37
 
38
38
  it "should log error messages" do
39
39
  Rdkafka::Bindings::LogCallback.call(nil, 3, nil, "log line")
40
- expect(log.string).to include "ERROR -- : rdkafka: log line"
40
+ expect(log_queue).to have_received(:<<).with([Logger::ERROR, "rdkafka: log line"])
41
41
  end
42
42
 
43
43
  it "should log warning messages" do
44
44
  Rdkafka::Bindings::LogCallback.call(nil, 4, nil, "log line")
45
- expect(log.string).to include "WARN -- : rdkafka: log line"
45
+ expect(log_queue).to have_received(:<<).with([Logger::WARN, "rdkafka: log line"])
46
46
  end
47
47
 
48
48
  it "should log info messages" do
49
49
  Rdkafka::Bindings::LogCallback.call(nil, 5, nil, "log line")
50
- expect(log.string).to include "INFO -- : rdkafka: log line"
50
+ expect(log_queue).to have_received(:<<).with([Logger::INFO, "rdkafka: log line"])
51
51
  end
52
52
 
53
53
  it "should log debug messages" do
54
54
  Rdkafka::Bindings::LogCallback.call(nil, 7, nil, "log line")
55
- expect(log.string).to include "DEBUG -- : rdkafka: log line"
55
+ expect(log_queue).to have_received(:<<).with([Logger::DEBUG, "rdkafka: log line"])
56
56
  end
57
57
 
58
58
  it "should log unknown messages" do
59
59
  Rdkafka::Bindings::LogCallback.call(nil, 100, nil, "log line")
60
- expect(log.string).to include "ANY -- : rdkafka: log line"
60
+ expect(log_queue).to have_received(:<<).with([Logger::UNKNOWN, "rdkafka: log line"])
61
61
  end
62
62
  end
63
63
 
@@ -100,4 +100,28 @@ describe Rdkafka::Bindings do
100
100
  end
101
101
  end
102
102
  end
103
+
104
+ describe "error callback" do
105
+ context "without an error callback" do
106
+ it "should do nothing" do
107
+ expect {
108
+ Rdkafka::Bindings::ErrorCallback.call(nil, 1, "error", nil)
109
+ }.not_to raise_error
110
+ end
111
+ end
112
+
113
+ context "with an error callback" do
114
+ before do
115
+ Rdkafka::Config.error_callback = lambda do |error|
116
+ $received_error = error
117
+ end
118
+ end
119
+
120
+ it "should call the error callback with an Rdkafka::Error" do
121
+ Rdkafka::Bindings::ErrorCallback.call(nil, 8, "Broker not available", nil)
122
+ expect($received_error.code).to eq(:broker_not_available)
123
+ expect($received_error.broker_message).to eq("Broker not available")
124
+ end
125
+ end
126
+ end
103
127
  end
@@ -0,0 +1,20 @@
1
+ require "spec_helper"
2
+
3
+ describe Rdkafka::Callbacks do
4
+
5
+ # The code in the callback functions is 100% covered by other specs. Due to
6
+ # the large number of collaborators, and the fact that FFI does not play
7
+ # nicely with doubles, it was very difficult to construct tests that were
8
+ # not over-mocked.
9
+
10
+ # For debugging purposes, if you suspect that you are running into trouble in
11
+ # one of the callback functions, it may be helpful to surround the inner body
12
+ # of the method with something like:
13
+ #
14
+ # begin
15
+ # <method body>
16
+ # rescue => ex; puts ex.inspect; puts ex.backtrace; end;
17
+ #
18
+ # This will output to STDOUT any exceptions that are being raised in the callback.
19
+
20
+ end
@@ -18,25 +18,83 @@ describe Rdkafka::Config do
18
18
  Rdkafka::Config.logger = nil
19
19
  }.to raise_error(Rdkafka::Config::NoLoggerError)
20
20
  end
21
+
22
+ it "supports logging queue" do
23
+ log = StringIO.new
24
+ Rdkafka::Config.logger = Logger.new(log)
25
+
26
+ Rdkafka::Config.log_queue << [Logger::FATAL, "I love testing"]
27
+ 20.times do
28
+ break if log.string != ""
29
+ sleep 0.05
30
+ end
31
+
32
+ expect(log.string).to include "FATAL -- : I love testing"
33
+ end
21
34
  end
22
35
 
23
36
  context "statistics callback" do
24
- it "should set the callback" do
25
- expect {
26
- Rdkafka::Config.statistics_callback = lambda do |stats|
27
- puts stats
37
+ context "with a proc/lambda" do
38
+ it "should set the callback" do
39
+ expect {
40
+ Rdkafka::Config.statistics_callback = lambda do |stats|
41
+ puts stats
42
+ end
43
+ }.not_to raise_error
44
+ expect(Rdkafka::Config.statistics_callback).to respond_to :call
45
+ end
46
+ end
47
+
48
+ context "with a callable object" do
49
+ it "should set the callback" do
50
+ callback = Class.new do
51
+ def call(stats); end
28
52
  end
29
- }.not_to raise_error
30
- expect(Rdkafka::Config.statistics_callback).to be_a Proc
53
+ expect {
54
+ Rdkafka::Config.statistics_callback = callback.new
55
+ }.not_to raise_error
56
+ expect(Rdkafka::Config.statistics_callback).to respond_to :call
57
+ end
31
58
  end
32
59
 
33
- it "should not accept a callback that's not a proc" do
60
+ it "should not accept a callback that's not callable" do
34
61
  expect {
35
62
  Rdkafka::Config.statistics_callback = 'a string'
36
63
  }.to raise_error(TypeError)
37
64
  end
38
65
  end
39
66
 
67
+ context "error callback" do
68
+ context "with a proc/lambda" do
69
+ it "should set the callback" do
70
+ expect {
71
+ Rdkafka::Config.error_callback = lambda do |error|
72
+ puts error
73
+ end
74
+ }.not_to raise_error
75
+ expect(Rdkafka::Config.error_callback).to respond_to :call
76
+ end
77
+ end
78
+
79
+ context "with a callable object" do
80
+ it "should set the callback" do
81
+ callback = Class.new do
82
+ def call(stats); end
83
+ end
84
+ expect {
85
+ Rdkafka::Config.error_callback = callback.new
86
+ }.not_to raise_error
87
+ expect(Rdkafka::Config.error_callback).to respond_to :call
88
+ end
89
+ end
90
+
91
+ it "should not accept a callback that's not callable" do
92
+ expect {
93
+ Rdkafka::Config.error_callback = 'a string'
94
+ }.to raise_error(TypeError)
95
+ end
96
+ end
97
+
40
98
  context "configuration" do
41
99
  it "should store configuration" do
42
100
  config = Rdkafka::Config.new
@@ -50,7 +108,7 @@ describe Rdkafka::Config do
50
108
  end
51
109
 
52
110
  it "should create a consumer with valid config" do
53
- consumer = rdkafka_config.consumer
111
+ consumer = rdkafka_consumer_config.consumer
54
112
  expect(consumer).to be_a Rdkafka::Consumer
55
113
  consumer.close
56
114
  end
@@ -78,7 +136,7 @@ describe Rdkafka::Config do
78
136
  end
79
137
 
80
138
  it "should create a producer with valid config" do
81
- producer = rdkafka_config.producer
139
+ producer = rdkafka_consumer_config.producer
82
140
  expect(producer).to be_a Rdkafka::Producer
83
141
  producer.close
84
142
  end
@@ -90,6 +148,17 @@ describe Rdkafka::Config do
90
148
  }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"")
91
149
  end
92
150
 
151
+ it "should allow configuring zstd compression" do
152
+ config = Rdkafka::Config.new('compression.codec' => 'zstd')
153
+ begin
154
+ expect(config.producer).to be_a Rdkafka::Producer
155
+ config.producer.close
156
+ rescue Rdkafka::Config::ConfigError => ex
157
+ pending "Zstd compression not supported on this machine"
158
+ raise ex
159
+ end
160
+ end
161
+
93
162
  it "should raise an error when client creation fails for a consumer" do
94
163
  config = Rdkafka::Config.new(
95
164
  "security.protocol" => "SSL",
@@ -1,10 +1,10 @@
1
1
  require "spec_helper"
2
2
  require "ostruct"
3
+ require 'securerandom'
3
4
 
4
5
  describe Rdkafka::Consumer do
5
- let(:config) { rdkafka_config }
6
- let(:consumer) { config.consumer }
7
- let(:producer) { config.producer }
6
+ let(:consumer) { rdkafka_consumer_config.consumer }
7
+ let(:producer) { rdkafka_producer_config.producer }
8
8
 
9
9
  after { consumer.close }
10
10
  after { producer.close }
@@ -271,8 +271,18 @@ describe Rdkafka::Consumer do
271
271
  describe "#close" do
272
272
  it "should close a consumer" do
273
273
  consumer.subscribe("consume_test_topic")
274
+ 100.times do |i|
275
+ report = producer.produce(
276
+ topic: "consume_test_topic",
277
+ payload: "payload #{i}",
278
+ key: "key #{i}",
279
+ partition: 0
280
+ ).wait
281
+ end
274
282
  consumer.close
275
- expect(consumer.poll(100)).to be_nil
283
+ expect {
284
+ consumer.poll(100)
285
+ }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
276
286
  end
277
287
  end
278
288
 
@@ -317,7 +327,7 @@ describe Rdkafka::Consumer do
317
327
  before :all do
318
328
  # Make sure there are some messages.
319
329
  handles = []
320
- producer = rdkafka_config.producer
330
+ producer = rdkafka_producer_config.producer
321
331
  10.times do
322
332
  (0..2).each do |i|
323
333
  handles << producer.produce(
@@ -393,7 +403,7 @@ describe Rdkafka::Consumer do
393
403
  config = {}
394
404
  config[:'enable.auto.offset.store'] = false
395
405
  config[:'enable.auto.commit'] = false
396
- @new_consumer = rdkafka_config(config).consumer
406
+ @new_consumer = rdkafka_consumer_config(config).consumer
397
407
  @new_consumer.subscribe("consume_test_topic")
398
408
  wait_for_assignment(@new_consumer)
399
409
  end
@@ -448,13 +458,13 @@ describe Rdkafka::Consumer do
448
458
  end
449
459
 
450
460
  describe "#lag" do
451
- let(:config) { rdkafka_config(:"enable.partition.eof" => true) }
461
+ let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }
452
462
 
453
463
  it "should calculate the consumer lag" do
454
464
  # Make sure there's a message in every partition and
455
465
  # wait for the message to make sure everything is committed.
456
466
  (0..2).each do |i|
457
- report = producer.produce(
467
+ producer.produce(
458
468
  topic: "consume_test_topic",
459
469
  key: "key lag #{i}",
460
470
  partition: i
@@ -497,7 +507,7 @@ describe Rdkafka::Consumer do
497
507
 
498
508
  # Produce message on every topic again
499
509
  (0..2).each do |i|
500
- report = producer.produce(
510
+ producer.produce(
501
511
  topic: "consume_test_topic",
502
512
  key: "key lag #{i}",
503
513
  partition: i
@@ -662,57 +672,301 @@ describe Rdkafka::Consumer do
662
672
  # should break the each loop.
663
673
  consumer.each_with_index do |message, i|
664
674
  expect(message).to be_a Rdkafka::Consumer::Message
665
- consumer.close if i == 10
675
+ break if i == 10
666
676
  end
677
+ consumer.close
667
678
  end
668
679
  end
669
680
 
670
- describe "a rebalance listener" do
671
- it "should get notifications" do
672
- listener = Struct.new(:queue) do
673
- def on_partitions_assigned(consumer, list)
674
- collect(:assign, list)
675
- end
681
+ describe "#each_batch" do
682
+ let(:message_payload) { 'a' * 10 }
676
683
 
677
- def on_partitions_revoked(consumer, list)
678
- collect(:revoke, list)
679
- end
684
+ before do
685
+ @topic = SecureRandom.base64(10).tr('+=/', '')
686
+ end
680
687
 
681
- def collect(name, list)
682
- partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
683
- queue << ([name] + partitions)
684
- end
685
- end.new([])
688
+ after do
689
+ @topic = nil
690
+ end
686
691
 
687
- notify_listener(listener)
692
+ def topic_name
693
+ @topic
694
+ end
688
695
 
689
- expect(listener.queue).to eq([
690
- [:assign, "consume_test_topic", 0, 1, 2],
691
- [:revoke, "consume_test_topic", 0, 1, 2]
692
- ])
696
+ def produce_n(n)
697
+ handles = []
698
+ n.times do |i|
699
+ handles << producer.produce(
700
+ topic: topic_name,
701
+ payload: Time.new.to_f.to_s,
702
+ key: i.to_s,
703
+ partition: 0
704
+ )
705
+ end
706
+ handles.each(&:wait)
693
707
  end
694
708
 
695
- it 'should handle callback exceptions' do
696
- listener = Struct.new(:queue) do
697
- def on_partitions_assigned(consumer, list)
698
- queue << :assigned
699
- raise 'boom'
700
- end
709
+ def new_message
710
+ instance_double("Rdkafka::Consumer::Message").tap do |message|
711
+ allow(message).to receive(:payload).and_return(message_payload)
712
+ end
713
+ end
701
714
 
702
- def on_partitions_revoked(consumer, list)
703
- queue << :revoked
704
- raise 'boom'
715
+ it "retrieves messages produced into a topic" do
716
+ # This is the only each_batch test that actually produces real messages
717
+ # into a topic in the real kafka of the container.
718
+ #
719
+ # The other tests stub 'poll' which makes them faster and more reliable,
720
+ # but it makes sense to keep a single test with a fully integrated flow.
721
+ # This will help to catch breaking changes in the behavior of 'poll',
722
+ # libdrkafka, or Kafka.
723
+ #
724
+ # This is, in effect, an integration test and the subsequent specs are
725
+ # unit tests.
726
+ create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
727
+ create_topic_handle.wait(max_wait_timeout: 15.0)
728
+ consumer.subscribe(topic_name)
729
+ produce_n 42
730
+ all_yields = []
731
+ consumer.each_batch(max_items: 10) do |batch|
732
+ all_yields << batch
733
+ break if all_yields.flatten.size >= 42
734
+ end
735
+ expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
736
+ expect(all_yields.flatten.size).to eq 42
737
+ expect(all_yields.size).to be > 4
738
+ expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
739
+ end
740
+
741
+ it "should batch poll results and yield arrays of messages" do
742
+ consumer.subscribe(topic_name)
743
+ all_yields = []
744
+ expect(consumer)
745
+ .to receive(:poll)
746
+ .exactly(10).times
747
+ .and_return(new_message)
748
+ consumer.each_batch(max_items: 10) do |batch|
749
+ all_yields << batch
750
+ break if all_yields.flatten.size >= 10
751
+ end
752
+ expect(all_yields.first).to be_instance_of(Array)
753
+ expect(all_yields.flatten.size).to eq 10
754
+ non_empty_yields = all_yields.reject { |batch| batch.empty? }
755
+ expect(non_empty_yields.size).to be < 10
756
+ end
757
+
758
+ it "should yield a partial batch if the timeout is hit with some messages" do
759
+ consumer.subscribe(topic_name)
760
+ poll_count = 0
761
+ expect(consumer)
762
+ .to receive(:poll)
763
+ .at_least(3).times do
764
+ poll_count = poll_count + 1
765
+ if poll_count > 2
766
+ sleep 0.1
767
+ nil
768
+ else
769
+ new_message
705
770
  end
706
- end.new([])
771
+ end
772
+ all_yields = []
773
+ consumer.each_batch(max_items: 10) do |batch|
774
+ all_yields << batch
775
+ break if all_yields.flatten.size >= 2
776
+ end
777
+ expect(all_yields.flatten.size).to eq 2
778
+ end
779
+
780
+ it "should yield [] if nothing is received before the timeout" do
781
+ create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
782
+ create_topic_handle.wait(max_wait_timeout: 15.0)
783
+ consumer.subscribe(topic_name)
784
+ consumer.each_batch do |batch|
785
+ expect(batch).to eq([])
786
+ break
787
+ end
788
+ end
707
789
 
708
- notify_listener(listener)
790
+ it "should yield batches of max_items in size if messages are already fetched" do
791
+ yielded_batches = []
792
+ expect(consumer)
793
+ .to receive(:poll)
794
+ .with(anything)
795
+ .exactly(20).times
796
+ .and_return(new_message)
709
797
 
710
- expect(listener.queue).to eq([:assigned, :revoked])
798
+ consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
799
+ yielded_batches << batch
800
+ break if yielded_batches.flatten.size >= 20
801
+ break if yielded_batches.size >= 20 # so failure doesn't hang
802
+ end
803
+ expect(yielded_batches.size).to eq 2
804
+ expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
805
+ end
806
+
807
+ it "should yield batches as soon as bytes_threshold is hit" do
808
+ yielded_batches = []
809
+ expect(consumer)
810
+ .to receive(:poll)
811
+ .with(anything)
812
+ .exactly(20).times
813
+ .and_return(new_message)
814
+
815
+ consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
816
+ yielded_batches << batch
817
+ break if yielded_batches.flatten.size >= 20
818
+ break if yielded_batches.size >= 20 # so failure doesn't hang
819
+ end
820
+ expect(yielded_batches.size).to eq 5
821
+ expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
822
+ end
823
+
824
+ context "error raised from poll and yield_on_error is true" do
825
+ it "should yield buffered exceptions on rebalance, then break" do
826
+ config = rdkafka_consumer_config(
827
+ {
828
+ :"enable.auto.commit" => false,
829
+ :"enable.auto.offset.store" => false
830
+ }
831
+ )
832
+ consumer = config.consumer
833
+ consumer.subscribe(topic_name)
834
+ loop_count = 0
835
+ batches_yielded = []
836
+ exceptions_yielded = []
837
+ each_batch_iterations = 0
838
+ poll_count = 0
839
+ expect(consumer)
840
+ .to receive(:poll)
841
+ .with(anything)
842
+ .exactly(3).times
843
+ .and_wrap_original do |method, *args|
844
+ poll_count = poll_count + 1
845
+ if poll_count == 3
846
+ raise Rdkafka::RdkafkaError.new(27,
847
+ "partitions ... too ... heavy ... must ... rebalance")
848
+ else
849
+ new_message
850
+ end
851
+ end
852
+ expect {
853
+ consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
854
+ batches_yielded << batch
855
+ exceptions_yielded << pending_error
856
+ each_batch_iterations = each_batch_iterations + 1
857
+ end
858
+ }.to raise_error(Rdkafka::RdkafkaError)
859
+ expect(poll_count).to eq 3
860
+ expect(each_batch_iterations).to eq 1
861
+ expect(batches_yielded.size).to eq 1
862
+ expect(batches_yielded.first.size).to eq 2
863
+ expect(exceptions_yielded.flatten.size).to eq 1
864
+ expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
865
+ end
866
+ end
867
+
868
+ context "error raised from poll and yield_on_error is false" do
869
+ it "should not yield anything and re-raise the error on rebalance" do
870
+ config = rdkafka_consumer_config(
871
+ {
872
+ :"enable.auto.commit" => false,
873
+ :"enable.auto.offset.store" => false
874
+ }
875
+ )
876
+ consumer = config.consumer
877
+ consumer.subscribe(topic_name)
878
+ loop_count = 0
879
+ batches_yielded = []
880
+ exceptions_yielded = []
881
+ each_batch_iterations = 0
882
+ poll_count = 0
883
+ expect(consumer)
884
+ .to receive(:poll)
885
+ .with(anything)
886
+ .exactly(3).times
887
+ .and_wrap_original do |method, *args|
888
+ poll_count = poll_count + 1
889
+ if poll_count == 3
890
+ raise Rdkafka::RdkafkaError.new(27,
891
+ "partitions ... too ... heavy ... must ... rebalance")
892
+ else
893
+ new_message
894
+ end
895
+ end
896
+ expect {
897
+ consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
898
+ batches_yielded << batch
899
+ exceptions_yielded << pending_error
900
+ each_batch_iterations = each_batch_iterations + 1
901
+ end
902
+ }.to raise_error(Rdkafka::RdkafkaError)
903
+ expect(poll_count).to eq 3
904
+ expect(each_batch_iterations).to eq 0
905
+ expect(batches_yielded.size).to eq 0
906
+ expect(exceptions_yielded.size).to eq 0
907
+ end
908
+ end
909
+ end
910
+
911
+ describe "a rebalance listener" do
912
+ let(:consumer) do
913
+ config = rdkafka_consumer_config
914
+ config.consumer_rebalance_listener = listener
915
+ config.consumer
916
+ end
917
+
918
+ context "with a working listener" do
919
+ let(:listener) do
920
+ Struct.new(:queue) do
921
+ def on_partitions_assigned(consumer, list)
922
+ collect(:assign, list)
923
+ end
924
+
925
+ def on_partitions_revoked(consumer, list)
926
+ collect(:revoke, list)
927
+ end
928
+
929
+ def collect(name, list)
930
+ partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
931
+ queue << ([name] + partitions)
932
+ end
933
+ end.new([])
934
+ end
935
+
936
+ it "should get notifications" do
937
+ notify_listener(listener)
938
+
939
+ expect(listener.queue).to eq([
940
+ [:assign, "consume_test_topic", 0, 1, 2],
941
+ [:revoke, "consume_test_topic", 0, 1, 2]
942
+ ])
943
+ end
944
+ end
945
+
946
+ context "with a broken listener" do
947
+ let(:listener) do
948
+ Struct.new(:queue) do
949
+ def on_partitions_assigned(consumer, list)
950
+ queue << :assigned
951
+ raise 'boom'
952
+ end
953
+
954
+ def on_partitions_revoked(consumer, list)
955
+ queue << :revoked
956
+ raise 'boom'
957
+ end
958
+ end.new([])
959
+ end
960
+
961
+ it 'should handle callback exceptions' do
962
+ notify_listener(listener)
963
+
964
+ expect(listener.queue).to eq([:assigned, :revoked])
965
+ end
711
966
  end
712
967
 
713
968
  def notify_listener(listener)
714
969
  # 1. subscribe and poll
715
- config.consumer_rebalance_listener = listener
716
970
  consumer.subscribe("consume_test_topic")
717
971
  wait_for_assignment(consumer)
718
972
  consumer.poll(100)
@@ -723,4 +977,34 @@ describe Rdkafka::Consumer do
723
977
  consumer.close
724
978
  end
725
979
  end
980
+
981
+ context "methods that should not be called after a consumer has been closed" do
982
+ before do
983
+ consumer.close
984
+ end
985
+
986
+ # Affected methods and a non-invalid set of parameters for the method
987
+ {
988
+ :subscribe => [ nil ],
989
+ :unsubscribe => nil,
990
+ :each_batch => nil,
991
+ :pause => [ nil ],
992
+ :resume => [ nil ],
993
+ :subscription => nil,
994
+ :assign => [ nil ],
995
+ :assignment => nil,
996
+ :committed => [],
997
+ :query_watermark_offsets => [ nil, nil ],
998
+ }.each do |method, args|
999
+ it "raises an exception if #{method} is called" do
1000
+ expect {
1001
+ if args.nil?
1002
+ consumer.public_send(method)
1003
+ else
1004
+ consumer.public_send(method, *args)
1005
+ end
1006
+ }.to raise_exception(Rdkafka::ClosedConsumerError, /#{method.to_s}/)
1007
+ end
1008
+ end
1009
+ end
726
1010
  end
@@ -11,6 +11,10 @@ describe Rdkafka::RdkafkaError do
11
11
  expect(Rdkafka::RdkafkaError.new(10, "message prefix").message_prefix).to eq "message prefix"
12
12
  end
13
13
 
14
+ it "should create an error with a broker message" do
15
+ expect(Rdkafka::RdkafkaError.new(10, broker_message: "broker message").broker_message).to eq "broker message"
16
+ end
17
+
14
18
  describe "#code" do
15
19
  it "should handle an invalid response" do
16
20
  expect(Rdkafka::RdkafkaError.new(933975).code).to eq :err_933975?