rdkafka 0.8.0.beta.1 → 0.10.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (40) hide show
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +23 -0
  3. data/CHANGELOG.md +18 -0
  4. data/README.md +5 -2
  5. data/docker-compose.yml +2 -0
  6. data/ext/README.md +7 -0
  7. data/ext/Rakefile +2 -1
  8. data/lib/rdkafka/abstract_handle.rb +82 -0
  9. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  10. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  11. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  12. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  13. data/lib/rdkafka/admin.rb +155 -0
  14. data/lib/rdkafka/bindings.rb +57 -18
  15. data/lib/rdkafka/callbacks.rb +106 -0
  16. data/lib/rdkafka/config.rb +59 -3
  17. data/lib/rdkafka/consumer.rb +125 -5
  18. data/lib/rdkafka/error.rb +29 -3
  19. data/lib/rdkafka/metadata.rb +6 -5
  20. data/lib/rdkafka/producer/delivery_handle.rb +7 -53
  21. data/lib/rdkafka/producer.rb +25 -11
  22. data/lib/rdkafka/version.rb +3 -3
  23. data/lib/rdkafka.rb +7 -0
  24. data/spec/rdkafka/abstract_handle_spec.rb +114 -0
  25. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  26. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  27. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  28. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  29. data/spec/rdkafka/admin_spec.rb +203 -0
  30. data/spec/rdkafka/bindings_spec.rb +32 -8
  31. data/spec/rdkafka/callbacks_spec.rb +20 -0
  32. data/spec/rdkafka/config_spec.rb +76 -7
  33. data/spec/rdkafka/consumer_spec.rb +266 -2
  34. data/spec/rdkafka/error_spec.rb +4 -0
  35. data/spec/rdkafka/metadata_spec.rb +78 -0
  36. data/spec/rdkafka/producer/delivery_handle_spec.rb +1 -41
  37. data/spec/rdkafka/producer_spec.rb +98 -31
  38. data/spec/spec_helper.rb +28 -11
  39. metadata +32 -9
  40. data/.travis.yml +0 -45
@@ -1,5 +1,6 @@
1
1
  require "spec_helper"
2
2
  require "ostruct"
3
+ require 'securerandom'
3
4
 
4
5
  describe Rdkafka::Consumer do
5
6
  let(:config) { rdkafka_config }
@@ -271,8 +272,18 @@ describe Rdkafka::Consumer do
271
272
  describe "#close" do
272
273
  it "should close a consumer" do
273
274
  consumer.subscribe("consume_test_topic")
275
+ 100.times do |i|
276
+ report = producer.produce(
277
+ topic: "consume_test_topic",
278
+ payload: "payload #{i}",
279
+ key: "key #{i}",
280
+ partition: 0
281
+ ).wait
282
+ end
274
283
  consumer.close
275
- expect(consumer.poll(100)).to be_nil
284
+ expect {
285
+ consumer.poll(100)
286
+ }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
276
287
  end
277
288
  end
278
289
 
@@ -662,7 +673,230 @@ describe Rdkafka::Consumer do
662
673
  # should break the each loop.
663
674
  consumer.each_with_index do |message, i|
664
675
  expect(message).to be_a Rdkafka::Consumer::Message
665
- consumer.close if i == 10
676
+ break if i == 10
677
+ end
678
+ consumer.close
679
+ end
680
+ end
681
+
682
+ describe "#each_batch" do
683
+ let(:message_payload) { 'a' * 10 }
684
+
685
+ before do
686
+ @topic = SecureRandom.base64(10).tr('+=/', '')
687
+ end
688
+
689
+ after do
690
+ @topic = nil
691
+ end
692
+
693
+ def topic_name
694
+ @topic
695
+ end
696
+
697
+ def produce_n(n)
698
+ handles = []
699
+ n.times do |i|
700
+ handles << producer.produce(
701
+ topic: topic_name,
702
+ payload: Time.new.to_f.to_s,
703
+ key: i.to_s,
704
+ partition: 0
705
+ )
706
+ end
707
+ handles.each(&:wait)
708
+ end
709
+
710
+ def new_message
711
+ instance_double("Rdkafka::Consumer::Message").tap do |message|
712
+ allow(message).to receive(:payload).and_return(message_payload)
713
+ end
714
+ end
715
+
716
+ it "retrieves messages produced into a topic" do
717
+ # This is the only each_batch test that actually produces real messages
718
+ # into a topic in the real kafka of the container.
719
+ #
720
+ # The other tests stub 'poll' which makes them faster and more reliable,
721
+ # but it makes sense to keep a single test with a fully integrated flow.
722
+ # This will help to catch breaking changes in the behavior of 'poll',
723
+ # librdkafka, or Kafka.
724
+ #
725
+ # This is, in effect, an integration test and the subsequent specs are
726
+ # unit tests.
727
+ create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
728
+ create_topic_handle.wait(max_wait_timeout: 15.0)
729
+ consumer.subscribe(topic_name)
730
+ produce_n 42
731
+ all_yields = []
732
+ consumer.each_batch(max_items: 10) do |batch|
733
+ all_yields << batch
734
+ break if all_yields.flatten.size >= 42
735
+ end
736
+ expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
737
+ expect(all_yields.flatten.size).to eq 42
738
+ expect(all_yields.size).to be > 4
739
+ expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
740
+ end
741
+
742
+ it "should batch poll results and yield arrays of messages" do
743
+ consumer.subscribe(topic_name)
744
+ all_yields = []
745
+ expect(consumer)
746
+ .to receive(:poll)
747
+ .exactly(10).times
748
+ .and_return(new_message)
749
+ consumer.each_batch(max_items: 10) do |batch|
750
+ all_yields << batch
751
+ break if all_yields.flatten.size >= 10
752
+ end
753
+ expect(all_yields.first).to be_instance_of(Array)
754
+ expect(all_yields.flatten.size).to eq 10
755
+ non_empty_yields = all_yields.reject { |batch| batch.empty? }
756
+ expect(non_empty_yields.size).to be < 10
757
+ end
758
+
759
+ it "should yield a partial batch if the timeout is hit with some messages" do
760
+ consumer.subscribe(topic_name)
761
+ poll_count = 0
762
+ expect(consumer)
763
+ .to receive(:poll)
764
+ .at_least(3).times do
765
+ poll_count = poll_count + 1
766
+ if poll_count > 2
767
+ sleep 0.1
768
+ nil
769
+ else
770
+ new_message
771
+ end
772
+ end
773
+ all_yields = []
774
+ consumer.each_batch(max_items: 10) do |batch|
775
+ all_yields << batch
776
+ break if all_yields.flatten.size >= 2
777
+ end
778
+ expect(all_yields.flatten.size).to eq 2
779
+ end
780
+
781
+ it "should yield [] if nothing is received before the timeout" do
782
+ create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
783
+ create_topic_handle.wait(max_wait_timeout: 15.0)
784
+ consumer.subscribe(topic_name)
785
+ consumer.each_batch do |batch|
786
+ expect(batch).to eq([])
787
+ break
788
+ end
789
+ end
790
+
791
+ it "should yield batches of max_items in size if messages are already fetched" do
792
+ yielded_batches = []
793
+ expect(consumer)
794
+ .to receive(:poll)
795
+ .with(anything)
796
+ .exactly(20).times
797
+ .and_return(new_message)
798
+
799
+ consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
800
+ yielded_batches << batch
801
+ break if yielded_batches.flatten.size >= 20
802
+ break if yielded_batches.size >= 20 # so failure doesn't hang
803
+ end
804
+ expect(yielded_batches.size).to eq 2
805
+ expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
806
+ end
807
+
808
+ it "should yield batches as soon as bytes_threshold is hit" do
809
+ yielded_batches = []
810
+ expect(consumer)
811
+ .to receive(:poll)
812
+ .with(anything)
813
+ .exactly(20).times
814
+ .and_return(new_message)
815
+
816
+ consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
817
+ yielded_batches << batch
818
+ break if yielded_batches.flatten.size >= 20
819
+ break if yielded_batches.size >= 20 # so failure doesn't hang
820
+ end
821
+ expect(yielded_batches.size).to eq 5
822
+ expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
823
+ end
824
+
825
+ context "error raised from poll and yield_on_error is true" do
826
+ it "should yield buffered exceptions on rebalance, then break" do
827
+ config = rdkafka_config({:"enable.auto.commit" => false,
828
+ :"enable.auto.offset.store" => false })
829
+ consumer = config.consumer
830
+ consumer.subscribe(topic_name)
831
+ loop_count = 0
832
+ batches_yielded = []
833
+ exceptions_yielded = []
834
+ each_batch_iterations = 0
835
+ poll_count = 0
836
+ expect(consumer)
837
+ .to receive(:poll)
838
+ .with(anything)
839
+ .exactly(3).times
840
+ .and_wrap_original do |method, *args|
841
+ poll_count = poll_count + 1
842
+ if poll_count == 3
843
+ raise Rdkafka::RdkafkaError.new(27,
844
+ "partitions ... too ... heavy ... must ... rebalance")
845
+ else
846
+ new_message
847
+ end
848
+ end
849
+ expect {
850
+ consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
851
+ batches_yielded << batch
852
+ exceptions_yielded << pending_error
853
+ each_batch_iterations = each_batch_iterations + 1
854
+ end
855
+ }.to raise_error(Rdkafka::RdkafkaError)
856
+ expect(poll_count).to eq 3
857
+ expect(each_batch_iterations).to eq 1
858
+ expect(batches_yielded.size).to eq 1
859
+ expect(batches_yielded.first.size).to eq 2
860
+ expect(exceptions_yielded.flatten.size).to eq 1
861
+ expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
862
+ end
863
+ end
864
+
865
+ context "error raised from poll and yield_on_error is false" do
866
+ it "should yield buffered exceptions on rebalance, then break" do
867
+ config = rdkafka_config({:"enable.auto.commit" => false,
868
+ :"enable.auto.offset.store" => false })
869
+ consumer = config.consumer
870
+ consumer.subscribe(topic_name)
871
+ loop_count = 0
872
+ batches_yielded = []
873
+ exceptions_yielded = []
874
+ each_batch_iterations = 0
875
+ poll_count = 0
876
+ expect(consumer)
877
+ .to receive(:poll)
878
+ .with(anything)
879
+ .exactly(3).times
880
+ .and_wrap_original do |method, *args|
881
+ poll_count = poll_count + 1
882
+ if poll_count == 3
883
+ raise Rdkafka::RdkafkaError.new(27,
884
+ "partitions ... too ... heavy ... must ... rebalance")
885
+ else
886
+ new_message
887
+ end
888
+ end
889
+ expect {
890
+ consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
891
+ batches_yielded << batch
892
+ exceptions_yielded << pending_error
893
+ each_batch_iterations = each_batch_iterations + 1
894
+ end
895
+ }.to raise_error(Rdkafka::RdkafkaError)
896
+ expect(poll_count).to eq 3
897
+ expect(each_batch_iterations).to eq 0
898
+ expect(batches_yielded.size).to eq 0
899
+ expect(exceptions_yielded.size).to eq 0
666
900
  end
667
901
  end
668
902
  end
@@ -723,4 +957,34 @@ describe Rdkafka::Consumer do
723
957
  consumer.close
724
958
  end
725
959
  end
960
+
961
+ context "methods that should not be called after a consumer has been closed" do
962
+ before do
963
+ consumer.close
964
+ end
965
+
966
+ # Affected methods and a non-invalid set of parameters for the method
967
+ {
968
+ :subscribe => [ nil ],
969
+ :unsubscribe => nil,
970
+ :each_batch => nil,
971
+ :pause => [ nil ],
972
+ :resume => [ nil ],
973
+ :subscription => nil,
974
+ :assign => [ nil ],
975
+ :assignment => nil,
976
+ :committed => [],
977
+ :query_watermark_offsets => [ nil, nil ],
978
+ }.each do |method, args|
979
+ it "raises an exception if #{method} is called" do
980
+ expect {
981
+ if args.nil?
982
+ consumer.public_send(method)
983
+ else
984
+ consumer.public_send(method, *args)
985
+ end
986
+ }.to raise_exception(Rdkafka::ClosedConsumerError, /#{method.to_s}/)
987
+ end
988
+ end
989
+ end
726
990
  end
@@ -11,6 +11,10 @@ describe Rdkafka::RdkafkaError do
11
11
  expect(Rdkafka::RdkafkaError.new(10, "message prefix").message_prefix).to eq "message prefix"
12
12
  end
13
13
 
14
+ it "should create an error with a broker message" do
15
+ expect(Rdkafka::RdkafkaError.new(10, broker_message: "broker message").broker_message).to eq "broker message"
16
+ end
17
+
14
18
  describe "#code" do
15
19
  it "should handle an invalid response" do
16
20
  expect(Rdkafka::RdkafkaError.new(933975).code).to eq :err_933975?
@@ -0,0 +1,78 @@
1
+ require "spec_helper"
2
+ require "securerandom"
3
+
4
+ describe Rdkafka::Metadata do
5
+ let(:config) { rdkafka_config }
6
+ let(:native_config) { config.send(:native_config) }
7
+ let(:native_kafka) { config.send(:native_kafka, native_config, :rd_kafka_consumer) }
8
+
9
+ after do
10
+ Rdkafka::Bindings.rd_kafka_consumer_close(native_kafka)
11
+ Rdkafka::Bindings.rd_kafka_destroy(native_kafka)
12
+ end
13
+
14
+ context "passing in a topic name" do
15
+ context "that is a non-existent topic" do
16
+ let(:topic_name) { SecureRandom.uuid.to_s }
17
+
18
+ it "raises an appropriate exception" do
19
+ expect {
20
+ described_class.new(native_kafka, topic_name)
21
+ }.to raise_exception(Rdkafka::RdkafkaError, "Broker: Unknown topic or partition (unknown_topic_or_part)")
22
+ end
23
+ end
24
+
25
+ context "that is one of our test topics" do
26
+ subject { described_class.new(native_kafka, topic_name) }
27
+ let(:topic_name) { "partitioner_test_topic" }
28
+
29
+ it "#brokers returns our single broker" do
30
+ expect(subject.brokers.length).to eq(1)
31
+ expect(subject.brokers[0][:broker_id]).to eq(1)
32
+ expect(subject.brokers[0][:broker_name]).to eq("localhost")
33
+ expect(subject.brokers[0][:broker_port]).to eq(9092)
34
+ end
35
+
36
+ it "#topics returns data on our test topic" do
37
+ expect(subject.topics.length).to eq(1)
38
+ expect(subject.topics[0][:partition_count]).to eq(25)
39
+ expect(subject.topics[0][:partitions].length).to eq(25)
40
+ expect(subject.topics[0][:topic_name]).to eq(topic_name)
41
+ end
42
+ end
43
+ end
44
+
45
+ context "not passing in a topic name" do
46
+ subject { described_class.new(native_kafka, topic_name) }
47
+ let(:topic_name) { nil }
48
+ let(:test_topics) {
49
+ %w(consume_test_topic empty_test_topic load_test_topic produce_test_topic rake_test_topic watermarks_test_topic partitioner_test_topic)
50
+ } # Test topics created in spec_helper.rb
51
+
52
+ it "#brokers returns our single broker" do
53
+ expect(subject.brokers.length).to eq(1)
54
+ expect(subject.brokers[0][:broker_id]).to eq(1)
55
+ expect(subject.brokers[0][:broker_name]).to eq("localhost")
56
+ expect(subject.brokers[0][:broker_port]).to eq(9092)
57
+ end
58
+
59
+ it "#topics returns data about all of our test topics" do
60
+ result = subject.topics.map { |topic| topic[:topic_name] }
61
+ expect(result).to include(*test_topics)
62
+ end
63
+ end
64
+
65
+ context "when a non-zero error code is returned" do
66
+ let(:topic_name) { SecureRandom.uuid.to_s }
67
+
68
+ before do
69
+ allow(Rdkafka::Bindings).to receive(:rd_kafka_metadata).and_return(-165)
70
+ end
71
+
72
+ it "creating the instance raises an exception" do
73
+ expect {
74
+ described_class.new(native_kafka, topic_name)
75
+ }.to raise_error(Rdkafka::RdkafkaError, /Local: Required feature not supported by broker \(unsupported_feature\)/)
76
+ end
77
+ end
78
+ end
@@ -12,42 +12,13 @@ describe Rdkafka::Producer::DeliveryHandle do
12
12
  end
13
13
  end
14
14
 
15
- describe ".register and .remove" do
16
- let(:pending_handle) { true }
17
-
18
- it "should register and remove a delivery handle" do
19
- Rdkafka::Producer::DeliveryHandle.register(subject.to_ptr.address, subject)
20
- removed = Rdkafka::Producer::DeliveryHandle.remove(subject.to_ptr.address)
21
- expect(removed).to eq subject
22
- expect(Rdkafka::Producer::DeliveryHandle::REGISTRY).to be_empty
23
- end
24
- end
25
-
26
- describe "#pending?" do
27
- context "when true" do
28
- let(:pending_handle) { true }
29
-
30
- it "should be true" do
31
- expect(subject.pending?).to be true
32
- end
33
- end
34
-
35
- context "when not true" do
36
- let(:pending_handle) { false }
37
-
38
- it "should be false" do
39
- expect(subject.pending?).to be false
40
- end
41
- end
42
- end
43
-
44
15
  describe "#wait" do
45
16
  let(:pending_handle) { true }
46
17
 
47
18
  it "should wait until the timeout and then raise an error" do
48
19
  expect {
49
20
  subject.wait(max_wait_timeout: 0.1)
50
- }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError
21
+ }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError, /delivery/
51
22
  end
52
23
 
53
24
  context "when not pending anymore and no error" do
@@ -67,16 +38,5 @@ describe Rdkafka::Producer::DeliveryHandle do
67
38
  expect(report.offset).to eq(100)
68
39
  end
69
40
  end
70
-
71
- context "when not pending anymore and there was an error" do
72
- let(:pending_handle) { false }
73
- let(:response) { 20 }
74
-
75
- it "should raise an rdkafka error" do
76
- expect {
77
- subject.wait
78
- }.to raise_error Rdkafka::RdkafkaError
79
- end
80
- end
81
41
  end
82
42
  end
@@ -12,47 +12,92 @@ describe Rdkafka::Producer do
12
12
  end
13
13
 
14
14
  context "delivery callback" do
15
- it "should set the callback" do
16
- expect {
17
- producer.delivery_callback = lambda do |delivery_handle|
18
- puts stats
15
+ context "with a proc/lambda" do
16
+ it "should set the callback" do
17
+ expect {
18
+ producer.delivery_callback = lambda do |delivery_handle|
19
+ puts delivery_handle
20
+ end
21
+ }.not_to raise_error
22
+ expect(producer.delivery_callback).to respond_to :call
23
+ end
24
+
25
+ it "should call the callback when a message is delivered" do
26
+ @callback_called = false
27
+
28
+ producer.delivery_callback = lambda do |report|
29
+ expect(report).not_to be_nil
30
+ expect(report.partition).to eq 1
31
+ expect(report.offset).to be >= 0
32
+ @callback_called = true
19
33
  end
20
- }.not_to raise_error
21
- expect(producer.delivery_callback).to be_a Proc
22
- end
23
34
 
24
- it "should not accept a callback that's not a proc" do
25
- expect {
26
- producer.delivery_callback = 'a string'
27
- }.to raise_error(TypeError)
28
- end
35
+ # Produce a message
36
+ handle = producer.produce(
37
+ topic: "produce_test_topic",
38
+ payload: "payload",
39
+ key: "key"
40
+ )
29
41
 
30
- it "should call the callback when a message is delivered" do
31
- @callback_called = false
42
+ # Wait for it to be delivered
43
+ handle.wait(max_wait_timeout: 15)
32
44
 
45
+ # Join the producer thread.
46
+ producer.close
33
47
 
34
- producer.delivery_callback = lambda do |report|
35
- expect(report).not_to be_nil
36
- expect(report.partition).to eq 1
37
- expect(report.offset).to be >= 0
38
- @callback_called = true
48
+ # Callback should have been called
49
+ expect(@callback_called).to be true
39
50
  end
51
+ end
40
52
 
41
- # Produce a message
42
- handle = producer.produce(
43
- topic: "produce_test_topic",
44
- payload: "payload",
45
- key: "key"
46
- )
53
+ context "with a callable object" do
54
+ it "should set the callback" do
55
+ callback = Class.new do
56
+ def call(stats); end
57
+ end
58
+ expect {
59
+ producer.delivery_callback = callback.new
60
+ }.not_to raise_error
61
+ expect(producer.delivery_callback).to respond_to :call
62
+ end
47
63
 
48
- # Wait for it to be delivered
49
- handle.wait(max_wait_timeout: 15)
64
+ it "should call the callback when a message is delivered" do
65
+ called_report = []
66
+ callback = Class.new do
67
+ def initialize(called_report)
68
+ @called_report = called_report
69
+ end
50
70
 
51
- # Join the producer thread.
52
- producer.close
71
+ def call(report)
72
+ @called_report << report
73
+ end
74
+ end
75
+ producer.delivery_callback = callback.new(called_report)
76
+
77
+ # Produce a message
78
+ handle = producer.produce(
79
+ topic: "produce_test_topic",
80
+ payload: "payload",
81
+ key: "key"
82
+ )
83
+
84
+ # Wait for it to be delivered
85
+ handle.wait(max_wait_timeout: 15)
53
86
 
54
- # Callback should have been called
55
- expect(@callback_called).to be true
87
+ # Join the producer thread.
88
+ producer.close
89
+
90
+ # Callback should have been called
91
+ expect(called_report.first).not_to be_nil
92
+ expect(called_report.first.partition).to eq 1
93
+ expect(called_report.first.offset).to be >= 0
94
+ end
95
+ end
96
+
97
+ it "should not accept a callback that's not callable" do
98
+ expect {
99
+ producer.delivery_callback = 'a string'
100
+ }.to raise_error(TypeError)
56
101
  end
57
102
  end
58
103
 
@@ -407,4 +452,26 @@ describe Rdkafka::Producer do
407
452
  # Waiting a second time should work
408
453
  handle.wait(max_wait_timeout: 5)
409
454
  end
455
+
456
+ context "methods that should not be called after a producer has been closed" do
457
+ before do
458
+ producer.close
459
+ end
460
+
461
+ # Affected methods and a non-invalid set of parameters for the method
462
+ {
463
+ :produce => { topic: nil },
464
+ :partition_count => nil,
465
+ }.each do |method, args|
466
+ it "raises an exception if #{method} is called" do
467
+ expect {
468
+ if args.is_a?(Hash)
469
+ producer.public_send(method, **args)
470
+ else
471
+ producer.public_send(method, args)
472
+ end
473
+ }.to raise_exception(Rdkafka::ClosedProducerError, /#{method.to_s}/)
474
+ end
475
+ end
476
+ end
410
477
  end