rdkafka 0.10.0 → 0.12.0

@@ -3,9 +3,8 @@ require "ostruct"
 require 'securerandom'
 
 describe Rdkafka::Consumer do
-  let(:config) { rdkafka_config }
-  let(:consumer) { config.consumer }
-  let(:producer) { config.producer }
+  let(:consumer) { rdkafka_consumer_config.consumer }
+  let(:producer) { rdkafka_producer_config.producer }
 
   after { consumer.close }
   after { producer.close }
@@ -242,7 +241,7 @@ describe Rdkafka::Consumer do
 
     it "should return the assignment when subscribed" do
       # Make sure there's a message
-      report = producer.produce(
+      producer.produce(
         topic: "consume_test_topic",
         payload: "payload 1",
         key: "key 1",
@@ -273,7 +272,7 @@ describe Rdkafka::Consumer do
     it "should close a consumer" do
       consumer.subscribe("consume_test_topic")
       100.times do |i|
-        report = producer.produce(
+        producer.produce(
           topic: "consume_test_topic",
           payload: "payload #{i}",
           key: "key #{i}",
@@ -290,7 +289,7 @@ describe Rdkafka::Consumer do
   describe "#commit, #committed and #store_offset" do
     # Make sure there's a stored offset
     let!(:report) do
-      report = producer.produce(
+      producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
@@ -328,7 +327,7 @@ describe Rdkafka::Consumer do
     before :all do
       # Make sure there are some messages.
       handles = []
-      producer = rdkafka_config.producer
+      producer = rdkafka_producer_config.producer
       10.times do
         (0..2).each do |i|
           handles << producer.produce(
@@ -404,7 +403,7 @@ describe Rdkafka::Consumer do
       config = {}
      config[:'enable.auto.offset.store'] = false
      config[:'enable.auto.commit'] = false
-      @new_consumer = rdkafka_config(config).consumer
+      @new_consumer = rdkafka_consumer_config(config).consumer
      @new_consumer.subscribe("consume_test_topic")
      wait_for_assignment(@new_consumer)
    end
@@ -459,13 +458,13 @@ describe Rdkafka::Consumer do
   end
 
   describe "#lag" do
-    let(:config) { rdkafka_config(:"enable.partition.eof" => true) }
+    let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }
 
     it "should calculate the consumer lag" do
       # Make sure there's a message in every partition and
       # wait for the message to make sure everything is committed.
       (0..2).each do |i|
-        report = producer.produce(
+        producer.produce(
           topic: "consume_test_topic",
           key: "key lag #{i}",
           partition: i
@@ -508,7 +507,7 @@ describe Rdkafka::Consumer do
 
       # Produce message on every topic again
       (0..2).each do |i|
-        report = producer.produce(
+        producer.produce(
           topic: "consume_test_topic",
           key: "key lag #{i}",
           partition: i
@@ -824,11 +823,14 @@ describe Rdkafka::Consumer do
 
     context "error raised from poll and yield_on_error is true" do
       it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_config({:"enable.auto.commit" => false,
-                                 :"enable.auto.offset.store" => false })
+        config = rdkafka_consumer_config(
+          {
+            :"enable.auto.commit" => false,
+            :"enable.auto.offset.store" => false
+          }
+        )
         consumer = config.consumer
         consumer.subscribe(topic_name)
-        loop_count = 0
         batches_yielded = []
         exceptions_yielded = []
         each_batch_iterations = 0
@@ -864,11 +866,14 @@ describe Rdkafka::Consumer do
 
     context "error raised from poll and yield_on_error is false" do
       it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_config({:"enable.auto.commit" => false,
-                                 :"enable.auto.offset.store" => false })
+        config = rdkafka_consumer_config(
+          {
+            :"enable.auto.commit" => false,
+            :"enable.auto.offset.store" => false
+          }
+        )
         consumer = config.consumer
         consumer.subscribe(topic_name)
-        loop_count = 0
         batches_yielded = []
         exceptions_yielded = []
         each_batch_iterations = 0
@@ -902,51 +907,64 @@ describe Rdkafka::Consumer do
   end
 
   describe "a rebalance listener" do
-    it "should get notifications" do
-      listener = Struct.new(:queue) do
-        def on_partitions_assigned(consumer, list)
-          collect(:assign, list)
-        end
+    let(:consumer) do
+      config = rdkafka_consumer_config
+      config.consumer_rebalance_listener = listener
+      config.consumer
+    end
 
-        def on_partitions_revoked(consumer, list)
-          collect(:revoke, list)
-        end
+    context "with a working listener" do
+      let(:listener) do
+        Struct.new(:queue) do
+          def on_partitions_assigned(consumer, list)
+            collect(:assign, list)
+          end
 
-        def collect(name, list)
-          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
-          queue << ([name] + partitions)
-        end
-      end.new([])
+          def on_partitions_revoked(consumer, list)
+            collect(:revoke, list)
+          end
 
-      notify_listener(listener)
+          def collect(name, list)
+            partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
+            queue << ([name] + partitions)
+          end
+        end.new([])
+      end
 
-      expect(listener.queue).to eq([
-        [:assign, "consume_test_topic", 0, 1, 2],
-        [:revoke, "consume_test_topic", 0, 1, 2]
-      ])
+      it "should get notifications" do
+        notify_listener(listener)
+
+        expect(listener.queue).to eq([
+          [:assign, "consume_test_topic", 0, 1, 2],
+          [:revoke, "consume_test_topic", 0, 1, 2]
+        ])
+      end
     end
 
-    it 'should handle callback exceptions' do
-      listener = Struct.new(:queue) do
-        def on_partitions_assigned(consumer, list)
-          queue << :assigned
-          raise 'boom'
-        end
+    context "with a broken listener" do
+      let(:listener) do
+        Struct.new(:queue) do
+          def on_partitions_assigned(consumer, list)
+            queue << :assigned
+            raise 'boom'
+          end
 
-        def on_partitions_revoked(consumer, list)
-          queue << :revoked
-          raise 'boom'
-        end
-      end.new([])
+          def on_partitions_revoked(consumer, list)
+            queue << :revoked
+            raise 'boom'
+          end
+        end.new([])
+      end
 
-      notify_listener(listener)
+      it 'should handle callback exceptions' do
+        notify_listener(listener)
 
-      expect(listener.queue).to eq([:assigned, :revoked])
+        expect(listener.queue).to eq([:assigned, :revoked])
+      end
     end
 
     def notify_listener(listener)
       # 1. subscribe and poll
-      config.consumer_rebalance_listener = listener
       consumer.subscribe("consume_test_topic")
       wait_for_assignment(consumer)
       consumer.poll(100)
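
The rebalance listener specs above now build the consumer through a shared `let` that wires the listener into the config before `config.consumer` is called. A minimal usage sketch, assuming only the API these specs exercise (`Rdkafka::Config#consumer_rebalance_listener` plus a listener object responding to `on_partitions_assigned` and `on_partitions_revoked`); the listener shape here is illustrative:

    # Illustrative listener; any object with these two methods works.
    listener = Struct.new(:events) do
      def on_partitions_assigned(consumer, list)
        events << [:assign, list.to_h.keys]
      end

      def on_partitions_revoked(consumer, list)
        events << [:revoke, list.to_h.keys]
      end
    end.new([])

    config = rdkafka_consumer_config # spec helper, defined in spec_helper.rb below
    config.consumer_rebalance_listener = listener
    consumer = config.consumer
    consumer.subscribe("consume_test_topic")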
@@ -2,7 +2,7 @@ require "spec_helper"
 require "securerandom"
 
 describe Rdkafka::Metadata do
-  let(:config) { rdkafka_config }
+  let(:config) { rdkafka_consumer_config }
   let(:native_config) { config.send(:native_config) }
   let(:native_kafka) { config.send(:native_kafka, native_config, :rd_kafka_consumer) }
 
@@ -0,0 +1,145 @@
+require "spec_helper"
+
+describe Rdkafka::Producer::Client do
+  let(:config) { rdkafka_producer_config }
+  let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) }
+  let(:closing) { false }
+  let(:thread) { double(Thread) }
+
+  subject(:client) { described_class.new(native) }
+
+  before do
+    allow(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).and_call_original
+    allow(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(instance_of(FFI::Pointer)).and_return(0).and_call_original
+    allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy)
+    allow(Thread).to receive(:new).and_return(thread)
+
+    allow(thread).to receive(:[]=).with(:closing, anything)
+    allow(thread).to receive(:join)
+    allow(thread).to receive(:abort_on_exception=).with(anything)
+  end
+
+  context "defaults" do
+    it "sets the thread to abort on exception" do
+      expect(thread).to receive(:abort_on_exception=).with(true)
+
+      client
+    end
+
+    it "sets the thread `closing` flag to false" do
+      expect(thread).to receive(:[]=).with(:closing, false)
+
+      client
+    end
+  end
+
+  context "the polling thread" do
+    it "is created" do
+      expect(Thread).to receive(:new)
+
+      client
+    end
+
+    it "polls the native with default 250ms timeout" do
+      polling_loop_expects do
+        expect(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).at_least(:once)
+      end
+    end
+
+    it "check the out queue of native client" do
+      polling_loop_expects do
+        expect(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(native).at_least(:once)
+      end
+    end
+  end
+
+  def polling_loop_expects(&block)
+    Thread.current[:closing] = true # this forces the loop break with line #12
+
+    allow(Thread).to receive(:new).and_yield do |_|
+      block.call
+    end.and_return(thread)
+
+    client
+  end
+
+  it "exposes `native` client" do
+    expect(client.native).to eq(native)
+  end
+
+  context "when client was not yet closed (`nil`)" do
+    it "is not closed" do
+      expect(client.closed?).to eq(false)
+    end
+
+    context "and attempt to close" do
+      it "calls the `destroy` binding" do
+        expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native)
+
+        client.close
+      end
+
+      it "indicates to the polling thread that it is closing" do
+        expect(thread).to receive(:[]=).with(:closing, true)
+
+        client.close
+      end
+
+      it "joins the polling thread" do
+        expect(thread).to receive(:join)
+
+        client.close
+      end
+
+      it "closes and unassign the native client" do
+        client.close
+
+        expect(client.native).to eq(nil)
+        expect(client.closed?).to eq(true)
+      end
+    end
+  end
+
+  context "when client was already closed" do
+    before { client.close }
+
+    it "is closed" do
+      expect(client.closed?).to eq(true)
+    end
+
+    context "and attempt to close again" do
+      it "does not call the `destroy` binding" do
+        expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy)
+
+        client.close
+      end
+
+      it "does not indicate to the polling thread that it is closing" do
+        expect(thread).not_to receive(:[]=).with(:closing, true)
+
+        client.close
+      end
+
+      it "does not join the polling thread" do
+        expect(thread).not_to receive(:join)
+
+        client.close
+      end
+
+      it "does not close and unassign the native client again" do
+        client.close
+
+        expect(client.native).to eq(nil)
+        expect(client.closed?).to eq(true)
+      end
+    end
+  end
+
+  it "provide a finalizer Proc that closes the `native` client" do
+    expect(client.closed?).to eq(false)
+
+    client.finalizer.call("some-ignored-object-id")
+
+    expect(client.closed?).to eq(true)
+  end
+end
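
Taken together, these expectations pin down the client's background thread: it is created via `Thread.new`, marked `abort_on_exception`, given a thread-local `:closing` flag, and it repeatedly calls `rd_kafka_poll` with a 250 ms timeout while watching `rd_kafka_outq_len` so it can drain the out queue before exiting. A sketch of a loop consistent with those assertions (inferred from the specs, not the gem's verbatim implementation):

    # `native` is the FFI::Pointer to the librdkafka producer handle.
    thread = Thread.new do
      loop do
        # Serve delivery callbacks; blocks for at most 250 ms.
        Rdkafka::Bindings.rd_kafka_poll(native, 250)

        # Exit once close has been requested and librdkafka's
        # out queue has drained.
        if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(native) == 0
          break
        end
      end
    end
    thread.abort_on_exception = true
    thread[:closing] = false

On `close`, the specs show the inverse sequence: set `thread[:closing] = true`, join the thread, call `rd_kafka_destroy` once, and unassign `native` so a second `close` is a no-op.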
@@ -1,8 +1,9 @@
 require "spec_helper"
+require "zlib"
 
 describe Rdkafka::Producer do
-  let(:producer) { rdkafka_config.producer }
-  let(:consumer) { rdkafka_config.consumer }
+  let(:producer) { rdkafka_producer_config.producer }
+  let(:consumer) { rdkafka_consumer_config.consumer }
 
   after do
     # Registry should always end up being empty
@@ -48,6 +49,27 @@ describe Rdkafka::Producer do
       # Callback should have been called
      expect(@callback_called).to be true
    end
+
+    it "should provide handle" do
+      @callback_handle = nil
+
+      producer.delivery_callback = lambda { |_, handle| @callback_handle = handle }
+
+      # Produce a message
+      handle = producer.produce(
+        topic: "produce_test_topic",
+        payload: "payload",
+        key: "key"
+      )
+
+      # Wait for it to be delivered
+      handle.wait(max_wait_timeout: 15)
+
+      # Join the producer thread.
+      producer.close
+
+      expect(handle).to be @callback_handle
+    end
   end
 
   context "with a callable object" do
@@ -92,6 +114,36 @@ describe Rdkafka::Producer do
      expect(called_report.first.partition).to eq 1
      expect(called_report.first.offset).to be >= 0
    end
+
+    it "should provide handle" do
+      callback_handles = []
+      callback = Class.new do
+        def initialize(callback_handles)
+          @callback_handles = callback_handles
+        end
+
+        def call(_, handle)
+          @callback_handles << handle
+        end
+      end
+      producer.delivery_callback = callback.new(callback_handles)
+
+      # Produce a message
+      handle = producer.produce(
+        topic: "produce_test_topic",
+        payload: "payload",
+        key: "key"
+      )
+
+      # Wait for it to be delivered
+      handle.wait(max_wait_timeout: 15)
+
+      # Join the producer thread.
+      producer.close
+
+      # Callback should have been called
+      expect(handle).to be callback_handles.first
+    end
   end
 
   it "should not accept a callback that's not callable" do
@@ -375,20 +427,16 @@ describe Rdkafka::Producer do
     end
   end
 
-  it "should produce a message in a forked process" do
+  it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
     # Fork, produce a message, send the report over a pipe and
     # wait for and check the message in the main process.
-
-    # Kernel#fork is not available in JRuby
-    skip if defined?(JRUBY_VERSION)
-
     reader, writer = IO.pipe
 
     fork do
       reader.close
 
       # Avoids sharing the socket between processes.
-      producer = rdkafka_config.producer
+      producer = rdkafka_producer_config.producer
 
       handle = producer.produce(
         topic: "produce_test_topic",
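
Both new "should provide handle" examples cover the same addition: the delivery callback can now accept the delivery handle as a second argument next to the delivery report, whether it is a lambda or a callable object. A usage sketch composed only of calls that appear in these specs:

    producer = rdkafka_producer_config.producer # spec helper, defined below

    producer.delivery_callback = lambda do |report, handle|
      # `handle` is the same object returned by the matching #produce call.
      puts "delivered at partition #{report.partition}, offset #{report.offset}"
    end

    handle = producer.produce(topic: "produce_test_topic", payload: "payload", key: "key")
    handle.wait(max_wait_timeout: 15)
    producer.close # joins the producer's background thread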
data/spec/spec_helper.rb
@@ -8,27 +8,57 @@ end
 require "pry"
 require "rspec"
 require "rdkafka"
+require "timeout"
 
-def rdkafka_config(config_overrides={})
-  config = {
+def rdkafka_base_config
+  {
     :"api.version.request" => false,
     :"broker.version.fallback" => "1.0",
     :"bootstrap.servers" => "localhost:9092",
-    :"group.id" => "ruby-test-#{Random.new.rand(0..1_000_000)}",
-    :"auto.offset.reset" => "earliest",
-    :"enable.partition.eof" => false
   }
+end
+
+def rdkafka_config(config_overrides={})
+  # Generate the base config
+  config = rdkafka_base_config
+  # Merge overrides
+  config.merge!(config_overrides)
+  # Return it
+  Rdkafka::Config.new(config)
+end
+
+def rdkafka_consumer_config(config_overrides={})
+  # Generate the base config
+  config = rdkafka_base_config
+  # Add consumer specific fields to it
+  config[:"auto.offset.reset"] = "earliest"
+  config[:"enable.partition.eof"] = false
+  config[:"group.id"] = "ruby-test-#{Random.new.rand(0..1_000_000)}"
+  # Enable debug mode if required
+  if ENV["DEBUG_CONSUMER"]
+    config[:debug] = "cgrp,topic,fetch"
+  end
+  # Merge overrides
+  config.merge!(config_overrides)
+  # Return it
+  Rdkafka::Config.new(config)
+end
+
+def rdkafka_producer_config(config_overrides={})
+  # Generate the base config
+  config = rdkafka_base_config
+  # Enable debug mode if required
   if ENV["DEBUG_PRODUCER"]
     config[:debug] = "broker,topic,msg"
-  elsif ENV["DEBUG_CONSUMER"]
-    config[:debug] = "cgrp,topic,fetch"
   end
+  # Merge overrides
   config.merge!(config_overrides)
+  # Return it
   Rdkafka::Config.new(config)
 end
 
 def new_native_client
-  config = rdkafka_config
+  config = rdkafka_consumer_config
   config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer)
 end
 
@@ -42,7 +72,7 @@ end
 
 def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, consumer: nil)
   new_consumer = !!consumer
-  consumer ||= rdkafka_config.consumer
+  consumer ||= rdkafka_consumer_config.consumer
   consumer.subscribe(topic)
   timeout = Time.now.to_i + timeout_in_seconds
   loop do
@@ -75,6 +105,9 @@ def wait_for_unassignment(consumer)
 end
 
 RSpec.configure do |config|
+  config.filter_run focus: true
+  config.run_all_when_everything_filtered = true
+
   config.before(:suite) do
     admin = rdkafka_config.admin
     {
@@ -95,4 +128,12 @@ RSpec.configure do |config|
     end
     admin.close
   end
+
+  config.around(:each) do |example|
+    # Timeout specs after a minute. If they take longer
+    # they are probably stuck
+    Timeout::timeout(60) do
+      example.run
+    end
+  end
 end
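
With the helper split, each spec reaches for the config that matches its role: `rdkafka_consumer_config` layers `group.id`, `auto.offset.reset` and `enable.partition.eof` on top of `rdkafka_base_config`, while `rdkafka_producer_config` stays free of consumer-only keys. For example:

    # Consumer with a per-spec override, as in the #lag specs above
    consumer = rdkafka_consumer_config(:"enable.partition.eof" => true).consumer

    # Producer config no longer carries a random group.id
    producer = rdkafka_producer_config.producer

The generic `rdkafka_config` stays for callers outside either role, such as the `admin` client created in the `before(:suite)` hook.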