rdkafka 0.11.1 → 0.13.1

This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +7 -3
  3. data/CHANGELOG.md +23 -2
  4. data/Gemfile +2 -0
  5. data/README.md +26 -0
  6. data/Rakefile +2 -0
  7. data/dist/librdkafka_2.0.2.tar.gz +0 -0
  8. data/ext/Rakefile +56 -27
  9. data/lib/rdkafka/abstract_handle.rb +2 -0
  10. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  12. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  14. data/lib/rdkafka/admin.rb +50 -33
  15. data/lib/rdkafka/bindings.rb +59 -39
  16. data/lib/rdkafka/callbacks.rb +7 -1
  17. data/lib/rdkafka/config.rb +15 -12
  18. data/lib/rdkafka/consumer/headers.rb +24 -7
  19. data/lib/rdkafka/consumer/message.rb +3 -1
  20. data/lib/rdkafka/consumer/partition.rb +2 -0
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  22. data/lib/rdkafka/consumer.rb +86 -44
  23. data/lib/rdkafka/error.rb +15 -0
  24. data/lib/rdkafka/metadata.rb +4 -2
  25. data/lib/rdkafka/native_kafka.rb +115 -0
  26. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  27. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  28. data/lib/rdkafka/producer.rb +56 -38
  29. data/lib/rdkafka/version.rb +5 -3
  30. data/lib/rdkafka.rb +3 -0
  31. data/rdkafka.gemspec +2 -0
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +9 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +123 -27
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/native_kafka_spec.rb +124 -0
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  51. data/spec/rdkafka/producer_spec.rb +103 -24
  52. data/spec/spec_helper.rb +17 -1
  53. metadata +13 -9
  54. data/bin/console +0 -11
data/lib/rdkafka/producer.rb CHANGED
@@ -1,4 +1,6 @@
-require "securerandom"
+# frozen_string_literal: true
+
+require "objspace"
 
 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
@@ -10,29 +12,22 @@ module Rdkafka
     attr_reader :delivery_callback
 
     # @private
-    def initialize(native_kafka)
-      @id = SecureRandom.uuid
-      @closing = false
+    # Returns the number of arguments accepted by the callback, by default this is nil.
+    #
+    # @return [Integer, nil]
+    attr_reader :delivery_callback_arity
+
+    # @private
+    def initialize(native_kafka, partitioner_name)
       @native_kafka = native_kafka
+      @partitioner_name = partitioner_name || "consistent_random"
 
-      # Makes sure, that the producer gets closed before it gets GCed by Ruby
-      ObjectSpace.define_finalizer(@id, proc { close })
-
-      # Start thread to poll client for delivery callbacks
-      @polling_thread = Thread.new do
-        loop do
-          Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
-          # Exit thread if closing and the poll queue is empty
-          if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
-            break
-          end
-        end
-      end
-      @polling_thread.abort_on_exception = true
+      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
     end
 
     # Set a callback that will be called every time a message is successfully produced.
-    # The callback is called with a {DeliveryReport}
+    # The callback is called with a {DeliveryReport} and {DeliveryHandle}
     #
     # @param callback [Proc, #call] The callback
    #
@@ -40,20 +35,31 @@ module Rdkafka
     def delivery_callback=(callback)
       raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @delivery_callback = callback
+      @delivery_callback_arity = arity(callback)
     end
 
     # Close this producer and wait for the internal poll queue to empty.
     def close
-      ObjectSpace.undefine_finalizer(@id)
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
 
-      return unless @native_kafka
+    # Whether this producer has closed
+    def closed?
+      @native_kafka.closed?
+    end
+
+    # Wait until all outstanding producer requests are completed, with the given timeout
+    # in seconds. Call this before closing a producer to ensure delivery of all messages.
+    #
+    # @param timeout_ms [Integer] how long should we wait for flush of all messages
+    def flush(timeout_ms=5_000)
+      closed_producer_check(__method__)
 
-      # Indicate to polling thread that we're closing
-      @closing = true
-      # Wait for the polling thread to finish up
-      @polling_thread.join
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+      end
     end
 
     # Partition count for a given topic.
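The polling thread has moved into the new NativeKafka wrapper, and the added `flush`/`closed?` API changes the recommended shutdown sequence. A minimal usage sketch (broker address and topic are illustrative):

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
    producer.produce(topic: "events", payload: "hello")

    # Give outstanding deliveries up to 10 seconds to complete, then close.
    producer.flush(10_000)
    producer.close
    producer.closed? # => true
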
@@ -62,10 +68,11 @@ module Rdkafka
     # @param topic [String] The topic name.
     #
     # @return partition count [Integer,nil]
-    #
     def partition_count(topic)
       closed_producer_check(__method__)
-      Rdkafka::Metadata.new(@native_kafka, topic).topics&.first[:partition_count]
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Metadata.new(inner, topic).topics&.first[:partition_count]
+      end
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -106,7 +113,7 @@ module Rdkafka
       if partition_key
         partition_count = partition_count(topic)
         # If the topic is not present, set to -1
-        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
+        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
       end
 
       # If partition is nil, use -1 to let librdafka set the partition randomly or
@@ -156,10 +163,12 @@ module Rdkafka
       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
 
       # Produce the message
-      response = Rdkafka::Bindings.rd_kafka_producev(
-        @native_kafka,
-        *args
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_producev(
+          inner,
+          *args
+        )
+      end
 
       # Raise error if the produce call was not successful
       if response != 0
@@ -170,13 +179,22 @@ module Rdkafka
       delivery_handle
     end
 
-    # @private
-    def call_delivery_callback(delivery_handle)
-      @delivery_callback.call(delivery_handle) if @delivery_callback
+    def call_delivery_callback(delivery_report, delivery_handle)
+      return unless @delivery_callback
+
+      args = [delivery_report, delivery_handle].take(@delivery_callback_arity)
+      @delivery_callback.call(*args)
+    end
+
+    def arity(callback)
+      return callback.arity if callback.respond_to?(:arity)
+
+      callback.method(:call).arity
    end
 
+    private
     def closed_producer_check(method)
-      raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.nil?
+      raise Rdkafka::ClosedProducerError.new(method) if closed?
     end
   end
 end
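The delivery callback contract changes here as well: callbacks now receive the DeliveryReport and the DeliveryHandle, with arity detection keeping single-argument callbacks working. A sketch of both forms (the report's `partition` and `offset` readers are assumed from this release's DeliveryReport):

    # A one-argument callback still works; it receives only the report.
    producer.delivery_callback = ->(report) do
      puts "delivered: partition #{report.partition}, offset #{report.offset}"
    end

    # A two-argument callback additionally receives the delivery handle.
    producer.delivery_callback = ->(report, handle) do
      puts "#{handle.inspect} delivered at offset #{report.offset}"
    end
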
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,7 @@
+# frozen_string_literal: true
+
 module Rdkafka
-  VERSION = "0.11.1"
-  LIBRDKAFKA_VERSION = "1.8.2"
-  LIBRDKAFKA_SOURCE_SHA256 = "6a747d293a7a4613bd2897e28e8791476fbe1ae7361f2530a876e0fd483482a6"
+  VERSION = "0.13.1"
+  LIBRDKAFKA_VERSION = "2.0.2"
+  LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
 end
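Note the bundled librdkafka jump from 1.8.2 to 2.0.2, a major-version upgrade of the underlying C library. Both constants are inspectable at runtime:

    require "rdkafka"

    Rdkafka::VERSION            # => "0.13.1"
    Rdkafka::LIBRDKAFKA_VERSION # => "2.0.2"
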
data/lib/rdkafka.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "rdkafka/version"
 
 require "rdkafka/abstract_handle"
@@ -16,6 +18,7 @@ require "rdkafka/consumer/partition"
 require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
+require "rdkafka/native_kafka"
 require "rdkafka/producer"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
data/rdkafka.gemspec CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require File.expand_path('lib/rdkafka/version', __dir__)
 
 Gem::Specification.new do |gem|
data/spec/rdkafka/abstract_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::AbstractHandle do
data/spec/rdkafka/admin/create_topic_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::CreateTopicHandle do
data/spec/rdkafka/admin/create_topic_report_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::CreateTopicReport do
data/spec/rdkafka/admin/delete_topic_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::DeleteTopicHandle do
data/spec/rdkafka/admin/delete_topic_report_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::DeleteTopicReport do
data/spec/rdkafka/admin_spec.rb CHANGED
@@ -1,9 +1,11 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require "ostruct"
 
 describe Rdkafka::Admin do
-  let(:config) { rdkafka_config }
-  let(:admin) { config.admin }
+  let(:config) { rdkafka_config }
+  let(:admin) { config.admin }
 
   after do
     # Registry should always end up being empty
@@ -174,7 +176,6 @@ describe Rdkafka::Admin do
     end
   end
 
-
   it "deletes a topic that was newly created" do
     create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor)
     create_topic_report = create_topic_handle.wait(max_wait_timeout: 15.0)
data/spec/rdkafka/bindings_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require 'zlib'
 
@@ -76,6 +78,13 @@ describe Rdkafka::Bindings do
       result_2 = (Zlib.crc32(partition_key) % partition_count)
       expect(result_1).to eq(result_2)
     end
+
+    it "should return the partition calculated by the specified partitioner" do
+      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
+      ptr = FFI::MemoryPointer.from_string(partition_key)
+      result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
+      expect(result_1).to eq(result_2)
+    end
   end
 
   describe "stats callback" do
data/spec/rdkafka/callbacks_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Callbacks do
data/spec/rdkafka/config_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Config do
@@ -148,11 +150,24 @@ describe Rdkafka::Config do
     }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"")
   end
 
+  it "allows string partitioner key" do
+    expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
+    config = Rdkafka::Config.new("partitioner" => "murmur2")
+    config.producer.close
+  end
+
+  it "allows symbol partitioner key" do
+    expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
+    config = Rdkafka::Config.new(:partitioner => "murmur2")
+    config.producer.close
+  end
+
   it "should allow configuring zstd compression" do
     config = Rdkafka::Config.new('compression.codec' => 'zstd')
     begin
-      expect(config.producer).to be_a Rdkafka::Producer
-      config.producer.close
+      producer = config.producer
+      expect(producer).to be_a Rdkafka::Producer
+      producer.close
     rescue Rdkafka::Config::ConfigError => ex
       pending "Zstd compression not supported on this machine"
       raise ex
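These specs document the public entry point for the partitioner feature: Config accepts a `partitioner` key (String or Symbol) and passes it through to the Producer. A sketch of selecting murmur2 partitioning for `partition_key`-based routing (broker address and topic are illustrative):

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "partitioner"       => "murmur2" # :partitioner => "murmur2" also works
    )
    producer = config.producer

    # partition_key is hashed with the configured partitioner to pick a partition.
    producer.produce(topic: "events", payload: "x", partition_key: "user-42")
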
data/spec/rdkafka/consumer/headers_spec.rb ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Consumer::Headers do
+  let(:headers) do
+    { # Note String keys!
+      "version" => "2.1.3",
+      "type" => "String"
+    }
+  end
+  let(:native_message) { double('native message') }
+  let(:headers_ptr) { double('headers pointer') }
+
+  describe '.from_native' do
+    before do
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(native_message, anything) do |_, headers_ptrptr|
+        expect(headers_ptrptr).to receive(:read_pointer).and_return(headers_ptr)
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: headers.keys[0]))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[0].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers.values[0]))
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: headers.keys[1]))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[1].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers.values[1]))
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 2, anything, anything, anything)
+        .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
+    end
+
+    subject { described_class.from_native(native_message) }
+
+    it { is_expected.to eq(headers) }
+    it { is_expected.to be_frozen }
+
+    it 'allows String key' do
+      expect(subject['version']).to eq("2.1.3")
+    end
+
+    it 'allows Symbol key, but warns' do
+      expect(Kernel).to \
+        receive(:warn).with("rdkafka deprecation warning: header access with Symbol key :version treated as a String. " \
+          "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+      expect(subject[:version]).to eq("2.1.3")
+    end
+  end
+end
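This new spec pins down a breaking change for consumers: `message.headers` now returns a frozen Hash with String keys. Symbol access still resolves but emits a deprecation warning and is slated to break in version 1. A migration sketch:

    message = consumer.poll(250)

    message.headers["foo"] # => "bar" (preferred)
    message.headers[:foo]  # => "bar", but prints a deprecation warning
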
data/spec/rdkafka/consumer/message_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::Message do
data/spec/rdkafka/consumer/partition_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::Partition do
data/spec/rdkafka/consumer/topic_partition_list_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::TopicPartitionList do
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require "ostruct"
 require 'securerandom'
@@ -53,7 +55,7 @@ describe Rdkafka::Consumer do
 
   describe "#pause and #resume" do
     context "subscription" do
-      let(:timeout) { 1000 }
+      let(:timeout) { 2000 }
 
       before { consumer.subscribe("consume_test_topic") }
       after { consumer.unsubscribe }
@@ -241,7 +243,7 @@ describe Rdkafka::Consumer do
 
     it "should return the assignment when subscribed" do
       # Make sure there's a message
-      report = producer.produce(
+      producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
@@ -272,7 +274,7 @@ describe Rdkafka::Consumer do
     it "should close a consumer" do
       consumer.subscribe("consume_test_topic")
       100.times do |i|
-        report = producer.produce(
+        producer.produce(
          topic: "consume_test_topic",
          payload: "payload #{i}",
          key: "key #{i}",
@@ -284,12 +286,35 @@ describe Rdkafka::Consumer do
         consumer.poll(100)
       }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
     end
+
+    context 'when there are outgoing operations in other threads' do
+      it 'should wait and not crash' do
+        times = []
+
+        # Run a long running poll
+        thread = Thread.new do
+          times << Time.now
+          consumer.subscribe("empty_test_topic")
+          times << Time.now
+          consumer.poll(1_000)
+          times << Time.now
+        end
+
+        # Make sure it starts before we close
+        sleep(0.1)
+        consumer.close
+        close_time = Time.now
+        thread.join
+
+        times.each { |op_time| expect(op_time).to be < close_time }
+      end
+    end
   end
 
   describe "#commit, #committed and #store_offset" do
     # Make sure there's a stored offset
     let!(:report) do
-      report = producer.produce(
+      producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
@@ -593,7 +618,7 @@ describe Rdkafka::Consumer do
   end
 
   describe "#poll with headers" do
-    it "should return message with headers" do
+    it "should return message with headers using string keys (when produced with symbol keys)" do
       report = producer.produce(
         topic: "consume_test_topic",
         key: "key headers",
@@ -603,7 +628,20 @@ describe Rdkafka::Consumer do
       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
       expect(message).to be
       expect(message.key).to eq('key headers')
-      expect(message.headers).to include(foo: 'bar')
+      expect(message.headers).to include('foo' => 'bar')
+    end
+
+    it "should return message with headers using string keys (when produced with string keys)" do
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: { 'foo' => 'bar' }
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers).to include('foo' => 'bar')
     end
 
     it "should return message with no headers" do
@@ -698,7 +736,7 @@ describe Rdkafka::Consumer do
     n.times do |i|
       handles << producer.produce(
         topic: topic_name,
-        payload: Time.new.to_f.to_s,
+        payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
        key: i.to_s,
        partition: 0
      )
@@ -723,7 +761,8 @@ describe Rdkafka::Consumer do
      #
      # This is, in effect, an integration test and the subsequent specs are
      # unit tests.
-      create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+      admin = rdkafka_config.admin
+      create_topic_handle = admin.create_topic(topic_name, 1, 1)
      create_topic_handle.wait(max_wait_timeout: 15.0)
      consumer.subscribe(topic_name)
      produce_n 42
@@ -736,6 +775,7 @@ describe Rdkafka::Consumer do
      expect(all_yields.flatten.size).to eq 42
      expect(all_yields.size).to be > 4
      expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
+      admin.close
    end
 
    it "should batch poll results and yield arrays of messages" do
@@ -778,13 +818,15 @@ describe Rdkafka::Consumer do
    end
 
    it "should yield [] if nothing is received before the timeout" do
-      create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+      admin = rdkafka_config.admin
+      create_topic_handle = admin.create_topic(topic_name, 1, 1)
      create_topic_handle.wait(max_wait_timeout: 15.0)
      consumer.subscribe(topic_name)
      consumer.each_batch do |batch|
        expect(batch).to eq([])
        break
      end
+      admin.close
    end
 
    it "should yield batchs of max_items in size if messages are already fetched" do
@@ -831,7 +873,6 @@ describe Rdkafka::Consumer do
      )
      consumer = config.consumer
      consumer.subscribe(topic_name)
-      loop_count = 0
      batches_yielded = []
      exceptions_yielded = []
      each_batch_iterations = 0
@@ -862,6 +903,7 @@ describe Rdkafka::Consumer do
      expect(batches_yielded.first.size).to eq 2
      expect(exceptions_yielded.flatten.size).to eq 1
      expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
+      consumer.close
    end
  end
 
@@ -875,7 +917,6 @@ describe Rdkafka::Consumer do
      )
      consumer = config.consumer
      consumer.subscribe(topic_name)
-      loop_count = 0
      batches_yielded = []
      exceptions_yielded = []
      each_batch_iterations = 0
@@ -904,6 +945,7 @@ describe Rdkafka::Consumer do
      expect(each_batch_iterations).to eq 0
      expect(batches_yielded.size).to eq 0
      expect(exceptions_yielded.size).to eq 0
+      consumer.close
    end
  end
 end
@@ -918,11 +960,11 @@ describe Rdkafka::Consumer do
    context "with a working listener" do
      let(:listener) do
        Struct.new(:queue) do
-          def on_partitions_assigned(consumer, list)
+          def on_partitions_assigned(list)
            collect(:assign, list)
          end
 
-          def on_partitions_revoked(consumer, list)
+          def on_partitions_revoked(list)
            collect(:revoke, list)
          end
 
@@ -946,12 +988,12 @@ describe Rdkafka::Consumer do
    context "with a broken listener" do
      let(:listener) do
        Struct.new(:queue) do
-          def on_partitions_assigned(consumer, list)
+          def on_partitions_assigned(list)
            queue << :assigned
            raise 'boom'
          end
 
-          def on_partitions_revoked(consumer, list)
+          def on_partitions_revoked(list)
            queue << :revoked
            raise 'boom'
          end
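Both listener rewrites above reflect a breaking API change: rebalance callbacks no longer receive the consumer as their first argument. A migration sketch for a 0.11-era listener:

    class RebalanceListener
      # 0.11.x signature was on_partitions_assigned(consumer, list)
      def on_partitions_assigned(list)
        puts "assigned: #{list.to_h.keys}"
      end

      # 0.11.x signature was on_partitions_revoked(consumer, list)
      def on_partitions_revoked(list)
        puts "revoked: #{list.to_h.keys}"
      end
    end

    config.consumer_rebalance_listener = RebalanceListener.new
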
@@ -964,18 +1006,6 @@ describe Rdkafka::Consumer do
        expect(listener.queue).to eq([:assigned, :revoked])
      end
    end
-
-    def notify_listener(listener)
-      # 1. subscribe and poll
-      consumer.subscribe("consume_test_topic")
-      wait_for_assignment(consumer)
-      consumer.poll(100)
-
-      # 2. unsubscribe
-      consumer.unsubscribe
-      wait_for_unassignment(consumer)
-      consumer.close
-    end
  end
 
  context "methods that should not be called after a consumer has been closed" do
@@ -1007,4 +1037,70 @@ describe Rdkafka::Consumer do
      end
    end
  end
+
+  it "provides a finalizer that closes the native kafka client" do
+    expect(consumer.closed?).to eq(false)
+
+    consumer.finalizer.call("some-ignored-object-id")
+
+    expect(consumer.closed?).to eq(true)
+  end
+
+  context "when the rebalance protocol is cooperative" do
+    let(:consumer) do
+      config = rdkafka_consumer_config(
+        {
+          :"partition.assignment.strategy" => "cooperative-sticky",
+          :"debug" => "consumer",
+        }
+      )
+      config.consumer_rebalance_listener = listener
+      config.consumer
+    end
+
+    let(:listener) do
+      Struct.new(:queue) do
+        def on_partitions_assigned(list)
+          collect(:assign, list)
+        end
+
+        def on_partitions_revoked(list)
+          collect(:revoke, list)
+        end
+
+        def collect(name, list)
+          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
+          queue << ([name] + partitions)
+        end
+      end.new([])
+    end
+
+    it "should be able to assign and unassign partitions using the cooperative partition assignment APIs" do
+      notify_listener(listener) do
+        handles = []
+        10.times do
+          handles << producer.produce(
+            topic: "consume_test_topic",
+            payload: "payload 1",
+            key: "key 1",
+            partition: 0
+          )
+        end
+        handles.each(&:wait)
+
+        consumer.subscribe("consume_test_topic")
+        # Check the first 10 messages. Then close the consumer, which
+        # should break the each loop.
+        consumer.each_with_index do |message, i|
+          expect(message).to be_a Rdkafka::Consumer::Message
+          break if i == 10
+        end
+      end
+
+      expect(listener.queue).to eq([
+        [:assign, "consume_test_topic", 0, 1, 2],
+        [:revoke, "consume_test_topic", 0, 1, 2]
+      ])
+    end
+  end
 end
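The cooperative test above is driven entirely by standard librdkafka configuration; outside the spec helper, enabling incremental rebalancing looks like this (broker address and group id are illustrative):

    config = Rdkafka::Config.new(
      "bootstrap.servers"             => "localhost:9092",
      "group.id"                      => "example-group",
      "partition.assignment.strategy" => "cooperative-sticky"
    )
    consumer = config.consumer
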