karafka-rdkafka 0.12.3 → 0.13.0

Files changed (57)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/CHANGELOG.md +23 -0
  5. data/Gemfile +2 -0
  6. data/README.md +26 -0
  7. data/Rakefile +2 -0
  8. data/ext/Rakefile +2 -0
  9. data/karafka-rdkafka.gemspec +2 -0
  10. data/lib/rdkafka/abstract_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  15. data/lib/rdkafka/admin.rb +95 -73
  16. data/lib/rdkafka/bindings.rb +52 -37
  17. data/lib/rdkafka/callbacks.rb +2 -0
  18. data/lib/rdkafka/config.rb +13 -10
  19. data/lib/rdkafka/consumer/headers.rb +24 -7
  20. data/lib/rdkafka/consumer/message.rb +3 -1
  21. data/lib/rdkafka/consumer/partition.rb +2 -0
  22. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  23. data/lib/rdkafka/consumer.rb +100 -44
  24. data/lib/rdkafka/error.rb +9 -0
  25. data/lib/rdkafka/metadata.rb +25 -2
  26. data/lib/rdkafka/native_kafka.rb +81 -0
  27. data/lib/rdkafka/producer/delivery_handle.rb +2 -0
  28. data/lib/rdkafka/producer/delivery_report.rb +3 -1
  29. data/lib/rdkafka/producer.rb +75 -12
  30. data/lib/rdkafka/version.rb +3 -1
  31. data/lib/rdkafka.rb +3 -1
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +2 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +124 -22
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +2 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
  51. data/spec/rdkafka/producer_spec.rb +118 -17
  52. data/spec/spec_helper.rb +17 -1
  53. data.tar.gz.sig +0 -0
  54. metadata +9 -9
  55. metadata.gz.sig +0 -0
  56. data/bin/console +0 -11
  57. data/lib/rdkafka/producer/client.rb +0 -47
data/lib/rdkafka/producer.rb CHANGED
@@ -1,8 +1,15 @@
+# frozen_string_literal: true
+
 require "objspace"
 
 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
+    # Cache partitions count for 30 seconds
+    PARTITIONS_COUNT_TTL = 30
+
+    private_constant :PARTITIONS_COUNT_TTL
+
     # @private
     # Returns the current delivery callback, by default this is nil.
     #
@@ -16,12 +23,32 @@ module Rdkafka
     attr_reader :delivery_callback_arity
 
     # @private
-    def initialize(client, partitioner_name)
-      @client = client
+    def initialize(native_kafka, partitioner_name)
+      @native_kafka = native_kafka
       @partitioner_name = partitioner_name || "consistent_random"
 
-      # Makes sure, that the producer gets closed before it gets GCed by Ruby
-      ObjectSpace.define_finalizer(self, client.finalizer)
+      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+
+      @_partitions_count_cache = Hash.new do |cache, topic|
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        cache[topic] = [
+          monotonic_now,
+          topic_metadata ? topic_metadata[:partition_count] : nil
+        ]
+      end
+    end
+
+    # @return [String] producer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
     end
 
     # Set a callback that will be called every time a message is successfully produced.
@@ -38,9 +65,26 @@ module Rdkafka
 
     # Close this producer and wait for the internal poll queue to empty.
     def close
+      return if closed?
       ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
 
-      @client.close
+    # Whether this producer has closed
+    def closed?
+      @native_kafka.closed?
+    end
+
+    # Wait until all outstanding producer requests are completed, with the given timeout
+    # in seconds. Call this before closing a producer to ensure delivery of all messages.
+    #
+    # @param timeout_ms [Integer] how long should we wait for flush of all messages
+    def flush(timeout_ms=5_000)
+      closed_producer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+      end
     end
 
     # Partition count for a given topic.
@@ -50,9 +94,20 @@
     #
     # @return partition count [Integer,nil]
     #
+    # We cache the partition count for a given topic for given time
+    # This prevents us in case someone uses `partition_key` from querying for the count with
+    # each message. Instead we query once every 30 seconds at most
+    #
+    # @param topic [String] topic name
+    # @return [Integer] partition count for a given topic
    def partition_count(topic)
      closed_producer_check(__method__)
-      Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
+
+      @_partitions_count_cache.delete_if do |_, cached|
+        monotonic_now - cached.first > PARTITIONS_COUNT_TTL
+      end
+
+      @_partitions_count_cache[topic].last
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -143,10 +198,12 @@ module Rdkafka
       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
 
       # Produce the message
-      response = Rdkafka::Bindings.rd_kafka_producev(
-        @client.native,
-        *args
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_producev(
+          inner,
+          *args
+        )
+      end
 
       # Raise error if the produce call was not successful
       if response != 0
@@ -157,7 +214,6 @@ module Rdkafka
       delivery_handle
     end
 
-    # @private
     def call_delivery_callback(delivery_report, delivery_handle)
      return unless @delivery_callback
 
@@ -171,8 +227,15 @@ module Rdkafka
       callback.method(:call).arity
     end
 
+    private
+
+    def monotonic_now
+      # needed because Time.now can go backwards
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
+
     def closed_producer_check(method)
-      raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
+      raise Rdkafka::ClosedProducerError.new(method) if closed?
     end
   end
 end
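For orientation, here is a minimal usage sketch of the producer API added above (`name`, `flush`, `closed?`, and the TTL-cached `partition_count`). The broker address and topic name are placeholders, not part of the diff:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

puts producer.name                       # e.g. "rdkafka#producer-1"
puts producer.partition_count("example") # metadata lookup, cached for up to 30s

# partition_key triggers partition_count internally; the cache keeps this cheap
producer.produce(topic: "example", payload: "hi", partition_key: "user-1").wait

producer.flush(5_000) # wait up to 5s for outstanding deliveries before closing
producer.close
producer.closed?      # => true
```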
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,7 @@
+# frozen_string_literal: true
+
 module Rdkafka
-  VERSION = "0.12.3"
+  VERSION = "0.13.0"
   LIBRDKAFKA_VERSION = "2.0.2"
   LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
 end
data/lib/rdkafka.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "rdkafka/version"
 
 require "rdkafka/abstract_handle"
@@ -18,7 +20,7 @@ require "rdkafka/consumer/partition"
 require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
+require "rdkafka/native_kafka"
 require "rdkafka/producer"
-require "rdkafka/producer/client"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
data/spec/rdkafka/abstract_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::AbstractHandle do
data/spec/rdkafka/admin/create_topic_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::CreateTopicHandle do
data/spec/rdkafka/admin/create_topic_report_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::CreateTopicReport do
data/spec/rdkafka/admin/delete_topic_handle_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::DeleteTopicHandle do
data/spec/rdkafka/admin/delete_topic_report_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Admin::DeleteTopicReport do
data/spec/rdkafka/admin_spec.rb CHANGED
@@ -1,9 +1,11 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require "ostruct"
 
 describe Rdkafka::Admin do
-  let(:config) { rdkafka_config }
-  let(:admin) { config.admin }
+  let(:config) { rdkafka_config }
+  let(:admin) { config.admin }
 
   after do
     # Registry should always end up being empty
@@ -174,7 +176,6 @@ describe Rdkafka::Admin do
     end
   end
 
-
   it "deletes a topic that was newly created" do
     create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor)
     create_topic_report = create_topic_handle.wait(max_wait_timeout: 15.0)
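A minimal sketch of the admin flow these specs exercise: topic operations return handles that are waited on. The broker address and topic name are placeholders:

```ruby
# Topic management via the admin client, as exercised by admin_spec.rb.
admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

# name, partition count, replication factor
handle = admin.create_topic("example-topic", 3, 1)
handle.wait(max_wait_timeout: 15.0)

admin.delete_topic("example-topic").wait(max_wait_timeout: 15.0)
admin.close # 0.13.0 specs now close admin clients explicitly
```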
data/spec/rdkafka/bindings_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require 'zlib'
 
data/spec/rdkafka/callbacks_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Callbacks do
data/spec/rdkafka/config_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Config do
@@ -148,11 +150,24 @@ describe Rdkafka::Config do
     }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"")
   end
 
+  it "allows string partitioner key" do
+    expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
+    config = Rdkafka::Config.new("partitioner" => "murmur2")
+    config.producer.close
+  end
+
+  it "allows symbol partitioner key" do
+    expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
+    config = Rdkafka::Config.new(:partitioner => "murmur2")
+    config.producer.close
+  end
+
   it "should allow configuring zstd compression" do
     config = Rdkafka::Config.new('compression.codec' => 'zstd')
     begin
-      expect(config.producer).to be_a Rdkafka::Producer
-      config.producer.close
+      producer = config.producer
+      expect(producer).to be_a Rdkafka::Producer
+      producer.close
     rescue Rdkafka::Config::ConfigError => ex
       pending "Zstd compression not supported on this machine"
       raise ex
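Per the new specs above, the partitioner can now be configured with either a String or a Symbol key. A short sketch, with the broker address as a placeholder:

```ruby
# Both key styles select the murmur2 partitioner for the resulting producer.
producer = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "partitioner" => "murmur2" # :partitioner => "murmur2" works as well
).producer
```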
data/spec/rdkafka/consumer/headers_spec.rb ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Consumer::Headers do
+  let(:headers) do
+    { # Note String keys!
+      "version" => "2.1.3",
+      "type" => "String"
+    }
+  end
+  let(:native_message) { double('native message') }
+  let(:headers_ptr) { double('headers pointer') }
+
+  describe '.from_native' do
+    before do
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(native_message, anything) do |_, headers_ptrptr|
+        expect(headers_ptrptr).to receive(:read_pointer).and_return(headers_ptr)
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+          expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: headers.keys[0]))
+          expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[0].size)
+          expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers.values[0]))
+          Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+        end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+          expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: headers.keys[1]))
+          expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[1].size)
+          expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers.values[1]))
+          Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+        end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 2, anything, anything, anything)
+        .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
+    end
+
+    subject { described_class.from_native(native_message) }
+
+    it { is_expected.to eq(headers) }
+    it { is_expected.to be_frozen }
+
+    it 'allows String key' do
+      expect(subject['version']).to eq("2.1.3")
+    end
+
+    it 'allows Symbol key, but warns' do
+      expect(Kernel).to \
+        receive(:warn).with("rdkafka deprecation warning: header access with Symbol key :version treated as a String. " \
+          "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+      expect(subject[:version]).to eq("2.1.3")
+    end
+  end
+end
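This new spec pins down the 0.13.0 header semantics: headers come back as a frozen Hash with String keys, and Symbol access still resolves but emits a deprecation warning. A consumer-side sketch, assuming `consumer` is an already-subscribed `Rdkafka::Consumer`:

```ruby
# Headers on consumed messages now use String keys.
message = consumer.poll(250)

message.headers             # => { "version" => "2.1.3", ... } (frozen)
message.headers["version"]  # => "2.1.3"
message.headers[:version]   # => "2.1.3", but prints a deprecation warning;
                            #    Symbol keys are slated to break in version 1
```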
data/spec/rdkafka/consumer/message_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::Message do
data/spec/rdkafka/consumer/partition_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::Partition do
data/spec/rdkafka/consumer/topic_partition_list_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::Consumer::TopicPartitionList do
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require "ostruct"
 require 'securerandom'
@@ -9,6 +11,10 @@ describe Rdkafka::Consumer do
   after { consumer.close }
   after { producer.close }
 
+  describe '#name' do
+    it { expect(consumer.name).to include('rdkafka#consumer-') }
+  end
+
   describe "#subscribe, #unsubscribe and #subscription" do
     it "should subscribe, unsubscribe and return the subscription" do
       expect(consumer.subscription).to be_empty
@@ -53,7 +59,7 @@ describe Rdkafka::Consumer do
 
   describe "#pause and #resume" do
     context "subscription" do
-      let(:timeout) { 1000 }
+      let(:timeout) { 2000 }
 
       before { consumer.subscribe("consume_test_topic") }
       after { consumer.unsubscribe }
@@ -268,6 +274,28 @@ describe Rdkafka::Consumer do
     end
   end
 
+  describe '#assignment_lost?' do
+    it "should not return true as we do have an assignment" do
+      consumer.subscribe("consume_test_topic")
+      expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+        list.add_topic("consume_test_topic")
+      end
+
+      expect(consumer.assignment_lost?).to eq false
+      consumer.unsubscribe
+    end
+
+    it "should not return true after voluntary unsubscribing" do
+      consumer.subscribe("consume_test_topic")
+      expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+        list.add_topic("consume_test_topic")
+      end
+
+      consumer.unsubscribe
+      expect(consumer.assignment_lost?).to eq false
+    end
+  end
+
   describe "#close" do
     it "should close a consumer" do
       consumer.subscribe("consume_test_topic")
@@ -593,7 +621,7 @@ describe Rdkafka::Consumer do
   end
 
   describe "#poll with headers" do
-    it "should return message with headers" do
+    it "should return message with headers using string keys (when produced with symbol keys)" do
       report = producer.produce(
         topic: "consume_test_topic",
         key: "key headers",
@@ -603,7 +631,20 @@ describe Rdkafka::Consumer do
       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
       expect(message).to be
       expect(message.key).to eq('key headers')
-      expect(message.headers).to include(foo: 'bar')
+      expect(message.headers).to include('foo' => 'bar')
+    end
+
+    it "should return message with headers using string keys (when produced with string keys)" do
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: { 'foo' => 'bar' }
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers).to include('foo' => 'bar')
     end
 
     it "should return message with no headers" do
@@ -698,7 +739,7 @@ describe Rdkafka::Consumer do
     n.times do |i|
       handles << producer.produce(
         topic: topic_name,
-        payload: Time.new.to_f.to_s,
+        payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
        key: i.to_s,
        partition: 0
      )
@@ -723,7 +764,8 @@ describe Rdkafka::Consumer do
       #
       # This is, in effect, an integration test and the subsequent specs are
       # unit tests.
-      create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+      admin = rdkafka_config.admin
+      create_topic_handle = admin.create_topic(topic_name, 1, 1)
       create_topic_handle.wait(max_wait_timeout: 15.0)
       consumer.subscribe(topic_name)
       produce_n 42
@@ -736,6 +778,7 @@ describe Rdkafka::Consumer do
       expect(all_yields.flatten.size).to eq 42
       expect(all_yields.size).to be > 4
       expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
+      admin.close
     end
 
     it "should batch poll results and yield arrays of messages" do
@@ -778,13 +821,15 @@ describe Rdkafka::Consumer do
     end
 
     it "should yield [] if nothing is received before the timeout" do
-      create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+      admin = rdkafka_config.admin
+      create_topic_handle = admin.create_topic(topic_name, 1, 1)
       create_topic_handle.wait(max_wait_timeout: 15.0)
       consumer.subscribe(topic_name)
       consumer.each_batch do |batch|
         expect(batch).to eq([])
         break
       end
+      admin.close
     end
 
     it "should yield batchs of max_items in size if messages are already fetched" do
@@ -861,6 +906,7 @@ describe Rdkafka::Consumer do
         expect(batches_yielded.first.size).to eq 2
         expect(exceptions_yielded.flatten.size).to eq 1
         expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
+        consumer.close
       end
     end
 
@@ -902,6 +948,7 @@ describe Rdkafka::Consumer do
         expect(each_batch_iterations).to eq 0
         expect(batches_yielded.size).to eq 0
        expect(exceptions_yielded.size).to eq 0
+        consumer.close
       end
     end
   end
@@ -916,11 +963,11 @@ describe Rdkafka::Consumer do
     context "with a working listener" do
       let(:listener) do
         Struct.new(:queue) do
-          def on_partitions_assigned(consumer, list)
+          def on_partitions_assigned(list)
             collect(:assign, list)
           end
 
-          def on_partitions_revoked(consumer, list)
+          def on_partitions_revoked(list)
             collect(:revoke, list)
           end
 
@@ -944,12 +991,12 @@ describe Rdkafka::Consumer do
     context "with a broken listener" do
      let(:listener) do
        Struct.new(:queue) do
-          def on_partitions_assigned(consumer, list)
+          def on_partitions_assigned(list)
            queue << :assigned
            raise 'boom'
          end
 
-          def on_partitions_revoked(consumer, list)
+          def on_partitions_revoked(list)
            queue << :revoked
            raise 'boom'
          end
@@ -962,18 +1009,6 @@ describe Rdkafka::Consumer do
         expect(listener.queue).to eq([:assigned, :revoked])
       end
     end
-
-    def notify_listener(listener)
-      # 1. subscribe and poll
-      consumer.subscribe("consume_test_topic")
-      wait_for_assignment(consumer)
-      consumer.poll(100)
-
-      # 2. unsubscribe
-      consumer.unsubscribe
-      wait_for_unassignment(consumer)
-      consumer.close
-    end
   end
 
   context "methods that should not be called after a consumer has been closed" do
@@ -993,6 +1028,7 @@ describe Rdkafka::Consumer do
       :assignment => nil,
       :committed => [],
       :query_watermark_offsets => [ nil, nil ],
+      :assignment_lost? => []
     }.each do |method, args|
       it "raises an exception if #{method} is called" do
         expect {
@@ -1005,4 +1041,70 @@ describe Rdkafka::Consumer do
       end
     end
   end
+
+  it "provides a finalizer that closes the native kafka client" do
+    expect(consumer.closed?).to eq(false)
+
+    consumer.finalizer.call("some-ignored-object-id")
+
+    expect(consumer.closed?).to eq(true)
+  end
+
+  context "when the rebalance protocol is cooperative" do
+    let(:consumer) do
+      config = rdkafka_consumer_config(
+        {
+          :"partition.assignment.strategy" => "cooperative-sticky",
+          :"debug" => "consumer",
+        }
+      )
+      config.consumer_rebalance_listener = listener
+      config.consumer
+    end
+
+    let(:listener) do
+      Struct.new(:queue) do
+        def on_partitions_assigned(list)
+          collect(:assign, list)
+        end
+
+        def on_partitions_revoked(list)
+          collect(:revoke, list)
+        end
+
+        def collect(name, list)
+          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
+          queue << ([name] + partitions)
+        end
+      end.new([])
+    end
+
+    it "should be able to assign and unassign partitions using the cooperative partition assignment APIs" do
+      notify_listener(listener) do
+        handles = []
+        10.times do
+          handles << producer.produce(
+            topic: "consume_test_topic",
+            payload: "payload 1",
+            key: "key 1",
+            partition: 0
+          )
+        end
+        handles.each(&:wait)
+
+        consumer.subscribe("consume_test_topic")
+        # Check the first 10 messages. Then close the consumer, which
+        # should break the each loop.
+        consumer.each_with_index do |message, i|
+          expect(message).to be_a Rdkafka::Consumer::Message
+          break if i == 10
+        end
+      end
+
+      expect(listener.queue).to eq([
+        [:assign, "consume_test_topic", 0, 1, 2],
+        [:revoke, "consume_test_topic", 0, 1, 2]
+      ])
+    end
+  end
 end
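The listener hunks above show a breaking change: rebalance callbacks now take a single argument, dropping the leading `consumer` parameter. A minimal 0.13.0-style listener sketch; broker, group id, and topic are placeholders:

```ruby
# Rebalance listener for karafka-rdkafka 0.13.0: one-argument callbacks.
class RebalanceListener
  def on_partitions_assigned(list)  # was on_partitions_assigned(consumer, list)
    puts "assigned: #{list.to_h.keys}"
  end

  def on_partitions_revoked(list)   # was on_partitions_revoked(consumer, list)
    puts "revoked: #{list.to_h.keys}"
  end
end

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id" => "example-group",
  "partition.assignment.strategy" => "cooperative-sticky"
)
config.consumer_rebalance_listener = RebalanceListener.new
consumer = config.consumer
consumer.subscribe("example-topic")
```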
data/spec/rdkafka/error_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 
 describe Rdkafka::RdkafkaError do
data/spec/rdkafka/metadata_spec.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "spec_helper"
 require "securerandom"
 