rdkafka 0.11.0 → 0.12.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 55566df04f391a8ecbd2a51e7c2e1d91dc31397f90fcb2777b26d90b4774a290
- data.tar.gz: 93407bbc327703e231bc9da7e2ebb74ca52089f14a50c6491b20d98e98ec7b07
+ metadata.gz: 821523c304fc7a1fbb2c7be2b58d98d56600b645b89fdb4093f976418650035d
+ data.tar.gz: '039b8e345fd8be5f295a293d64466071dbefd77d81b01460abb0fcf343a6bed3'
  SHA512:
- metadata.gz: 1ad2f39610e53f27b9e192e36d5420e15b7d2807ca628cd80a48ba5a9e60ab6c2601a2c10303767fc048f5be812656e40b8fa6decbdf2fd249cf56854dd28dae
- data.tar.gz: d27ef3a56b024f2c120eb0b348111e4d4f0fac19e5b818bfaa12503b7af033d1fa1f72543b3f306b07d91ced1de9aea487fff48df482cb4e75cd207f075f8c7a
+ metadata.gz: 2c7ac2199a63aacd3b1420890981ed5d953ae5cdadb874886cc4e396fa1fd8f69333633319beef35a05a002d75d22335a526a126e518cc3fbbb877a1c11ef2f7
+ data.tar.gz: 5d23c6beec3759877013b040018111453e05c41238014b07a27c1a9d8b96e8af3bc037aacd1ebe89f856435cf0afb8e34a9f89443f87cf1a3682736efb79b4bd
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
+ # 0.12.0
+ * Bumps librdkafka to 1.9.0
+
  # 0.11.0
  * Upgrade librdkafka to 1.8.2
  * Bump supported minimum Ruby version to 2.6
data/lib/rdkafka/admin.rb CHANGED
@@ -90,7 +90,7 @@ module Rdkafka
  admin_options_ptr,
  queue_ptr
  )
- rescue Exception => err
+ rescue Exception
  CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
  raise
  ensure
@@ -140,7 +140,7 @@ module Rdkafka
  admin_options_ptr,
  queue_ptr
  )
- rescue Exception => err
+ rescue Exception
  DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
  raise
  ensure
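
Both rescue clauses bound the exception to err without ever using it; the new form drops the binding, and the bare raise still re-raises the in-flight exception. A minimal stand-alone sketch of the pattern (the registry and handle names are illustrative, not the gem's API):

    # Clean up a registry entry on any failure, then re-raise.
    # A bare `raise` inside `rescue` re-raises the current exception ($!),
    # so no local binding is needed when the exception isn't inspected.
    def track_with_cleanup(registry, key, handle)
      registry[key] = handle
      yield
    rescue Exception
      registry.delete(key)
      raise
    end
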
data/lib/rdkafka/bindings.rb CHANGED
@@ -246,14 +246,21 @@ module Rdkafka
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void

  # Partitioner
- attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+ PARTITIONERS = %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random).each_with_object({}) do |name, hsh|
+ method_name = "rd_kafka_msg_partitioner_#{name}".to_sym
+ attach_function method_name, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+ hsh[name] = method_name
+ end

- def self.partitioner(str, partition_count)
+ def self.partitioner(str, partition_count, partitioner_name = "consistent_random")
  # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
  return -1 unless partition_count&.nonzero?

  str_ptr = FFI::MemoryPointer.from_string(str)
- rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
+ method_name = PARTITIONERS.fetch(partitioner_name) do
+ raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
+ end
+ public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
  end

  # Create Topics
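
The hunk above replaces the single hard-coded consistent_random binding with a PARTITIONERS table covering every built-in librdkafka partitioner and lets callers pick one by name. A minimal sketch of exercising the new lookup directly (the key and partition count are illustrative):

    require "rdkafka"

    # The third argument defaults to "consistent_random", so existing
    # callers keep their old behavior.
    %w(consistent_random murmur2 fnv1a).each do |name|
      partition = Rdkafka::Bindings.partitioner("user-42", 8, name)
      puts "#{name} => partition #{partition}"
    end

    # An unrecognized name raises Rdkafka::Config::ConfigError.
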
data/lib/rdkafka/callbacks.rb CHANGED
@@ -97,7 +97,7 @@ module Rdkafka
  delivery_handle[:pending] = false
  # Call delivery callback on opaque
  if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
- opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+ opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]), delivery_handle)
  end
  end
  end
data/lib/rdkafka/config.rb CHANGED
@@ -179,7 +179,7 @@ module Rdkafka
  # Set callback to receive delivery reports on config
  Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
  # Return producer with Kafka client
- Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
+ Rdkafka::Producer.new(Rdkafka::Producer::Client.new(native_kafka(config, :rd_kafka_producer)), self[:partitioner]).tap do |producer|
  opaque.producer = producer
  end
  end
@@ -278,8 +278,8 @@ module Rdkafka
  attr_accessor :producer
  attr_accessor :consumer_rebalance_listener

- def call_delivery_callback(delivery_handle)
- producer.call_delivery_callback(delivery_handle) if producer
+ def call_delivery_callback(delivery_report, delivery_handle)
+ producer.call_delivery_callback(delivery_report, delivery_handle) if producer
  end

  def call_on_partitions_assigned(consumer, list)
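
Config#producer now passes self[:partitioner] through to the producer, so the partitioner can be chosen at configuration time. A hedged sketch of that flow (the broker address and topic are illustrative, and the :partitioner key is assumed to be the config entry that Config#[] resolves in the hunk above):

    require "rdkafka"

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      :partitioner => "murmur2" # read via self[:partitioner] when the producer is built
    )

    producer = config.producer
    handle = producer.produce(
      topic: "greetings",
      payload: "hello",
      partition_key: "user-42" # routed through the murmur2 partitioner
    )
    handle.wait(max_wait_timeout: 10)
    producer.close
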
data/lib/rdkafka/producer/client.rb ADDED
@@ -0,0 +1,47 @@
+ module Rdkafka
+ class Producer
+ class Client
+ def initialize(native)
+ @native = native
+
+ # Start thread to poll client for delivery callbacks
+ @polling_thread = Thread.new do
+ loop do
+ Rdkafka::Bindings.rd_kafka_poll(native, 250)
+ # Exit thread if closing and the poll queue is empty
+ if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(native) == 0
+ break
+ end
+ end
+ end
+ @polling_thread.abort_on_exception = true
+ @polling_thread[:closing] = false
+ end
+
+ def native
+ @native
+ end
+
+ def finalizer
+ ->(_) { close }
+ end
+
+ def closed?
+ @native.nil?
+ end
+
+ def close(object_id=nil)
+ return unless @native
+
+ # Indicate to polling thread that we're closing
+ @polling_thread[:closing] = true
+ # Wait for the polling thread to finish up
+ @polling_thread.join
+
+ Rdkafka::Bindings.rd_kafka_destroy(@native)
+
+ @native = nil
+ end
+ end
+ end
+ end
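
The new Producer::Client owns the native handle and the polling thread, and its finalizer lambda captures only the client. That matters for garbage collection: a finalizer that closed over the producer itself would keep the producer reachable forever. A minimal sketch of the pattern with stand-in classes (names are illustrative, not the gem's API):

    # Stand-in for Producer::Client: owns a resource and hands out a
    # finalizer lambda that references only the client.
    class StubClient
      def initialize
        @open = true
      end

      def finalizer
        ->(_object_id) { close }
      end

      def close
        puts "native handle destroyed" if @open
        @open = false
      end
    end

    # Stand-in for Producer: registers the client's finalizer against itself.
    class StubProducer
      def initialize(client)
        @client = client
        # The lambda captures `client`, not `self`, so this object stays
        # collectible and the client is closed when it is finalized.
        ObjectSpace.define_finalizer(self, client.finalizer)
      end
    end

    StubProducer.new(StubClient.new)
    GC.start # the finalizer runs once the producer is collected
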
data/lib/rdkafka/producer.rb CHANGED
@@ -1,4 +1,4 @@
- require "securerandom"
+ require "objspace"

  module Rdkafka
  # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
@@ -10,29 +10,22 @@ module Rdkafka
  attr_reader :delivery_callback

  # @private
- def initialize(native_kafka)
- @id = SecureRandom.uuid
- @closing = false
- @native_kafka = native_kafka
+ # Returns the number of arguments accepted by the callback, by default this is nil.
+ #
+ # @return [Integer, nil]
+ attr_reader :delivery_callback_arity
+
+ # @private
+ def initialize(client, partitioner_name)
+ @client = client
+ @partitioner_name = partitioner_name || "consistent_random"

  # Makes sure, that the producer gets closed before it gets GCed by Ruby
- ObjectSpace.define_finalizer(@id, proc { close })
-
- # Start thread to poll client for delivery callbacks
- @polling_thread = Thread.new do
- loop do
- Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
- # Exit thread if closing and the poll queue is empty
- if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
- break
- end
- end
- end
- @polling_thread.abort_on_exception = true
+ ObjectSpace.define_finalizer(self, client.finalizer)
  end

  # Set a callback that will be called every time a message is successfully produced.
- # The callback is called with a {DeliveryReport}
+ # The callback is called with a {DeliveryReport} and {DeliveryHandle}
  #
  # @param callback [Proc, #call] The callback
  #
@@ -40,20 +33,14 @@
  def delivery_callback=(callback)
  raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
  @delivery_callback = callback
+ @delivery_callback_arity = arity(callback)
  end

  # Close this producer and wait for the internal poll queue to empty.
  def close
- ObjectSpace.undefine_finalizer(@id)
+ ObjectSpace.undefine_finalizer(self)

- return unless @native_kafka
-
- # Indicate to polling thread that we're closing
- @closing = true
- # Wait for the polling thread to finish up
- @polling_thread.join
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
- @native_kafka = nil
+ @client.close
  end

  # Partition count for a given topic.
@@ -65,7 +52,7 @@
  #
  def partition_count(topic)
  closed_producer_check(__method__)
- Rdkafka::Metadata.new(@native_kafka, topic).topics&.first[:partition_count]
+ Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
  end

  # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -106,7 +93,7 @@
  if partition_key
  partition_count = partition_count(topic)
  # If the topic is not present, set to -1
- partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
+ partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
  end

  # If partition is nil, use -1 to let librdafka set the partition randomly or
@@ -157,7 +144,7 @@

  # Produce the message
  response = Rdkafka::Bindings.rd_kafka_producev(
- @native_kafka,
+ @client.native,
  *args
  )

@@ -171,12 +158,21 @@
  end

  # @private
- def call_delivery_callback(delivery_handle)
- @delivery_callback.call(delivery_handle) if @delivery_callback
+ def call_delivery_callback(delivery_report, delivery_handle)
+ return unless @delivery_callback
+
+ args = [delivery_report, delivery_handle].take(@delivery_callback_arity)
+ @delivery_callback.call(*args)
+ end
+
+ def arity(callback)
+ return callback.arity if callback.respond_to?(:arity)
+
+ callback.method(:call).arity
  end

  def closed_producer_check(method)
- raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.nil?
+ raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
  end
  end
  end
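
Since call_delivery_callback now trims its arguments with Array#take to the callback's arity, one-argument callbacks from 0.11 keep working while two-argument callbacks also receive the DeliveryHandle returned by #produce. A hedged sketch of both shapes (the broker address and topic name are illustrative):

    require "rdkafka"

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

    # Arity 1, pre-0.12 style: only the DeliveryReport is passed.
    producer.delivery_callback = ->(report) do
      puts "partition #{report.partition}, offset #{report.offset}"
    end

    # Arity 2: the DeliveryHandle from #produce is passed as well.
    producer.delivery_callback = ->(report, handle) do
      puts "#{handle} delivered at offset #{report.offset}"
    end

    handle = producer.produce(topic: "greetings", payload: "hello", key: "k1")
    handle.wait(max_wait_timeout: 10)
    producer.close
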
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,5 @@
  module Rdkafka
- VERSION = "0.11.0"
- LIBRDKAFKA_VERSION = "1.8.2"
- LIBRDKAFKA_SOURCE_SHA256 = "6a747d293a7a4613bd2897e28e8791476fbe1ae7361f2530a876e0fd483482a6"
+ VERSION = "0.12.0"
+ LIBRDKAFKA_VERSION = "1.9.0"
+ LIBRDKAFKA_SOURCE_SHA256 = "59b6088b69ca6cf278c3f9de5cd6b7f3fd604212cd1c59870bc531c54147e889"
  end
data/lib/rdkafka.rb CHANGED
@@ -17,5 +17,6 @@ require "rdkafka/consumer/topic_partition_list"
  require "rdkafka/error"
  require "rdkafka/metadata"
  require "rdkafka/producer"
+ require "rdkafka/producer/client"
  require "rdkafka/producer/delivery_handle"
  require "rdkafka/producer/delivery_report"
data/rdkafka.gemspec CHANGED
@@ -18,7 +18,7 @@ Gem::Specification.new do |gem|
  gem.extensions = %w(ext/Rakefile)

  gem.add_dependency 'ffi', '~> 1.15'
- gem.add_dependency 'mini_portile2', '~> 2.7'
+ gem.add_dependency 'mini_portile2', '~> 2.6'
  gem.add_dependency 'rake', '> 12'

  gem.add_development_dependency 'pry'
spec/rdkafka/bindings_spec.rb CHANGED
@@ -76,6 +76,13 @@ describe Rdkafka::Bindings do
  result_2 = (Zlib.crc32(partition_key) % partition_count)
  expect(result_1).to eq(result_2)
  end
+
+ it "should return the partition calculated by the specified partitioner" do
+ result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
+ ptr = FFI::MemoryPointer.from_string(partition_key)
+ result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
+ expect(result_1).to eq(result_2)
+ end
  end

  describe "stats callback" do
spec/rdkafka/consumer_spec.rb CHANGED
@@ -241,7 +241,7 @@ describe Rdkafka::Consumer do

  it "should return the assignment when subscribed" do
  # Make sure there's a message
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload 1",
  key: "key 1",
@@ -272,7 +272,7 @@
  it "should close a consumer" do
  consumer.subscribe("consume_test_topic")
  100.times do |i|
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload #{i}",
  key: "key #{i}",
@@ -289,7 +289,7 @@
  describe "#commit, #committed and #store_offset" do
  # Make sure there's a stored offset
  let!(:report) do
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload 1",
  key: "key 1",
@@ -831,7 +831,6 @@
  )
  consumer = config.consumer
  consumer.subscribe(topic_name)
- loop_count = 0
  batches_yielded = []
  exceptions_yielded = []
  each_batch_iterations = 0
@@ -875,7 +874,6 @@
  )
  consumer = config.consumer
  consumer.subscribe(topic_name)
- loop_count = 0
  batches_yielded = []
  exceptions_yielded = []
  each_batch_iterations = 0
spec/rdkafka/producer/client_spec.rb ADDED
@@ -0,0 +1,145 @@
+ require "spec_helper"
+
+ describe Rdkafka::Producer::Client do
+ let(:config) { rdkafka_producer_config }
+ let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) }
+ let(:closing) { false }
+ let(:thread) { double(Thread) }
+
+ subject(:client) { described_class.new(native) }
+
+ before do
+ allow(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).and_call_original
+ allow(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(instance_of(FFI::Pointer)).and_return(0).and_call_original
+ allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy)
+ allow(Thread).to receive(:new).and_return(thread)
+
+ allow(thread).to receive(:[]=).with(:closing, anything)
+ allow(thread).to receive(:join)
+ allow(thread).to receive(:abort_on_exception=).with(anything)
+ end
+
+ context "defaults" do
+ it "sets the thread to abort on exception" do
+ expect(thread).to receive(:abort_on_exception=).with(true)
+
+ client
+ end
+
+ it "sets the thread `closing` flag to false" do
+ expect(thread).to receive(:[]=).with(:closing, false)
+
+ client
+ end
+ end
+
+ context "the polling thread" do
+ it "is created" do
+ expect(Thread).to receive(:new)
+
+ client
+ end
+
+ it "polls the native with default 250ms timeout" do
+ polling_loop_expects do
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).at_least(:once)
+ end
+ end
+
+ it "check the out queue of native client" do
+ polling_loop_expects do
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(native).at_least(:once)
+ end
+ end
+ end
+
+ def polling_loop_expects(&block)
+ Thread.current[:closing] = true # this forces the loop break with line #12
+
+ allow(Thread).to receive(:new).and_yield do |_|
+ block.call
+ end.and_return(thread)
+
+ client
+ end
+
+ it "exposes `native` client" do
+ expect(client.native).to eq(native)
+ end
+
+ context "when client was not yet closed (`nil`)" do
+ it "is not closed" do
+ expect(client.closed?).to eq(false)
+ end
+
+ context "and attempt to close" do
+ it "calls the `destroy` binding" do
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native)
+
+ client.close
+ end
+
+ it "indicates to the polling thread that it is closing" do
+ expect(thread).to receive(:[]=).with(:closing, true)
+
+ client.close
+ end
+
+ it "joins the polling thread" do
+ expect(thread).to receive(:join)
+
+ client.close
+ end
+
+ it "closes and unassign the native client" do
+ client.close
+
+ expect(client.native).to eq(nil)
+ expect(client.closed?).to eq(true)
+ end
+ end
+ end
+
+ context "when client was already closed" do
+ before { client.close }
+
+ it "is closed" do
+ expect(client.closed?).to eq(true)
+ end
+
+ context "and attempt to close again" do
+ it "does not call the `destroy` binding" do
+ expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy)
+
+ client.close
+ end
+
+ it "does not indicate to the polling thread that it is closing" do
+ expect(thread).not_to receive(:[]=).with(:closing, true)
+
+ client.close
+ end
+
+ it "does not join the polling thread" do
+ expect(thread).not_to receive(:join)
+
+ client.close
+ end
+
+ it "does not close and unassign the native client again" do
+ client.close
+
+ expect(client.native).to eq(nil)
+ expect(client.closed?).to eq(true)
+ end
+ end
+ end
+
+ it "provide a finalizer Proc that closes the `native` client" do
+ expect(client.closed?).to eq(false)
+
+ client.finalizer.call("some-ignored-object-id")
+
+ expect(client.closed?).to eq(true)
+ end
+ end
spec/rdkafka/producer_spec.rb CHANGED
@@ -49,6 +49,27 @@ describe Rdkafka::Producer do
  # Callback should have been called
  expect(@callback_called).to be true
  end
+
+ it "should provide handle" do
+ @callback_handle = nil
+
+ producer.delivery_callback = lambda { |_, handle| @callback_handle = handle }
+
+ # Produce a message
+ handle = producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload",
+ key: "key"
+ )
+
+ # Wait for it to be delivered
+ handle.wait(max_wait_timeout: 15)
+
+ # Join the producer thread.
+ producer.close
+
+ expect(handle).to be @callback_handle
+ end
  end

  context "with a callable object" do
@@ -93,6 +114,36 @@ describe Rdkafka::Producer do
  expect(called_report.first.partition).to eq 1
  expect(called_report.first.offset).to be >= 0
  end
+
+ it "should provide handle" do
+ callback_handles = []
+ callback = Class.new do
+ def initialize(callback_handles)
+ @callback_handles = callback_handles
+ end
+
+ def call(_, handle)
+ @callback_handles << handle
+ end
+ end
+ producer.delivery_callback = callback.new(callback_handles)
+
+ # Produce a message
+ handle = producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload",
+ key: "key"
+ )
+
+ # Wait for it to be delivered
+ handle.wait(max_wait_timeout: 15)
+
+ # Join the producer thread.
+ producer.close
+
+ # Callback should have been called
+ expect(handle).to be callback_handles.first
+ end
  end

  it "should not accept a callback that's not callable" do
@@ -376,13 +427,9 @@ describe Rdkafka::Producer do
  end
  end

- it "should produce a message in a forked process" do
+ it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
  # Fork, produce a message, send the report over a pipe and
  # wait for and check the message in the main process.
-
- # Kernel#fork is not available in JRuby
- skip if defined?(JRUBY_VERSION)
-
  reader, writer = IO.pipe

  fork do
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: rdkafka
  version: !ruby/object:Gem::Version
- version: 0.11.0
+ version: 0.12.0
  platform: ruby
  authors:
  - Thijs Cadier
- autorequire:
+ autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-11-17 00:00:00.000000000 Z
+ date: 2022-06-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
@@ -30,14 +30,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '2.7'
+ version: '2.6'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '2.7'
+ version: '2.6'
  - !ruby/object:Gem::Dependency
  name: rake
  requirement: !ruby/object:Gem::Requirement
@@ -177,6 +177,7 @@ files:
  - lib/rdkafka/error.rb
  - lib/rdkafka/metadata.rb
  - lib/rdkafka/producer.rb
+ - lib/rdkafka/producer/client.rb
  - lib/rdkafka/producer/delivery_handle.rb
  - lib/rdkafka/producer/delivery_report.rb
  - lib/rdkafka/version.rb
@@ -196,6 +197,7 @@ files:
  - spec/rdkafka/consumer_spec.rb
  - spec/rdkafka/error_spec.rb
  - spec/rdkafka/metadata_spec.rb
+ - spec/rdkafka/producer/client_spec.rb
  - spec/rdkafka/producer/delivery_handle_spec.rb
  - spec/rdkafka/producer/delivery_report_spec.rb
  - spec/rdkafka/producer_spec.rb
@@ -204,7 +206,7 @@ homepage: https://github.com/thijsc/rdkafka-ruby
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -219,8 +221,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.4
- signing_key:
+ rubygems_version: 3.0.3
+ signing_key:
  specification_version: 4
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
  It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
@@ -241,6 +243,7 @@ test_files:
  - spec/rdkafka/consumer_spec.rb
  - spec/rdkafka/error_spec.rb
  - spec/rdkafka/metadata_spec.rb
+ - spec/rdkafka/producer/client_spec.rb
  - spec/rdkafka/producer/delivery_handle_spec.rb
  - spec/rdkafka/producer/delivery_report_spec.rb
  - spec/rdkafka/producer_spec.rb