rdkafka 0.11.1 → 0.12.0.beta.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9b7042f241be5aad91c403f61aabb8e8ba87afa46674db8ba58c487fe01f88c9
- data.tar.gz: 5cb00dc3dc8b4069a8e62cda9271e290d1371434332767270426488dedcff641
+ metadata.gz: a52f754380304d2a9bef30ecd2cbf0f33347cdb02254758f5452d9b9880a5bae
+ data.tar.gz: ed3f56449ad95f679bb5e686117ff6590533bd70965679cedbf952a3d30e0cfb
  SHA512:
- metadata.gz: 2b287a0d81aca7909702969778fb64585b6808860971d50e1c5540eb0c43cf73617f9e701605b816e8f52a81e39d9909512e1d113f6a550555e6ca503dc5ff58
- data.tar.gz: 7924ab06f52646168b52d0478b605484e4aa1a3674ace4f97af5b56588aea6dec8e614d3258b0670f92f18a905509ae6982d2c8999fa719f8ea7451e515ecb3d
+ metadata.gz: 8e5d72b357d60352cabcac36e0257fc72d1ffcfd5f13bcca5652add92f3cac421941af3b2ae19015575490021287218a06755268727f3e07de5826c30d7e9f98
+ data.tar.gz: 84a475f9167d598ad8499412a44c16019beaf77ec297f6d9c0879f27d0bcae079b0006bc880334c7e9d7310d9110250db69bdface85937cb1ec0abd5b56c4b2b
data/CHANGELOG.md CHANGED
@@ -1,6 +1,3 @@
- # 0.11.1
- * Use mini_portile2 2.6, otherwise you can't run nokogiri and rdkafka at the same time.
-
  # 0.11.0
  * Upgrade librdkafka to 1.8.2
  * Bump supported minimum Ruby version to 2.6
data/ext/Rakefile CHANGED
@@ -28,16 +28,25 @@ task :default => :clean do
  from_extension = 'so.1'
  to_extension = 'so'
  end
+
+ # Get path to the compiled library
  lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
- FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
+
+ # Get target dir
+ target_dir = ENV["RUBYARCHDIR"] || File.dirname(__FILE__)
+
+ # Move the compliled library there
+ FileUtils.mv(lib_path, File.join(target_dir, "librdkafka.#{to_extension}"))
+
  # Cleanup files created by miniportile we don't need in the gem
- FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
- FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
+ FileUtils.rm_rf File.join(target_dir, "tmp")
+ FileUtils.rm_rf File.join(target_dir, "ports")
  end

  task :clean do
- FileUtils.rm_f File.join(File.dirname(__FILE__), "librdkafka.dylib")
- FileUtils.rm_f File.join(File.dirname(__FILE__), "librdkafka.so")
+ target_dir = ENV["RUBYARCHDIR"] || File.dirname(__FILE__)
+ FileUtils.rm_f File.join(target_dir, "librdkafka.dylib")
+ FileUtils.rm_f File.join(target_dir, "librdkafka.so")
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
  end
data/lib/rdkafka/admin.rb CHANGED
@@ -90,7 +90,7 @@ module Rdkafka
  admin_options_ptr,
  queue_ptr
  )
- rescue Exception => err
+ rescue Exception
  CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
  raise
  ensure
@@ -140,7 +140,7 @@ module Rdkafka
  admin_options_ptr,
  queue_ptr
  )
- rescue Exception => err
+ rescue Exception
  DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
  raise
  ensure
data/lib/rdkafka/bindings.rb CHANGED
@@ -246,14 +246,21 @@ module Rdkafka
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void

  # Partitioner
- attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+ PARTITIONERS = %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random).each_with_object({}) do |name, hsh|
+ method_name = "rd_kafka_msg_partitioner_#{name}".to_sym
+ attach_function method_name, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+ hsh[name] = method_name
+ end

- def self.partitioner(str, partition_count)
+ def self.partitioner(str, partition_count, partitioner_name = "consistent_random")
  # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
  return -1 unless partition_count&.nonzero?

  str_ptr = FFI::MemoryPointer.from_string(str)
- rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
+ method_name = PARTITIONERS.fetch(partitioner_name) do
+ raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
+ end
+ public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
  end

  # Create Topics
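
The seven names registered in PARTITIONERS map directly onto librdkafka's built-in partitioners. A minimal usage sketch (mirroring the spec change further down; the key and partition count are arbitrary placeholders):

    require "rdkafka"

    # Partition chosen by the murmur2 partitioner for this key; the default
    # remains "consistent_random" when no name is passed.
    Rdkafka::Bindings.partitioner("example-key", 8, "murmur2") # => 0..7

    # An unknown name raises Rdkafka::Config::ConfigError via PARTITIONERS.fetch.
    Rdkafka::Bindings.partitioner("example-key", 8, "sha256")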
data/lib/rdkafka/config.rb CHANGED
@@ -179,7 +179,7 @@ module Rdkafka
  # Set callback to receive delivery reports on config
  Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
  # Return producer with Kafka client
- Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
+ Rdkafka::Producer.new(Rdkafka::Producer::Client.new(native_kafka(config, :rd_kafka_producer)), self[:partitioner]).tap do |producer|
  opaque.producer = producer
  end
  end
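
Since #producer now reads self[:partitioner], the partitioner is chosen per producer from its config. A sketch, assuming the :partitioner key can simply be included in the config hash (the broker address is a placeholder):

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092", # placeholder broker
      :partitioner => "murmur2_random"         # any name from Bindings::PARTITIONERS
    )
    producer = config.producer # the name travels into Producer#initialize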
data/lib/rdkafka/producer/client.rb ADDED
@@ -0,0 +1,47 @@
+ module Rdkafka
+   class Producer
+     class Client
+       def initialize(native)
+         @native = native
+
+         # Start thread to poll client for delivery callbacks
+         @polling_thread = Thread.new do
+           loop do
+             Rdkafka::Bindings.rd_kafka_poll(native, 250)
+             # Exit thread if closing and the poll queue is empty
+             if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(native) == 0
+               break
+             end
+           end
+         end
+         @polling_thread.abort_on_exception = true
+         @polling_thread[:closing] = false
+       end
+
+       def native
+         @native
+       end
+
+       def finalizer
+         ->(_) { close }
+       end
+
+       def closed?
+         @native.nil?
+       end
+
+       def close(object_id=nil)
+         return unless @native
+
+         # Indicate to polling thread that we're closing
+         @polling_thread[:closing] = true
+         # Wait for the polling thread to finish up
+         @polling_thread.join
+
+         Rdkafka::Bindings.rd_kafka_destroy(@native)
+
+         @native = nil
+       end
+     end
+   end
+ end
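
Extracting the native handle, polling thread, and teardown into Producer::Client keeps threading concerns out of Producer itself. A lifecycle sketch as wired up in the producer hunks below (topic and payload are placeholders):

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
    producer.produce(topic: "greetings", payload: "hello").wait

    # Client#close flags the polling thread via Thread#[]=, joins it once
    # rd_kafka_outq_len reaches zero, then destroys the native handle; a
    # second close is a no-op because @native is nil afterwards.
    producer.close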
data/lib/rdkafka/producer.rb CHANGED
@@ -1,4 +1,4 @@
- require "securerandom"
+ require "objspace"

  module Rdkafka
  # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
@@ -10,25 +10,12 @@ module Rdkafka
  attr_reader :delivery_callback

  # @private
- def initialize(native_kafka)
- @id = SecureRandom.uuid
- @closing = false
- @native_kafka = native_kafka
+ def initialize(client, partitioner_name)
+ @client = client
+ @partitioner_name = partitioner_name || "consistent_random"

  # Makes sure, that the producer gets closed before it gets GCed by Ruby
- ObjectSpace.define_finalizer(@id, proc { close })
-
- # Start thread to poll client for delivery callbacks
- @polling_thread = Thread.new do
- loop do
- Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
- # Exit thread if closing and the poll queue is empty
- if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
- break
- end
- end
- end
- @polling_thread.abort_on_exception = true
+ ObjectSpace.define_finalizer(self, client.finalizer)
  end

  # Set a callback that will be called every time a message is successfully produced.
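
Moving from define_finalizer(@id, proc { close }) to define_finalizer(self, client.finalizer) also avoids a classic Ruby finalizer pitfall: a proc that captures the object it guards keeps that object reachable, so it can never be collected. In general terms:

    # Leaks: the proc captures `self`, so the producer is never collected.
    ObjectSpace.define_finalizer(self, proc { close })

    # Safe: the lambda returned by Client#finalizer closes over the client
    # only, so the producer itself remains collectable.
    ObjectSpace.define_finalizer(self, client.finalizer)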
@@ -44,16 +31,9 @@ module Rdkafka

  # Close this producer and wait for the internal poll queue to empty.
  def close
- ObjectSpace.undefine_finalizer(@id)
-
- return unless @native_kafka
+ ObjectSpace.undefine_finalizer(self)

- # Indicate to polling thread that we're closing
- @closing = true
- # Wait for the polling thread to finish up
- @polling_thread.join
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
- @native_kafka = nil
+ @client.close
  end

  # Partition count for a given topic.
@@ -65,7 +45,7 @@ module Rdkafka
  #
  def partition_count(topic)
  closed_producer_check(__method__)
- Rdkafka::Metadata.new(@native_kafka, topic).topics&.first[:partition_count]
+ Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
  end

  # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -106,7 +86,7 @@ module Rdkafka
  if partition_key
  partition_count = partition_count(topic)
  # If the topic is not present, set to -1
- partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
+ partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
  end

  # If partition is nil, use -1 to let librdafka set the partition randomly or
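
With the extra argument, partition_key routing honors whichever partitioner the producer was configured with; the calling convention is unchanged (values are placeholders):

    handle = producer.produce(
      topic: "user_events",
      payload: "payload",
      partition_key: "user-42" # hashed with the configured @partitioner_name
    )
    handle.wait # blocks until the delivery report arrives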
@@ -157,7 +137,7 @@ module Rdkafka

  # Produce the message
  response = Rdkafka::Bindings.rd_kafka_producev(
- @native_kafka,
+ @client.native,
  *args
  )

@@ -176,7 +156,7 @@ module Rdkafka
  end

  def closed_producer_check(method)
- raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.nil?
+ raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
  end
  end
  end
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,5 @@
  module Rdkafka
- VERSION = "0.11.1"
- LIBRDKAFKA_VERSION = "1.8.2"
- LIBRDKAFKA_SOURCE_SHA256 = "6a747d293a7a4613bd2897e28e8791476fbe1ae7361f2530a876e0fd483482a6"
+ VERSION = "0.12.0.beta.2"
+ LIBRDKAFKA_VERSION = "1.9.0-RC2"
+ LIBRDKAFKA_SOURCE_SHA256 = "f38c7007d01c489c7b743d200a6760678f43b171b36886c4dda39d4a8d5aab59"
  end
data/lib/rdkafka.rb CHANGED
@@ -17,5 +17,6 @@ require "rdkafka/consumer/topic_partition_list"
  require "rdkafka/error"
  require "rdkafka/metadata"
  require "rdkafka/producer"
+ require "rdkafka/producer/client"
  require "rdkafka/producer/delivery_handle"
  require "rdkafka/producer/delivery_report"
data/spec/rdkafka/bindings_spec.rb CHANGED
@@ -76,6 +76,13 @@ describe Rdkafka::Bindings do
  result_2 = (Zlib.crc32(partition_key) % partition_count)
  expect(result_1).to eq(result_2)
  end
+
+ it "should return the partition calculated by the specified partitioner" do
+ result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
+ ptr = FFI::MemoryPointer.from_string(partition_key)
+ result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
+ expect(result_1).to eq(result_2)
+ end
  end

  describe "stats callback" do
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -241,7 +241,7 @@ describe Rdkafka::Consumer do

  it "should return the assignment when subscribed" do
  # Make sure there's a message
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload 1",
  key: "key 1",
@@ -272,7 +272,7 @@ describe Rdkafka::Consumer do
  it "should close a consumer" do
  consumer.subscribe("consume_test_topic")
  100.times do |i|
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload #{i}",
  key: "key #{i}",
@@ -289,7 +289,7 @@ describe Rdkafka::Consumer do
  describe "#commit, #committed and #store_offset" do
  # Make sure there's a stored offset
  let!(:report) do
- report = producer.produce(
+ producer.produce(
  topic: "consume_test_topic",
  payload: "payload 1",
  key: "key 1",
@@ -831,7 +831,6 @@ describe Rdkafka::Consumer do
  )
  consumer = config.consumer
  consumer.subscribe(topic_name)
- loop_count = 0
  batches_yielded = []
  exceptions_yielded = []
  each_batch_iterations = 0
@@ -875,7 +874,6 @@ describe Rdkafka::Consumer do
  )
  consumer = config.consumer
  consumer.subscribe(topic_name)
- loop_count = 0
  batches_yielded = []
  exceptions_yielded = []
  each_batch_iterations = 0
data/spec/rdkafka/producer/client_spec.rb ADDED
@@ -0,0 +1,145 @@
+ require "spec_helper"
+
+ describe Rdkafka::Producer::Client do
+   let(:config) { rdkafka_producer_config }
+   let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) }
+   let(:closing) { false }
+   let(:thread) { double(Thread) }
+
+   subject(:client) { described_class.new(native) }
+
+   before do
+     allow(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).and_call_original
+     allow(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(instance_of(FFI::Pointer)).and_return(0).and_call_original
+     allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy)
+     allow(Thread).to receive(:new).and_return(thread)
+
+     allow(thread).to receive(:[]=).with(:closing, anything)
+     allow(thread).to receive(:join)
+     allow(thread).to receive(:abort_on_exception=).with(anything)
+   end
+
+   context "defaults" do
+     it "sets the thread to abort on exception" do
+       expect(thread).to receive(:abort_on_exception=).with(true)
+
+       client
+     end
+
+     it "sets the thread `closing` flag to false" do
+       expect(thread).to receive(:[]=).with(:closing, false)
+
+       client
+     end
+   end
+
+   context "the polling thread" do
+     it "is created" do
+       expect(Thread).to receive(:new)
+
+       client
+     end
+
+     it "polls the native with default 250ms timeout" do
+       polling_loop_expects do
+         expect(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).at_least(:once)
+       end
+     end
+
+     it "check the out queue of native client" do
+       polling_loop_expects do
+         expect(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(native).at_least(:once)
+       end
+     end
+   end
+
+   def polling_loop_expects(&block)
+     Thread.current[:closing] = true # this forces the loop break with line #12
+
+     allow(Thread).to receive(:new).and_yield do |_|
+       block.call
+     end.and_return(thread)
+
+     client
+   end
+
+   it "exposes `native` client" do
+     expect(client.native).to eq(native)
+   end
+
+   context "when client was not yet closed (`nil`)" do
+     it "is not closed" do
+       expect(client.closed?).to eq(false)
+     end
+
+     context "and attempt to close" do
+       it "calls the `destroy` binding" do
+         expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native)
+
+         client.close
+       end
+
+       it "indicates to the polling thread that it is closing" do
+         expect(thread).to receive(:[]=).with(:closing, true)
+
+         client.close
+       end
+
+       it "joins the polling thread" do
+         expect(thread).to receive(:join)
+
+         client.close
+       end
+
+       it "closes and unassign the native client" do
+         client.close
+
+         expect(client.native).to eq(nil)
+         expect(client.closed?).to eq(true)
+       end
+     end
+   end
+
+   context "when client was already closed" do
+     before { client.close }
+
+     it "is closed" do
+       expect(client.closed?).to eq(true)
+     end
+
+     context "and attempt to close again" do
+       it "does not call the `destroy` binding" do
+         expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy)
+
+         client.close
+       end
+
+       it "does not indicate to the polling thread that it is closing" do
+         expect(thread).not_to receive(:[]=).with(:closing, true)
+
+         client.close
+       end
+
+       it "does not join the polling thread" do
+         expect(thread).not_to receive(:join)
+
+         client.close
+       end
+
+       it "does not close and unassign the native client again" do
+         client.close
+
+         expect(client.native).to eq(nil)
+         expect(client.closed?).to eq(true)
+       end
+     end
+   end
+
+   it "provide a finalizer Proc that closes the `native` client" do
+     expect(client.closed?).to eq(false)
+
+     client.finalizer.call("some-ignored-object-id")
+
+     expect(client.closed?).to eq(true)
+   end
+ end
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -376,13 +376,9 @@ describe Rdkafka::Producer do
  end
  end

- it "should produce a message in a forked process" do
+ it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
  # Fork, produce a message, send the report over a pipe and
  # wait for and check the message in the main process.
-
- # Kernel#fork is not available in JRuby
- skip if defined?(JRUBY_VERSION)
-
  reader, writer = IO.pipe

  fork do
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: rdkafka
  version: !ruby/object:Gem::Version
- version: 0.11.1
+ version: 0.12.0.beta.2
  platform: ruby
  authors:
  - Thijs Cadier
- autorequire:
+ autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-11-23 00:00:00.000000000 Z
+ date: 2022-04-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
@@ -177,6 +177,7 @@ files:
  - lib/rdkafka/error.rb
  - lib/rdkafka/metadata.rb
  - lib/rdkafka/producer.rb
+ - lib/rdkafka/producer/client.rb
  - lib/rdkafka/producer/delivery_handle.rb
  - lib/rdkafka/producer/delivery_report.rb
  - lib/rdkafka/version.rb
@@ -196,6 +197,7 @@ files:
  - spec/rdkafka/consumer_spec.rb
  - spec/rdkafka/error_spec.rb
  - spec/rdkafka/metadata_spec.rb
+ - spec/rdkafka/producer/client_spec.rb
  - spec/rdkafka/producer/delivery_handle_spec.rb
  - spec/rdkafka/producer/delivery_report_spec.rb
  - spec/rdkafka/producer_spec.rb
@@ -204,7 +206,7 @@ homepage: https://github.com/thijsc/rdkafka-ruby
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -215,12 +217,12 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '2.6'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - ">"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
- rubygems_version: 3.1.4
- signing_key:
+ rubygems_version: 3.0.3
+ signing_key:
  specification_version: 4
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
  It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
@@ -241,6 +243,7 @@ test_files:
  - spec/rdkafka/consumer_spec.rb
  - spec/rdkafka/error_spec.rb
  - spec/rdkafka/metadata_spec.rb
+ - spec/rdkafka/producer/client_spec.rb
  - spec/rdkafka/producer/delivery_handle_spec.rb
  - spec/rdkafka/producer/delivery_report_spec.rb
  - spec/rdkafka/producer_spec.rb