rdkafka 0.7.0 → 0.8.0.beta.1

This diff shows the changes between two publicly released package versions, as published to their respective public registries. It is provided for informational purposes only.
@@ -19,7 +19,7 @@ module Rdkafka
           raise Rdkafka::RdkafkaError.new(err, "Error reading message headers")
         end
 
-        headers_ptr = headers_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
+        headers_ptr = headers_ptrptr.read_pointer
 
         name_ptrptr = FFI::MemoryPointer.new(:pointer)
         value_ptrptr = FFI::MemoryPointer.new(:pointer)
@@ -42,12 +42,14 @@ module Rdkafka
           raise Rdkafka::RdkafkaError.new(err, "Error reading a message header at index #{idx}")
         end
 
-        name = name_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
-        name = name.read_string_to_null
+        name_ptr = name_ptrptr.read_pointer
+        name = name_ptr.respond_to?(:read_string_to_null) ? name_ptr.read_string_to_null : name_ptr.read_string
 
         size = size_ptr[:value]
-        value = value_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
-        value = value.read_string(size)
+
+        value_ptr = value_ptrptr.read_pointer
+
+        value = value_ptr.read_string(size)
 
         headers[name.to_sym] = value
 
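For context: both hunks replace `read(:pointer)` plus manual `autorelease` handling with FFI's `read_pointer`, which returns a plain, non-autoreleased FFI::Pointer. The `respond_to?(:read_string_to_null)` guard exists because that method is only available on newer ffi releases; `read_string` with no length argument also reads up to the NUL terminator. A minimal sketch of the out-parameter pattern involved, using only stock ffi calls (the buffer setup stands in for what a C call such as librdkafka's rd_kafka_header_get_all does on the native side):

require 'ffi'

# A NUL-terminated buffer standing in for a string owned by the C library.
buf = FFI::MemoryPointer.from_string("header-value")

# The void** out-parameter that a C call would fill in.
ptrptr = FFI::MemoryPointer.new(:pointer)
ptrptr.write_pointer(buf)

# Dereference the out-parameter, as headers_ptrptr.read_pointer does above.
value_ptr = ptrptr.read_pointer
puts value_ptr.read_string # => "header-value"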
@@ -106,7 +106,7 @@ module Rdkafka
             data[elem[:topic]] = nil
           else
             partitions = data[elem[:topic]] || []
-            offset = if elem[:offset] == -1001
+            offset = if elem[:offset] == Rdkafka::Bindings::RD_KAFKA_OFFSET_INVALID
                        nil
                      else
                        elem[:offset]
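The named constant and the magic number are the same value: librdkafka defines RD_KAFKA_OFFSET_INVALID as -1001, its sentinel for "no stored offset", so this hunk changes readability only. The mapping in plain Ruby:

RD_KAFKA_OFFSET_INVALID = -1001 # librdkafka's "no stored offset" sentinel

raw_offset = -1001
offset = raw_offset == RD_KAFKA_OFFSET_INVALID ? nil : raw_offset
offset # => nil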
@@ -125,10 +125,10 @@ module Rdkafka
       #
       # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
       #
-      # @return [FFI::AutoPointer]
+      # @return [FFI::Pointer]
       # @private
       def to_native_tpl
-        tpl = TopicPartitionList.new_native_tpl(count)
+        tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
 
         @data.each do |topic, partitions|
           if partitions
@@ -138,6 +138,7 @@ module Rdkafka
               topic,
               p.partition
             )
+
            if p.offset
              Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
                tpl,
@@ -158,17 +159,6 @@ module Rdkafka
 
         tpl
       end
-
-      # Creates a new native tpl and wraps it into FFI::AutoPointer which in turn calls
-      # `rd_kafka_topic_partition_list_destroy` when a pointer will be cleaned by GC
-      #
-      # @param count [Integer] an initial capacity of partitions list
-      # @return [FFI::AutoPointer]
-      # @private
-      def self.new_native_tpl(count)
-        tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
-        FFI::AutoPointer.new(tpl, Rdkafka::Bindings.method(:rd_kafka_topic_partition_list_destroy))
-      end
     end
   end
 end
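Dropping FFI::AutoPointer moves ownership of the native list from the GC to the caller: to_native_tpl now returns a raw pointer that must be destroyed explicitly. A minimal sketch of the resulting pattern, assuming caller code inside the gem (the ensure placement is illustrative, not quoted from 0.8.0):

tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(5)
begin
  # ... hand tpl to a native call such as rd_kafka_assign or rd_kafka_commit ...
ensure
  # Deterministic cleanup replaces the old GC-driven AutoPointer finalizer.
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
end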
data/lib/rdkafka/error.rb CHANGED
@@ -39,6 +39,11 @@ module Rdkafka
     def is_partition_eof?
       code == :partition_eof
     end
+
+    # Error comparison
+    def ==(another_error)
+      another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
+    end
   end
 
   # Error with topic partition list returned by the underlying rdkafka library.
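A hedged usage sketch of the new ==: two errors compare equal when they are the same class and render the same message string (the response code 10 is an arbitrary example, and the sketch assumes the native library is loaded so the message can be rendered):

err_a = Rdkafka::RdkafkaError.new(10)
err_b = Rdkafka::RdkafkaError.new(10)
err_a == err_b                         # => true: same class, same rendered message
err_a == StandardError.new(err_a.to_s) # => false: the is_a? check fails first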
@@ -0,0 +1,91 @@
+module Rdkafka
+  class Metadata
+    attr_reader :brokers, :topics
+
+    def initialize(native_client, topic_name = nil)
+      native_topic = if topic_name
+                       Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
+                     end
+
+      ptr = FFI::MemoryPointer.new(:pointer)
+
+      # Retrieve metadata flag is 0/1 for single/multiple topics.
+      topic_flag = topic_name ? 1 : 0
+
+      # Retrieve the Metadata
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+
+      # Error Handling
+      Rdkafka::Error.new(result) unless result.zero?
+
+      metadata_from_native(ptr.read_pointer)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
+      Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
+    end
+
+    private
+
+    def metadata_from_native(ptr)
+      metadata = Metadata.new(ptr)
+      @brokers = Array.new(metadata[:brokers_count]) do |i|
+        BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
+      end
+
+      @topics = Array.new(metadata[:topics_count]) do |i|
+        topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
+        Rdkafka::Error.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
+
+        partitions = Array.new(topic[:partition_count]) do |j|
+          partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
+          Rdkafka::Error.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
+          partition.to_h
+        end
+        topic.to_h.merge!(partitions: partitions)
+      end
+    end
+
+    class CustomFFIStruct < FFI::Struct
+      def to_h
+        members.each_with_object({}) do |mem, hsh|
+          val = self.[](mem)
+          next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err
+
+          hsh[mem] = self.[](mem)
+        end
+      end
+    end
+
+    class Metadata < CustomFFIStruct
+      layout :brokers_count, :int,
+             :brokers_metadata, :pointer,
+             :topics_count, :int,
+             :topics_metadata, :pointer,
+             :broker_id, :int32,
+             :broker_name, :string
+    end
+
+    class BrokerMetadata < CustomFFIStruct
+      layout :broker_id, :int32,
+             :broker_name, :string,
+             :broker_port, :int
+    end
+
+    class TopicMetadata < CustomFFIStruct
+      layout :topic_name, :string,
+             :partition_count, :int,
+             :partitions_metadata, :pointer,
+             :rd_kafka_resp_err, :int
+    end
+
+    class PartitionMetadata < CustomFFIStruct
+      layout :partition_id, :int32,
+             :rd_kafka_resp_err, :int,
+             :leader, :int32,
+             :replica_count, :int,
+             :replicas, :pointer,
+             :in_sync_replica_brokers, :int,
+             :isrs, :pointer
+    end
+  end
+end
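A hedged usage sketch of the new class (assumes native_kafka is a live client handle, such as the producer's @native_kafka below; the topic name is invented):

metadata = Rdkafka::Metadata.new(native_kafka, "greetings")

metadata.brokers.each do |broker|
  puts "broker #{broker[:broker_id]} at #{broker[:broker_name]}:#{broker[:broker_port]}"
end

metadata.topics.each do |topic|
  puts "#{topic[:topic_name]}: #{topic[:partition_count]} partitions"
end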
@@ -37,10 +37,25 @@ module Rdkafka
 
     # Close this producer and wait for the internal poll queue to empty.
     def close
+      return unless @native_kafka
+
       # Indicate to polling thread that we're closing
       @closing = true
       # Wait for the polling thread to finish up
       @polling_thread.join
+      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+      @native_kafka = nil
+    end
+
+    # Partition count for a given topic.
+    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
+    #
+    # @param topic [String] The topic name.
+    #
+    # @return partition count [Integer,nil]
+    #
+    def partition_count(topic)
+      Rdkafka::Metadata.new(@native_kafka, topic).topics&.select { |x| x[:topic_name] == topic }&.dig(0, :partition_count)
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
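A hedged sketch of the two additions above (broker address and topic invented): partition_count returns nil for an unknown topic, and the @native_kafka guard makes a second close a no-op:

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

producer.partition_count("greetings") # => Integer, or nil if the topic is unknown

producer.close
producer.close # safe: @native_kafka is already nil, so close returns immediately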
@@ -58,7 +73,7 @@ module Rdkafka
     # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
     #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
-    def produce(topic:, payload: nil, key: nil, partition: nil, timestamp: nil, headers: nil)
+    def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
       # Start by checking and converting the input
 
       # Get payload length
@@ -75,9 +90,15 @@ module Rdkafka
         key.bytesize
       end
 
-      # If partition is nil use -1 to let Kafka set the partition based
-      # on the key/randomly if there is no key
-      partition = -1 if partition.nil?
+      if partition_key
+        partition_count = partition_count(topic)
+        # If the topic is not present, set to -1
+        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
+      end
+
+      # If partition is nil, use -1 to let librdafka set the partition randomly or
+      # based on the key when present.
+      partition ||= -1
 
       # If timestamp is nil use 0 and let Kafka set one. If an integer or time
       # use it.
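A hedged sketch of producing with the new partition_key (topic and key invented; producer built as in the earlier sketch). Messages sharing a partition_key hash to the same partition via Rdkafka::Bindings.partitioner; if the partition count cannot be fetched, partition stays nil and falls through to -1:

handle = producer.produce(
  topic: "user_events",
  payload: "logged_in",
  partition_key: "user-42" # same key and partition count => same partition
)
handle.wait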
@@ -10,11 +10,16 @@ module Rdkafka
     # @return [Integer]
     attr_reader :offset
 
+    # Error in case happen during produce.
+    # @return [string]
+    attr_reader :error
+
     private
 
-    def initialize(partition, offset)
+    def initialize(partition, offset, error = nil)
       @partition = partition
       @offset = offset
+      @error = error
     end
   end
 end
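A hedged sketch of reading the new field through the gem's delivery callback hook (broker address invented); error is nil on success:

config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
config.delivery_callback = lambda do |report|
  if report.error
    warn "delivery failed: #{report.error}"
  else
    puts "delivered to partition #{report.partition} at offset #{report.offset}"
  end
end
producer = config.producer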
@@ -1,5 +1,5 @@
 module Rdkafka
-  VERSION = "0.7.0"
-  LIBRDKAFKA_VERSION = "1.2.0"
-  LIBRDKAFKA_SOURCE_SHA256 = "eedde1c96104e4ac2d22a4230e34f35dd60d53976ae2563e3dd7c27190a96859"
+  VERSION = "0.8.0.beta.1"
+  LIBRDKAFKA_VERSION = "1.4.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "ae27ea3f3d0d32d29004e7f709efbba2666c5383a107cc45b3a1949486b2eb84"
 end
data/rdkafka.gemspec CHANGED
@@ -19,7 +19,7 @@ Gem::Specification.new do |gem|
 
   gem.add_dependency 'ffi', '~> 1.9'
   gem.add_dependency 'mini_portile2', '~> 2.1'
-  gem.add_dependency 'rake', '~> 12.3'
+  gem.add_dependency 'rake', '>= 12.3'
 
   gem.add_development_dependency 'pry', '~> 0.10'
   gem.add_development_dependency 'rspec', '~> 3.5'
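The relaxed constraint matters for rake 13: the pessimistic '~> 12.3' caps rake below 13.0, while '>= 12.3' accepts it. Checked with RubyGems directly:

Gem::Dependency.new('rake', '~> 12.3').match?('rake', '13.0.1') # => false
Gem::Dependency.new('rake', '>= 12.3').match?('rake', '13.0.1') # => true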
@@ -1,4 +1,5 @@
 require "spec_helper"
+require 'zlib'
 
 describe Rdkafka::Bindings do
   it "should load librdkafka" do
@@ -7,12 +8,12 @@ describe Rdkafka::Bindings do
 
   describe ".lib_extension" do
     it "should know the lib extension for darwin" do
-      expect(Gem::Platform.local).to receive(:os).and_return("darwin-aaa")
+      stub_const('RbConfig::CONFIG', 'host_os' =>'darwin')
       expect(Rdkafka::Bindings.lib_extension).to eq "dylib"
     end
 
     it "should know the lib extension for linux" do
-      expect(Gem::Platform.local).to receive(:os).and_return("linux")
+      stub_const('RbConfig::CONFIG', 'host_os' =>'linux')
       expect(Rdkafka::Bindings.lib_extension).to eq "so"
     end
   end
@@ -60,6 +61,23 @@ describe Rdkafka::Bindings do
     end
   end
 
+  describe "partitioner" do
+    let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
+    let(:partition_count) { rand(50) + 1 }
+
+    it "should return the same partition for a similar string and the same partition count" do
+      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+      result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+      expect(result_1).to eq(result_2)
+    end
+
+    it "should match the old partitioner" do
+      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
+      result_2 = (Zlib.crc32(partition_key) % partition_count)
+      expect(result_1).to eq(result_2)
+    end
+  end
+
   describe "stats callback" do
     context "without a stats callback" do
       it "should do nothing" do
@@ -50,7 +50,9 @@ describe Rdkafka::Config do
   end
 
   it "should create a consumer with valid config" do
-    expect(rdkafka_config.consumer).to be_a Rdkafka::Consumer
+    consumer = rdkafka_config.consumer
+    expect(consumer).to be_a Rdkafka::Consumer
+    consumer.close
   end
 
   it "should raise an error when creating a consumer with invalid config" do
@@ -76,7 +78,9 @@ describe Rdkafka::Config do
   end
 
   it "should create a producer with valid config" do
-    expect(rdkafka_config.producer).to be_a Rdkafka::Producer
+    producer = rdkafka_config.producer
+    expect(producer).to be_a Rdkafka::Producer
+    producer.close
   end
 
   it "should raise an error when creating a producer with invalid config" do
@@ -1,7 +1,8 @@
 require "spec_helper"
 
 describe Rdkafka::Consumer::Message do
-  let(:native_topic) { new_native_topic }
+  let(:native_client) { new_native_client }
+  let(:native_topic) { new_native_topic(native_client: native_client) }
   let(:payload) { nil }
   let(:key) { nil }
   let(:native_message) do
@@ -24,6 +25,10 @@ describe Rdkafka::Consumer::Message do
     end
   end
 
+  after(:each) do
+    Rdkafka::Bindings.rd_kafka_destroy(native_client)
+  end
+
   subject { Rdkafka::Consumer::Message.new(native_message) }
 
   before do
@@ -6,7 +6,10 @@ describe Rdkafka::Consumer do
   let(:consumer) { config.consumer }
   let(:producer) { config.producer }
 
-  describe "#subscripe, #unsubscribe and #subscription" do
+  after { consumer.close }
+  after { producer.close }
+
+  describe "#subscribe, #unsubscribe and #subscription" do
     it "should subscribe, unsubscribe and return the subscription" do
       expect(consumer.subscription).to be_empty
 
@@ -88,7 +91,6 @@ describe Rdkafka::Consumer do
       # 8. ensure that message is successfully consumed
       records = consumer.poll(timeout)
       expect(records).not_to be_nil
-      consumer.commit
     end
   end
 
@@ -205,8 +207,6 @@ describe Rdkafka::Consumer do
       expect(records&.payload).to eq "payload c"
       records = consumer.poll(timeout)
      expect(records).to be_nil
-
-      consumer.commit
     end
   end
 end
@@ -313,11 +313,11 @@ describe Rdkafka::Consumer do
       }.to raise_error TypeError
     end
 
-    context "with a commited consumer" do
+    context "with a committed consumer" do
       before :all do
-        # Make sure there are some message
-        producer = rdkafka_config.producer
+        # Make sure there are some messages.
         handles = []
+        producer = rdkafka_config.producer
         10.times do
           (0..2).each do |i|
             handles << producer.produce(
@@ -329,6 +329,7 @@ describe Rdkafka::Consumer do
           end
         end
         handles.each(&:wait)
+        producer.close
       end
 
       before do
@@ -389,20 +390,26 @@ describe Rdkafka::Consumer do
 
   describe "#store_offset" do
     before do
+      config = {}
       config[:'enable.auto.offset.store'] = false
       config[:'enable.auto.commit'] = false
-      consumer.subscribe("consume_test_topic")
-      wait_for_assignment(consumer)
+      @new_consumer = rdkafka_config(config).consumer
+      @new_consumer.subscribe("consume_test_topic")
+      wait_for_assignment(@new_consumer)
+    end
+
+    after do
+      @new_consumer.close
     end
 
     it "should store the offset for a message" do
-      consumer.store_offset(message)
-      consumer.commit
+      @new_consumer.store_offset(message)
+      @new_consumer.commit
 
       list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
         list.add_topic("consume_test_topic", [0, 1, 2])
       end
-      partitions = consumer.committed(list).to_h["consume_test_topic"]
+      partitions = @new_consumer.committed(list).to_h["consume_test_topic"]
       expect(partitions).not_to be_nil
       expect(partitions[message.partition].offset).to eq(message.offset + 1)
     end
@@ -410,7 +417,7 @@ describe Rdkafka::Consumer do
     it "should raise an error with invalid input" do
       allow(message).to receive(:partition).and_return(9999)
       expect {
-        consumer.store_offset(message)
+        @new_consumer.store_offset(message)
       }.to raise_error Rdkafka::RdkafkaError
     end
   end
@@ -554,12 +561,12 @@ describe Rdkafka::Consumer do
         payload: "payload 1",
         key: "key 1"
       ).wait
-
       consumer.subscribe("consume_test_topic")
-      message = consumer.poll(5000)
-      expect(message).to be_a Rdkafka::Consumer::Message
+      message = consumer.each {|m| break m}
 
-      # Message content is tested in producer spec
+      expect(message).to be_a Rdkafka::Consumer::Message
+      expect(message.payload).to eq('payload 1')
+      expect(message.key).to eq('key 1')
     end
 
     it "should raise an error when polling fails" do