rdkafka 0.4.2 → 0.8.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,63 @@
1
+ module Rdkafka
2
+ class Consumer
3
+ # A message headers
4
+ class Headers
5
+ # Reads a native Kafka message's headers into a Ruby hash
6
+ #
7
+ # @return [Hash<String, String>] the message headers
8
+ #
9
+ # @raise [Rdkafka::RdkafkaError] when it fails to read the headers
10
+ #
11
+ # @private
12
+ def self.from_native(native_message)
13
+ headers_ptrptr = FFI::MemoryPointer.new(:pointer)
14
+ err = Rdkafka::Bindings.rd_kafka_message_headers(native_message, headers_ptrptr)
15
+
16
+ if err == Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT
17
+ return {}
18
+ elsif err != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
19
+ raise Rdkafka::RdkafkaError.new(err, "Error reading message headers")
20
+ end
21
+
22
+ headers_ptr = headers_ptrptr.read_pointer
23
+
24
+ name_ptrptr = FFI::MemoryPointer.new(:pointer)
25
+ value_ptrptr = FFI::MemoryPointer.new(:pointer)
26
+ size_ptr = Rdkafka::Bindings::SizePtr.new
27
+ headers = {}
28
+
29
+ idx = 0
30
+ loop do
31
+ err = Rdkafka::Bindings.rd_kafka_header_get_all(
32
+ headers_ptr,
33
+ idx,
34
+ name_ptrptr,
35
+ value_ptrptr,
36
+ size_ptr
37
+ )
38
+
39
+ if err == Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT
40
+ break
41
+ elsif err != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
42
+ raise Rdkafka::RdkafkaError.new(err, "Error reading a message header at index #{idx}")
43
+ end
44
+
45
+ name_ptr = name_ptrptr.read_pointer
46
+ name = name_ptr.respond_to?(:read_string_to_null) ? name_ptr.read_string_to_null : name_ptr.read_string
47
+
48
+ size = size_ptr[:value]
49
+
50
+ value_ptr = value_ptrptr.read_pointer
51
+
52
+ value = value_ptr.read_string(size)
53
+
54
+ headers[name.to_sym] = value
55
+
56
+ idx += 1
57
+ end
58
+
59
+ headers
60
+ end
61
+ end
62
+ end
63
+ end
@@ -26,6 +26,9 @@ module Rdkafka
26
26
  # @return [Time, nil]
27
27
  attr_reader :timestamp
28
28
 
29
+ # @return [Hash<String, String>] the message headers
30
+ attr_reader :headers
31
+
29
32
  # @private
30
33
  def initialize(native_message)
31
34
  # Set topic
@@ -54,12 +57,16 @@ module Rdkafka
54
57
  else
55
58
  nil
56
59
  end
60
+
61
+ @headers = Headers.from_native(native_message)
57
62
  end
58
63
 
59
64
  # Human readable representation of this message.
60
65
  # @return [String]
61
66
  def to_s
62
- "<Message in '#{topic}' with key '#{truncate(key)}', payload '#{truncate(payload)}', partition #{partition}, offset #{offset}, timestamp #{timestamp}>"
67
+ is_headers = @headers.empty? ? "" : ", headers #{headers.size}"
68
+
69
+ "<Message in '#{topic}' with key '#{truncate(key)}', payload '#{truncate(payload)}', partition #{partition}, offset #{offset}, timestamp #{timestamp}#{is_headers}>"
63
70
  end
64
71
 
65
72
  def truncate(string)
@@ -69,6 +76,9 @@ module Rdkafka
69
76
  string
70
77
  end
71
78
  end
79
+
80
+ private
81
+
72
82
  end
73
83
  end
74
84
  end
@@ -10,20 +10,25 @@ module Rdkafka
10
10
  # @return [Integer, nil]
11
11
  attr_reader :offset
12
12
 
13
+ # Partition's error code
14
+ # @return [Integer]
15
+ attr_reader :err
16
+
13
17
  # @private
14
- def initialize(partition, offset)
18
+ def initialize(partition, offset, err = 0)
15
19
  @partition = partition
16
20
  @offset = offset
21
+ @err = err
17
22
  end
18
23
 
19
24
  # Human readable representation of this partition.
20
25
  # @return [String]
21
26
  def to_s
22
- if offset.nil?
23
- "<Partition #{partition} without offset>"
24
- else
25
- "<Partition #{partition} with offset #{offset}>"
26
- end
27
+ message = "<Partition #{partition}"
28
+ message += " offset=#{offset}" if offset
29
+ message += " err=#{err}" if err != 0
30
+ message += ">"
31
+ message
27
32
  end
28
33
 
29
34
  # Human readable representation of this partition.
@@ -4,7 +4,7 @@ module Rdkafka
4
4
  class TopicPartitionList
5
5
  # Create a topic partition list.
6
6
  #
7
- # @param data [Hash<String => [nil,Partition]>] The topic and partion data or nil to create an empty list
7
+ # @param data [Hash{String => nil,Partition}] The topic and partition data or nil to create an empty list
8
8
  #
9
9
  # @return [TopicPartitionList]
10
10
  def initialize(data=nil)
@@ -54,7 +54,7 @@ module Rdkafka
54
54
  if partitions.is_a? Integer
55
55
  partitions = (0..partitions - 1)
56
56
  end
57
- @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil) }
57
+ @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil, 0) }
58
58
  end
59
59
  end
60
60
 
@@ -71,7 +71,7 @@ module Rdkafka
71
71
 
72
72
  # Return a `Hash` with the topics as keys and an array of partition information as the value if present.
73
73
  #
74
- # @return [Hash<String, [Array<Partition>, nil]>]
74
+ # @return [Hash{String => Array<Partition>,nil}]
75
75
  def to_h
76
76
  @data
77
77
  end
@@ -88,7 +88,7 @@ module Rdkafka
88
88
 
89
89
  # Create a new topic partition list based of a native one.
90
90
  #
91
- # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied and afterwards it will be destroyed.
91
+ # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied.
92
92
  #
93
93
  # @return [TopicPartitionList]
94
94
  #
@@ -106,12 +106,12 @@ module Rdkafka
106
106
  data[elem[:topic]] = nil
107
107
  else
108
108
  partitions = data[elem[:topic]] || []
109
- offset = if elem[:offset] == -1001
109
+ offset = if elem[:offset] == Rdkafka::Bindings::RD_KAFKA_OFFSET_INVALID
110
110
  nil
111
111
  else
112
112
  elem[:offset]
113
113
  end
114
- partition = Partition.new(elem[:partition], offset)
114
+ partition = Partition.new(elem[:partition], offset, elem[:err])
115
115
  partitions.push(partition)
116
116
  data[elem[:topic]] = partitions
117
117
  end
@@ -119,42 +119,45 @@ module Rdkafka
119
119
 
120
120
  # Return the created object
121
121
  TopicPartitionList.new(data)
122
- ensure
123
- # Destroy the tpl
124
- Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(pointer)
125
122
  end
126
123
 
127
- # Create a native tpl with the contents of this object added
124
+ # Create a native tpl with the contents of this object added.
128
125
  #
126
+ # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
127
+ #
128
+ # @return [FFI::Pointer]
129
129
  # @private
130
130
  def to_native_tpl
131
- Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count).tap do |tpl|
132
- @data.each do |topic, partitions|
133
- if partitions
134
- partitions.each do |p|
135
- Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
136
- tpl,
137
- topic,
138
- p.partition
139
- )
140
- if p.offset
141
- Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
142
- tpl,
143
- topic,
144
- p.partition,
145
- p.offset
146
- )
147
- end
148
- end
149
- else
131
+ tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
132
+
133
+ @data.each do |topic, partitions|
134
+ if partitions
135
+ partitions.each do |p|
150
136
  Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
151
137
  tpl,
152
138
  topic,
153
- -1
139
+ p.partition
154
140
  )
141
+
142
+ if p.offset
143
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
144
+ tpl,
145
+ topic,
146
+ p.partition,
147
+ p.offset
148
+ )
149
+ end
155
150
  end
151
+ else
152
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
153
+ tpl,
154
+ topic,
155
+ -1
156
+ )
156
157
  end
157
158
  end
159
+
160
+ tpl
158
161
  end
159
162
  end
160
163
  end
@@ -39,5 +39,22 @@ module Rdkafka
39
39
  def is_partition_eof?
40
40
  code == :partition_eof
41
41
  end
42
+
43
+ # Error comparison
44
+ def ==(another_error)
45
+ another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
46
+ end
47
+ end
48
+
49
+ # Error with topic partition list returned by the underlying rdkafka library.
50
+ class RdkafkaTopicPartitionListError < RdkafkaError
51
+ # @return [TopicPartitionList]
52
+ attr_reader :topic_partition_list
53
+
54
+ # @private
55
+ def initialize(response, topic_partition_list, message_prefix=nil)
56
+ super(response, message_prefix)
57
+ @topic_partition_list = topic_partition_list
58
+ end
42
59
  end
43
60
  end
@@ -0,0 +1,91 @@
1
+ module Rdkafka
2
+ class Metadata
3
+ attr_reader :brokers, :topics
4
+
5
+ def initialize(native_client, topic_name = nil)
6
+ native_topic = if topic_name
7
+ Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
8
+ end
9
+
10
+ ptr = FFI::MemoryPointer.new(:pointer)
11
+
12
+ # Retrieve metadata flag is 0/1 for single/multiple topics.
13
+ topic_flag = topic_name ? 1 : 0
14
+
15
+ # Retrieve the Metadata
16
+ result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
17
+
18
+ # Error Handling
19
+ Rdkafka::Error.new(result) unless result.zero?
20
+
21
+ metadata_from_native(ptr.read_pointer)
22
+ ensure
23
+ Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
24
+ Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
25
+ end
26
+
27
+ private
28
+
29
+ def metadata_from_native(ptr)
30
+ metadata = Metadata.new(ptr)
31
+ @brokers = Array.new(metadata[:brokers_count]) do |i|
32
+ BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
33
+ end
34
+
35
+ @topics = Array.new(metadata[:topics_count]) do |i|
36
+ topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
37
+ Rdkafka::Error.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
38
+
39
+ partitions = Array.new(topic[:partition_count]) do |j|
40
+ partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
41
+ Rdkafka::Error.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
42
+ partition.to_h
43
+ end
44
+ topic.to_h.merge!(partitions: partitions)
45
+ end
46
+ end
47
+
48
+ class CustomFFIStruct < FFI::Struct
49
+ def to_h
50
+ members.each_with_object({}) do |mem, hsh|
51
+ val = self.[](mem)
52
+ next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err
53
+
54
+ hsh[mem] = self.[](mem)
55
+ end
56
+ end
57
+ end
58
+
59
+ class Metadata < CustomFFIStruct
60
+ layout :brokers_count, :int,
61
+ :brokers_metadata, :pointer,
62
+ :topics_count, :int,
63
+ :topics_metadata, :pointer,
64
+ :broker_id, :int32,
65
+ :broker_name, :string
66
+ end
67
+
68
+ class BrokerMetadata < CustomFFIStruct
69
+ layout :broker_id, :int32,
70
+ :broker_name, :string,
71
+ :broker_port, :int
72
+ end
73
+
74
+ class TopicMetadata < CustomFFIStruct
75
+ layout :topic_name, :string,
76
+ :partition_count, :int,
77
+ :partitions_metadata, :pointer,
78
+ :rd_kafka_resp_err, :int
79
+ end
80
+
81
+ class PartitionMetadata < CustomFFIStruct
82
+ layout :partition_id, :int32,
83
+ :rd_kafka_resp_err, :int,
84
+ :leader, :int32,
85
+ :replica_count, :int,
86
+ :replicas, :pointer,
87
+ :in_sync_replica_brokers, :int,
88
+ :isrs, :pointer
89
+ end
90
+ end
91
+ end
@@ -2,7 +2,10 @@ module Rdkafka
2
2
  # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
3
3
  class Producer
4
4
  # @private
5
- @delivery_callback = nil
5
+ # Returns the current delivery callback, by default this is nil.
6
+ #
7
+ # @return [Proc, nil]
8
+ attr_reader :delivery_callback
6
9
 
7
10
  # @private
8
11
  def initialize(native_kafka)
@@ -32,36 +35,45 @@ module Rdkafka
32
35
  @delivery_callback = callback
33
36
  end
34
37
 
35
- # Returns the current delivery callback, by default this is nil.
36
- #
37
- # @return [Proc, nil]
38
- def delivery_callback
39
- @delivery_callback
40
- end
41
-
42
38
  # Close this producer and wait for the internal poll queue to empty.
43
39
  def close
40
+ return unless @native_kafka
41
+
44
42
  # Indicate to polling thread that we're closing
45
43
  @closing = true
46
44
  # Wait for the polling thread to finish up
47
45
  @polling_thread.join
46
+ Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
47
+ @native_kafka = nil
48
+ end
49
+
50
+ # Partition count for a given topic.
51
+ # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
52
+ #
53
+ # @param topic [String] The topic name.
54
+ #
55
+ # @return [Integer,nil] the partition count
56
+ #
57
+ def partition_count(topic)
58
+ Rdkafka::Metadata.new(@native_kafka, topic).topics&.select { |x| x[:topic_name] == topic }&.dig(0, :partition_count)
48
59
  end
49
60
 
50
61
  # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
51
62
  #
52
63
  # When no partition is specified the underlying Kafka library picks a partition based on the key. If no key is specified, a random partition will be used.
53
- # When a timestamp is provided this is used instead of the autogenerated timestamp.
64
+ # When a timestamp is provided this is used instead of the auto-generated timestamp.
54
65
  #
55
66
  # @param topic [String] The topic to produce to
56
67
  # @param payload [String,nil] The message's payload
57
68
  # @param key [String] The message's key
58
69
  # @param partition [Integer,nil] Optional partition to produce to
59
70
  # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
71
+ # @param headers [Hash<String,String>] Optional message headers
60
72
  #
61
73
  # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
62
74
  #
63
75
  # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
64
- def produce(topic:, payload: nil, key: nil, partition: nil, timestamp: nil)
76
+ def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
65
77
  # Start by checking and converting the input
66
78
 
67
79
  # Get payload length
@@ -78,9 +90,15 @@ module Rdkafka
78
90
  key.bytesize
79
91
  end
80
92
 
81
- # If partition is nil use -1 to let Kafka set the partition based
82
- # on the key/randomly if there is no key
83
- partition = -1 if partition.nil?
93
+ if partition_key
94
+ partition_count = partition_count(topic)
95
+ # If the topic is not present, set to -1
96
+ partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
97
+ end
98
+
99
+ # If partition is nil, use -1 to let librdkafka set the partition randomly or
100
+ # based on the key when present.
101
+ partition ||= -1
84
102
 
85
103
  # If timestamp is nil use 0 and let Kafka set one. If an integer or time
86
104
  # use it.
@@ -101,9 +119,7 @@ module Rdkafka
101
119
  delivery_handle[:offset] = -1
102
120
  DeliveryHandle.register(delivery_handle.to_ptr.address, delivery_handle)
103
121
 
104
- # Produce the message
105
- response = Rdkafka::Bindings.rd_kafka_producev(
106
- @native_kafka,
122
+ args = [
107
123
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TOPIC, :string, topic,
108
124
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_MSGFLAGS, :int, Rdkafka::Bindings::RD_KAFKA_MSG_F_COPY,
109
125
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_VALUE, :buffer_in, payload, :size_t, payload_size,
@@ -111,10 +127,30 @@ module Rdkafka
111
127
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_PARTITION, :int32, partition,
112
128
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TIMESTAMP, :int64, raw_timestamp,
113
129
  :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_OPAQUE, :pointer, delivery_handle,
114
- :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_END
130
+ ]
131
+
132
+ if headers
133
+ headers.each do |key0, value0|
134
+ key = key0.to_s
135
+ value = value0.to_s
136
+ args += [
137
+ :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER,
138
+ :string, key,
139
+ :pointer, value,
140
+ :size_t, value.bytes.size
141
+ ]
142
+ end
143
+ end
144
+
145
+ args += [:int, Rdkafka::Bindings::RD_KAFKA_VTYPE_END]
146
+
147
+ # Produce the message
148
+ response = Rdkafka::Bindings.rd_kafka_producev(
149
+ @native_kafka,
150
+ *args
115
151
  )
116
152
 
117
- # Raise error if the produce call was not successfull
153
+ # Raise error if the produce call was not successful
118
154
  if response != 0
119
155
  DeliveryHandle.remove(delivery_handle.to_ptr.address)
120
156
  raise RdkafkaError.new(response)