rdkafka 0.4.2 → 0.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 277ff64f82622eddd6c31265d1b4b91b6b60bed802590bf9b4d015ea7a32ce0a
-  data.tar.gz: 027fc22c22349729bb04288f9010eda156a9d04cf9a616bb237b1fc56eb9aed5
+  metadata.gz: c01eae203c0d0a12be82edacae9fb3ecde7c73dc96e6a95d456434a2d0a2e5d9
+  data.tar.gz: 50bced86691e02ca0f1af3df2140751f530b667aa5ddf6f6822110991e3f4b43
 SHA512:
-  metadata.gz: 0cb6c97fa7ac62ead42bcd8da1d22f56b02f56e54a7671e7e2722d029839d24c7390c0fe10890530afb4e9a002e9f5fcc7e213777cf956e762d443edf466715e
-  data.tar.gz: 54865cb4b0772cffa1c91fd4e971a3e23f9f9b24959c83402460bca8b2d268e15bef8df8ea011848a57d75c4088198d02f77713ff9123647f62b7b5ece38a787
+  metadata.gz: af1930db8fea0a9ebec2ab63c404be88a805349badbc575e351e90d694d989a962dcfd4db32a5e344fddc83e8f97ca7600188ccc6d8e46ccd9a73afa99affbed
+  data.tar.gz: c8a629fe6ea2a9e1a0d6c38e6d2eb46f1ea8d6791e7d2bdd2751c83ab5a6ed6fc90d7783eaeef3e2f324f01d50e3bb3d308c12048001519dea42c3810c399240
data/.travis.yml CHANGED
@@ -11,11 +11,10 @@ env:
   - KAFKA_HEAP_OPTS="-Xmx512m -Xms512m"
 
 rvm:
-  - 2.1
-  - 2.2
   - 2.3
   - 2.4
   - 2.5
+  - 2.6
 
 before_install:
   - gem update --system
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+# 0.5.0
+* Bump librdkafka to 1.0.0 (by breunigs)
+* Add cluster and member information (by dmexe)
+* Support message headers for consumer & producer (by dmexe)
+* Add consumer rebalance listener (by dmexe)
+
 # 0.4.2
 * Delivery callback for producer
 * Document list param of commit method
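For orientation, a minimal sketch of the headline change, message headers, as it is used end to end; the broker address, group id, and topic name here are illustrative:

```ruby
require "rdkafka"

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092", # illustrative broker
  "group.id"          => "example-group"
)
producer = config.producer
consumer = config.consumer

# Header keys and values are stringified on produce...
producer.produce(topic: "example_topic", payload: "hi", headers: { source: "web" }).wait

# ...and come back keyed by symbols on consume.
consumer.subscribe("example_topic")
message = consumer.each { |m| break m }
message.headers # => { source: "web" }
```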
data/README.md CHANGED
@@ -8,7 +8,7 @@
 The `rdkafka` gem is a modern Kafka client library for Ruby based on
 [librdkafka](https://github.com/edenhill/librdkafka/).
 It wraps the production-ready C client using the [ffi](https://github.com/ffi/ffi)
-gem and targets Kafka 1.0+ and Ruby 2.1+.
+gem and targets Kafka 1.0+ and Ruby 2.3+.
 
 This gem only provides a high-level Kafka consumer. If you are running
 an older version of Kafka and/or need the legacy simple consumer we
data/docker-compose.yml CHANGED
@@ -13,6 +13,6 @@ services:
       KAFKA_ADVERTISED_PORT: 9092
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-      KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,empty_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1,empty_test_topic:3:1"
+      KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,empty_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1,watermarks_test_topic:3:1"
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
data/lib/rdkafka.rb CHANGED
@@ -3,6 +3,7 @@ require "rdkafka/version"
 require "rdkafka/bindings"
 require "rdkafka/config"
 require "rdkafka/consumer"
+require "rdkafka/consumer/headers"
 require "rdkafka/consumer/message"
 require "rdkafka/consumer/partition"
 require "rdkafka/consumer/topic_partition_list"
data/lib/rdkafka/bindings.rb CHANGED
@@ -17,11 +17,25 @@ module Rdkafka
 
     ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
 
+    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
+    RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
+    RD_KAFKA_RESP_ERR__NOENT = -156
+    RD_KAFKA_RESP_ERR_NO_ERROR = 0
+
+    class SizePtr < FFI::Struct
+      layout :value, :size_t
+    end
+
     # Polling
 
     attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
     attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
 
+    # Metadata
+
+    attach_function :rd_kafka_memberid, [:pointer], :string
+    attach_function :rd_kafka_clusterid, [:pointer], :string
+
     # Message struct
 
     class Message < FFI::Struct
@@ -148,6 +162,45 @@ module Rdkafka
     attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
     attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
     attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
+    attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
+    attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
+
+    # Headers
+    attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
+    attach_function :rd_kafka_message_headers, [:pointer, :pointer], :int
+
+    # Rebalance
+
+    callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
+    attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
+
+    RebalanceCallback = FFI::Function.new(
+      :void, [:pointer, :int, :pointer, :pointer]
+    ) do |client_ptr, code, partitions_ptr, opaque_ptr|
+      case code
+      when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+      else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+      end
+
+      opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+      return unless opaque
+
+      tpl = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
+      consumer = Rdkafka::Consumer.new(client_ptr)
+
+      begin
+        case code
+        when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+          opaque.call_on_partitions_assigned(consumer, tpl)
+        when RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+          opaque.call_on_partitions_revoked(consumer, tpl)
+        end
+      rescue Exception => err
+        Rdkafka::Config.logger.error("Unhandled exception: #{err.class} - #{err.message}")
+      end
+    end
 
     # Stats
 
@@ -164,6 +217,8 @@ module Rdkafka
     RD_KAFKA_VTYPE_OPAQUE = 6
     RD_KAFKA_VTYPE_MSGFLAGS = 7
     RD_KAFKA_VTYPE_TIMESTAMP = 8
+    RD_KAFKA_VTYPE_HEADER = 9
+    RD_KAFKA_VTYPE_HEADERS = 10
 
     RD_KAFKA_MSG_F_COPY = 0x2
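A note on `RebalanceCallback` above: the C callback receives only a raw opaque pointer, so it cannot carry a Ruby object reference across the FFI boundary. Instead, the gem parks the `Opaque` in the `Rdkafka::Config.opaques` hash and the callback looks it up again by the pointer's integer address. A stripped-down sketch of that registry pattern (the names here are illustrative, not the gem's API):

```ruby
# Illustrative registry: objects are stored under an integer key before the
# native call, and fetched back inside the callback via opaque_ptr.to_i.
module OpaqueRegistry
  @store = {}

  def self.register(key, object)
    @store[key] = object
  end

  def self.lookup(key)
    @store[key]
  end
end

listener = Object.new
OpaqueRegistry.register(42, listener) # done once, when the consumer is created
OpaqueRegistry.lookup(42)             # done on every rebalance, inside the callback
```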
data/lib/rdkafka/config.rb CHANGED
@@ -72,6 +72,7 @@ module Rdkafka
     # @return [Config]
     def initialize(config_hash = {})
       @config_hash = DEFAULT_CONFIG.merge(config_hash)
+      @consumer_rebalance_listener = nil
     end
 
     # Set a config option.
@@ -93,6 +94,13 @@ module Rdkafka
       @config_hash[key]
     end
 
+    # Get notifications on partition assignment/revocation for the subscribed topics
+    #
+    # @param listener [Object, #on_partitions_assigned, #on_partitions_revoked] listener instance
+    def consumer_rebalance_listener=(listener)
+      @consumer_rebalance_listener = listener
+    end
+
     # Create a consumer with this configuration.
     #
     # @raise [ConfigError] When the configuration contains invalid options
@@ -100,9 +108,19 @@ module Rdkafka
     #
     # @return [Consumer] The created consumer
     def consumer
-      kafka = native_kafka(native_config, :rd_kafka_consumer)
+      opaque = Opaque.new
+      config = native_config(opaque)
+
+      if @consumer_rebalance_listener
+        opaque.consumer_rebalance_listener = @consumer_rebalance_listener
+        Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
+      end
+
+      kafka = native_kafka(config, :rd_kafka_consumer)
+
       # Redirect the main queue to the consumer
       Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
+
       # Return consumer with Kafka client
       Rdkafka::Consumer.new(kafka)
     end
@@ -204,9 +222,24 @@ module Rdkafka
   # @private
   class Opaque
     attr_accessor :producer
+    attr_accessor :consumer_rebalance_listener
 
     def call_delivery_callback(delivery_handle)
       producer.call_delivery_callback(delivery_handle) if producer
     end
+
+    def call_on_partitions_assigned(consumer, list)
+      return unless consumer_rebalance_listener
+      return unless consumer_rebalance_listener.respond_to?(:on_partitions_assigned)
+
+      consumer_rebalance_listener.on_partitions_assigned(consumer, list)
+    end
+
+    def call_on_partitions_revoked(consumer, list)
+      return unless consumer_rebalance_listener
+      return unless consumer_rebalance_listener.respond_to?(:on_partitions_revoked)
+
+      consumer_rebalance_listener.on_partitions_revoked(consumer, list)
+    end
   end
 end
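A sketch of wiring the new listener from user code; `LoggingListener` is illustrative, and either callback may be omitted because `Opaque` checks `respond_to?` before dispatching:

```ruby
class LoggingListener
  def on_partitions_assigned(consumer, list)
    puts "assigned: #{list.to_h}"
  end

  def on_partitions_revoked(consumer, list)
    puts "revoked: #{list.to_h}"
  end
end

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092", # illustrative
  "group.id"          => "example-group"
)
config.consumer_rebalance_listener = LoggingListener.new
consumer = config.consumer # the rebalance callback is only registered when a listener is set
```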
data/lib/rdkafka/consumer.rb CHANGED
@@ -30,7 +30,8 @@ module Rdkafka
    # @return [nil]
    def subscribe(*topics)
      # Create topic partition list with topics and no partition set
-      tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
+      tpl = TopicPartitionList.new_native_tpl(topics.length)
+
      topics.each do |topic|
        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
          tpl,
@@ -43,9 +44,6 @@ module Rdkafka
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
-    ensure
-      # Clean up the topic partition list
-      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
    end
 
    # Unsubscribe from all subscribed topics.
@@ -60,6 +58,44 @@ module Rdkafka
      end
    end
 
+    # Pause producing or consuming for the provided list of partitions
+    #
+    # @param list [TopicPartitionList] The topics with partitions to pause
+    #
+    # @raise [RdkafkaTopicPartitionListError] When pausing fails.
+    #
+    # @return [nil]
+    def pause(list)
+      unless list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+      tpl = list.to_native_tpl
+      response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+
+      if response != 0
+        list = TopicPartitionList.from_native_tpl(tpl)
+        raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+      end
+    end
+
+    # Resume producing or consuming for the provided list of partitions
+    #
+    # @param list [TopicPartitionList] The topics with partitions to resume
+    #
+    # @raise [RdkafkaError] When resuming fails.
+    #
+    # @return [nil]
+    def resume(list)
+      unless list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+      tpl = list.to_native_tpl
+      response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response, "Error resuming '#{list.to_h}'")
+      end
+    end
+
    # Return the current subscription to topics and partitions
    #
    # @raise [RdkafkaError] When getting the subscription fails.
@@ -67,12 +103,17 @@ module Rdkafka
    # @return [TopicPartitionList]
    def subscription
      tpl = FFI::MemoryPointer.new(:pointer)
-      tpl.autorelease = false
      response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-      Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl.get_pointer(0))
+      tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+      begin
+        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+      end
    end
 
    # Atomic assignment of partitions to consume
@@ -89,8 +130,6 @@ module Rdkafka
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
      end
-    ensure
-      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
    end
 
    # Returns the current partition assignment.
@@ -100,12 +139,18 @@ module Rdkafka
    # @return [TopicPartitionList]
    def assignment
      tpl = FFI::MemoryPointer.new(:pointer)
-      tpl.autorelease = false
      response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, tpl)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-      Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl.get_pointer(0))
+
+      tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+      begin
+        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+      end
    end
 
    # Return the current committed offset per partition for this consumer group.
@@ -117,7 +162,7 @@ module Rdkafka
    # @raise [RdkafkaError] When getting the committed positions fails.
    #
    # @return [TopicPartitionList]
-    def committed(list=nil, timeout_ms=200)
+    def committed(list=nil, timeout_ms=1200)
      if list.nil?
        list = assignment
      elsif !list.is_a?(TopicPartitionList)
@@ -190,6 +235,22 @@ module Rdkafka
      out
    end
 
+    # Returns the ClusterId as reported in broker metadata.
+    #
+    # @return [String, nil]
+    def cluster_id
+      Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+    end
+
+    # Returns this client's broker-assigned group member id.
+    #
+    # This currently requires the high-level KafkaConsumer.
+    #
+    # @return [String, nil]
+    def member_id
+      Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+    end
+
    # Store offset of a message to be used in the next commit of this consumer
    #
    # When using this `enable.auto.offset.store` should be set to `false` in the config.
@@ -242,8 +303,6 @@ module Rdkafka
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-    ensure
-      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
    end
 
    # Poll for the next message on one of the subscribed topics
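Taken together, a sketch of the new consumer surface introduced above; the topic name and partition range are illustrative, and `consumer` is assumed to be subscribed:

```ruby
tpl = Rdkafka::Consumer::TopicPartitionList.new
tpl.add_topic("example_topic", (0..2))

consumer.pause(tpl)  # poll returns nil for these partitions while paused
# ... catch up, flush state, apply backpressure ...
consumer.resume(tpl)

# The new metadata readers:
consumer.cluster_id # => the broker-reported cluster id, or nil
consumer.member_id  # => e.g. "rdkafka-..." once the group has assigned partitions
```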
data/lib/rdkafka/consumer/headers.rb ADDED
@@ -0,0 +1,61 @@
+module Rdkafka
+  class Consumer
+    # A message's headers
+    class Headers
+      # Reads a native Kafka message's headers into a Ruby hash
+      #
+      # @return [Hash<String, String>] the message headers
+      #
+      # @raise [Rdkafka::RdkafkaError] when reading the headers fails
+      #
+      # @private
+      def self.from_native(native_message)
+        headers_ptrptr = FFI::MemoryPointer.new(:pointer)
+        err = Rdkafka::Bindings.rd_kafka_message_headers(native_message, headers_ptrptr)
+
+        if err == Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT
+          return {}
+        elsif err != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+          raise Rdkafka::RdkafkaError.new(err, "Error reading message headers")
+        end
+
+        headers_ptr = headers_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
+
+        name_ptrptr = FFI::MemoryPointer.new(:pointer)
+        value_ptrptr = FFI::MemoryPointer.new(:pointer)
+        size_ptr = Rdkafka::Bindings::SizePtr.new
+        headers = {}
+
+        idx = 0
+        loop do
+          err = Rdkafka::Bindings.rd_kafka_header_get_all(
+            headers_ptr,
+            idx,
+            name_ptrptr,
+            value_ptrptr,
+            size_ptr
+          )
+
+          if err == Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT
+            break
+          elsif err != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+            raise Rdkafka::RdkafkaError.new(err, "Error reading a message header at index #{idx}")
+          end
+
+          name = name_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
+          name = name.read_string_to_null
+
+          size = size_ptr[:value]
+          value = value_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
+          value = value.read_string(size)
+
+          headers[name.to_sym] = value
+
+          idx += 1
+        end
+
+        headers
+      end
+    end
+  end
+end
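One detail of `from_native` worth noting: header names are symbolized (`name.to_sym`) while the producer stringifies whatever keys it is given, so keys do not round-trip as the same type. A small sketch, assuming a produced and then consumed `message`:

```ruby
producer.produce(
  topic: "example_topic",           # illustrative topic
  payload: "hi",
  headers: { "trace-id" => "abc" }  # string key on the way in
).wait

message.headers[:"trace-id"] # => "abc" (symbol key on the way out)
message.headers["trace-id"]  # => nil
```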
data/lib/rdkafka/consumer/message.rb CHANGED
@@ -26,6 +26,9 @@ module Rdkafka
       # @return [Time, nil]
       attr_reader :timestamp
 
+      # @return [Hash<String, String>] the message headers
+      attr_reader :headers
+
       # @private
       def initialize(native_message)
         # Set topic
@@ -54,12 +57,16 @@ module Rdkafka
         else
           nil
         end
+
+        @headers = Headers.from_native(native_message)
       end
 
       # Human readable representation of this message.
       # @return [String]
       def to_s
-        "<Message in '#{topic}' with key '#{truncate(key)}', payload '#{truncate(payload)}', partition #{partition}, offset #{offset}, timestamp #{timestamp}>"
+        is_headers = @headers.empty? ? "" : ", headers #{headers.size}"
+
+        "<Message in '#{topic}' with key '#{truncate(key)}', payload '#{truncate(payload)}', partition #{partition}, offset #{offset}, timestamp #{timestamp}#{is_headers}>"
       end
 
       def truncate(string)
@@ -69,6 +76,9 @@ module Rdkafka
           string
         end
       end
+
+      private
+
     end
   end
 end
data/lib/rdkafka/consumer/partition.rb CHANGED
@@ -10,20 +10,25 @@ module Rdkafka
       # @return [Integer, nil]
       attr_reader :offset
 
+      # Partition's error code
+      # @return [Integer]
+      attr_reader :err
+
       # @private
-      def initialize(partition, offset)
+      def initialize(partition, offset, err = 0)
         @partition = partition
         @offset = offset
+        @err = err
       end
 
       # Human readable representation of this partition.
       # @return [String]
       def to_s
-        if offset.nil?
-          "<Partition #{partition} without offset>"
-        else
-          "<Partition #{partition} with offset #{offset}>"
-        end
+        message = "<Partition #{partition}"
+        message += " offset=#{offset}" if offset
+        message += " err=#{err}" if err != 0
+        message += ">"
+        message
       end
 
       # Human readable representation of this partition.
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -54,7 +54,7 @@ module Rdkafka
         if partitions.is_a? Integer
           partitions = (0..partitions - 1)
         end
-        @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil) }
+        @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil, 0) }
       end
     end
 
@@ -88,7 +88,7 @@ module Rdkafka
 
       # Create a new topic partition list based on a native one.
       #
-      # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied and afterwards it will be destroyed.
+      # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied.
       #
       # @return [TopicPartitionList]
       #
@@ -111,7 +111,7 @@ module Rdkafka
           else
             elem[:offset]
           end
-          partition = Partition.new(elem[:partition], offset)
+          partition = Partition.new(elem[:partition], offset, elem[:err])
           partitions.push(partition)
           data[elem[:topic]] = partitions
         end
@@ -119,42 +119,55 @@ module Rdkafka
 
        # Return the created object
        TopicPartitionList.new(data)
-      ensure
-        # Destroy the tpl
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(pointer)
      end
 
-      # Create a native tpl with the contents of this object added
+      # Create a native tpl with the contents of this object added.
      #
+      # The pointer will be freed by `rd_kafka_topic_partition_list_destroy` when the GC releases it.
+      #
+      # @return [FFI::AutoPointer]
      # @private
      def to_native_tpl
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count).tap do |tpl|
-          @data.each do |topic, partitions|
-            if partitions
-              partitions.each do |p|
-                Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
-                  tpl,
-                  topic,
-                  p.partition
-                )
-                if p.offset
-                  Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
-                    tpl,
-                    topic,
-                    p.partition,
-                    p.offset
-                  )
-                end
-              end
-            else
+        tpl = TopicPartitionList.new_native_tpl(count)
+
+        @data.each do |topic, partitions|
+          if partitions
+            partitions.each do |p|
              Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
                tpl,
                topic,
-                -1
+                p.partition
              )
+              if p.offset
+                Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
+                  tpl,
+                  topic,
+                  p.partition,
+                  p.offset
+                )
+              end
            end
+          else
+            Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
+              tpl,
+              topic,
+              -1
+            )
          end
        end
+
+        tpl
+      end
+
+      # Creates a new native tpl and wraps it in an FFI::AutoPointer, which calls
+      # `rd_kafka_topic_partition_list_destroy` when the pointer is released by the GC
+      #
+      # @param count [Integer] the initial capacity of the partition list
+      # @return [FFI::AutoPointer]
+      # @private
+      def self.new_native_tpl(count)
+        tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
+        FFI::AutoPointer.new(tpl, Rdkafka::Bindings.method(:rd_kafka_topic_partition_list_destroy))
      end
    end
  end
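The ownership change above hinges on `FFI::AutoPointer`: the destroy function is attached as a finalizer, which is why the explicit `ensure ... destroy` blocks could be removed. A self-contained illustration of the mechanism, with libc `malloc`/`free` standing in for `rd_kafka_topic_partition_list_new`/`_destroy`:

```ruby
require "ffi"

module LibC
  extend FFI::Library
  ffi_lib FFI::Library::LIBC
  attach_function :malloc, [:size_t], :pointer
  attach_function :free, [:pointer], :void
end

raw = LibC.malloc(128)
# The second argument is invoked with the pointer when the wrapper is GC'd.
ptr = FFI::AutoPointer.new(raw, LibC.method(:free))
# ptr behaves like any FFI::Pointer; no ensure block is needed to free it.
```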
data/lib/rdkafka/error.rb CHANGED
@@ -40,4 +40,16 @@ module Rdkafka
      code == :partition_eof
    end
  end
+
+  # Error with a topic partition list returned by the underlying rdkafka library.
+  class RdkafkaTopicPartitionListError < RdkafkaError
+    # @return [TopicPartitionList]
+    attr_reader :topic_partition_list
+
+    # @private
+    def initialize(response, topic_partition_list, message_prefix=nil)
+      super(response, message_prefix)
+      @topic_partition_list = topic_partition_list
+    end
+  end
 end
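Since the new class inherits from `RdkafkaError`, existing `rescue Rdkafka::RdkafkaError` clauses still catch pause failures; a sketch, assuming the `consumer` and `tpl` from the pause example earlier:

```ruby
begin
  consumer.pause(tpl)
rescue Rdkafka::RdkafkaTopicPartitionListError => e
  e.code                 # inherited from RdkafkaError
  e.topic_partition_list # the partitions that failed to pause
rescue Rdkafka::RdkafkaError => e
  # any other rdkafka failure
end
```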
data/lib/rdkafka/producer.rb CHANGED
@@ -57,11 +57,12 @@ module Rdkafka
    # @param key [String] The message's key
    # @param partition [Integer,nil] Optional partition to produce to
    # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
+    # @param headers [Hash<String,String>] Optional message headers
    #
    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
    #
    # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
-    def produce(topic:, payload: nil, key: nil, partition: nil, timestamp: nil)
+    def produce(topic:, payload: nil, key: nil, partition: nil, timestamp: nil, headers: nil)
      # Start by checking and converting the input
 
      # Get payload length
@@ -101,9 +102,7 @@ module Rdkafka
      delivery_handle[:offset] = -1
      DeliveryHandle.register(delivery_handle.to_ptr.address, delivery_handle)
 
-      # Produce the message
-      response = Rdkafka::Bindings.rd_kafka_producev(
-        @native_kafka,
+      args = [
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TOPIC, :string, topic,
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_MSGFLAGS, :int, Rdkafka::Bindings::RD_KAFKA_MSG_F_COPY,
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_VALUE, :buffer_in, payload, :size_t, payload_size,
@@ -111,7 +110,27 @@ module Rdkafka
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_PARTITION, :int32, partition,
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TIMESTAMP, :int64, raw_timestamp,
        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_OPAQUE, :pointer, delivery_handle,
-        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_END
+      ]
+
+      if headers
+        headers.each do |key0, value0|
+          key = key0.to_s
+          value = value0.to_s
+          args += [
+            :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER,
+            :string, key,
+            :pointer, value,
+            :size_t, value.bytes.size
+          ]
+        end
+      end
+
+      args += [:int, Rdkafka::Bindings::RD_KAFKA_VTYPE_END]
+
+      # Produce the message
+      response = Rdkafka::Bindings.rd_kafka_producev(
+        @native_kafka,
+        *args
      )
 
      # Raise error if the produce call was not successful
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,5 @@
 module Rdkafka
-  VERSION = "0.4.2"
-  LIBRDKAFKA_VERSION = "0.11.6"
-  LIBRDKAFKA_SOURCE_SHA256 = '9c0afb8b53779d968225edf1e79da48a162895ad557900f75e7978f65e642032'
+  VERSION = "0.5.0"
+  LIBRDKAFKA_VERSION = "1.0.0"
+  LIBRDKAFKA_SOURCE_SHA256 = 'b00a0d9f0e8c7ceb67b93b4ee67f3c68279a843a15bf4a6742eb64897519aa09'
 end
data/spec/rdkafka/consumer/message_spec.rb CHANGED
@@ -23,8 +23,25 @@ describe Rdkafka::Consumer::Message do
      end
    end
  end
+
  subject { Rdkafka::Consumer::Message.new(native_message) }
 
+  before do
+    # Mock headers, because setting or reading headers on a message that is
+    # created from scratch produces a segmentation fault.
+    #
+    # Core dump example:
+    #
+    # ```
+    # frame #7: 0x000000010dacf5ab librdkafka.dylib`rd_list_destroy + 11
+    # frame #8: 0x000000010dae5a7e librdkafka.dylib`rd_kafka_headers_destroy + 14
+    # frame #9: 0x000000010da9ab40 librdkafka.dylib`rd_kafka_message_set_headers + 32
+    # ```
+    expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args) do
+      Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT
+    end
+  end
+
  it "should have a topic" do
    expect(subject.topic).to eq "topic_name"
  end
data/spec/rdkafka/consumer/partition_spec.rb CHANGED
@@ -2,7 +2,8 @@ require "spec_helper"
 
 describe Rdkafka::Consumer::Partition do
   let(:offset) { 100 }
-  subject { Rdkafka::Consumer::Partition.new(1, offset) }
+  let(:err) { 0 }
+  subject { Rdkafka::Consumer::Partition.new(1, offset, err) }
 
   it "should have a partition" do
     expect(subject.partition).to eq 1
@@ -12,22 +13,34 @@ describe Rdkafka::Consumer::Partition do
     expect(subject.offset).to eq 100
   end
 
+  it "should have an err code" do
+    expect(subject.err).to eq 0
+  end
+
   describe "#to_s" do
     it "should return a human readable representation" do
-      expect(subject.to_s).to eq "<Partition 1 with offset 100>"
+      expect(subject.to_s).to eq "<Partition 1 offset=100>"
     end
   end
 
   describe "#inspect" do
     it "should return a human readable representation" do
-      expect(subject.to_s).to eq "<Partition 1 with offset 100>"
+      expect(subject.to_s).to eq "<Partition 1 offset=100>"
     end
 
     context "without offset" do
       let(:offset) { nil }
 
       it "should return a human readable representation" do
-        expect(subject.to_s).to eq "<Partition 1 without offset>"
+        expect(subject.to_s).to eq "<Partition 1>"
+      end
+    end
+
+    context "with err code" do
+      let(:err) { 1 }
+
+      it "should return a human readable representation" do
+        expect(subject.to_s).to eq "<Partition 1 offset=100 err=1>"
       end
     end
   end
data/spec/rdkafka/consumer/topic_partition_list_spec.rb CHANGED
@@ -118,7 +118,7 @@ describe Rdkafka::Consumer::TopicPartitionList do
     list = Rdkafka::Consumer::TopicPartitionList.new
     list.add_topic("topic1", [0, 1])
 
-    expected = "<TopicPartitionList: {\"topic1\"=>[<Partition 0 without offset>, <Partition 1 without offset>]}>"
+    expected = "<TopicPartitionList: {\"topic1\"=>[<Partition 0>, <Partition 1>]}>"
 
     expect(list.to_s).to eq expected
   end
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -47,6 +47,83 @@ describe Rdkafka::Consumer do
     end
   end
 
+  describe "#pause and #resume" do
+    context "subscription" do
+      let(:timeout) { 1000 }
+
+      before { consumer.subscribe("consume_test_topic") }
+      after { consumer.unsubscribe }
+
+      it "should pause and then resume" do
+        # 1. partitions are assigned
+        wait_for_assignment(consumer)
+        expect(consumer.assignment).not_to be_empty
+
+        # 2. send a first message
+        send_one_message
+
+        # 3. ensure that the message is successfully consumed
+        records = consumer.poll(timeout)
+        expect(records).not_to be_nil
+        consumer.commit
+
+        # 4. send a second message
+        send_one_message
+
+        # 5. pause the subscription
+        tpl = Rdkafka::Consumer::TopicPartitionList.new
+        tpl.add_topic("consume_test_topic", (0..2))
+        consumer.pause(tpl)
+
+        # 6. ensure that messages are not available
+        records = consumer.poll(timeout)
+        expect(records).to be_nil
+
+        # 7. resume the subscription
+        tpl = Rdkafka::Consumer::TopicPartitionList.new
+        tpl.add_topic("consume_test_topic", (0..2))
+        consumer.resume(tpl)
+
+        # 8. ensure that the message is successfully consumed
+        records = consumer.poll(timeout)
+        expect(records).not_to be_nil
+        consumer.commit
+      end
+    end
+
+    it "should raise when not TopicPartitionList" do
+      expect { consumer.pause(true) }.to raise_error(TypeError)
+      expect { consumer.resume(true) }.to raise_error(TypeError)
+    end
+
+    it "should raise an error when pausing fails" do
+      list = Rdkafka::Consumer::TopicPartitionList.new.tap { |tpl| tpl.add_topic('topic', (0..1)) }
+
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_pause_partitions).and_return(20)
+      expect {
+        consumer.pause(list)
+      }.to raise_error do |err|
+        expect(err).to be_instance_of(Rdkafka::RdkafkaTopicPartitionListError)
+        expect(err.topic_partition_list).to be
+      end
+    end
+
+    it "should raise an error when resume fails" do
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_resume_partitions).and_return(20)
+      expect {
+        consumer.resume(Rdkafka::Consumer::TopicPartitionList.new)
+      }.to raise_error Rdkafka::RdkafkaError
+    end
+
+    def send_one_message
+      producer.produce(
+        topic: "consume_test_topic",
+        payload: "payload 1",
+        key: "key 1"
+      ).wait
+    end
+  end
+
   describe "#assign and #assignment" do
     it "should return an empty assignment if nothing is assigned" do
       expect(consumer.assignment).to be_empty
@@ -257,13 +334,13 @@ describe Rdkafka::Consumer do
     it "should return the watermark offsets" do
       # Make sure there's a message
       producer.produce(
-        topic: "consume_test_topic",
+        topic: "watermarks_test_topic",
         payload: "payload 1",
         key: "key 1",
         partition: 0
       ).wait
 
-      low, high = consumer.query_watermark_offsets("consume_test_topic", 0, 5000)
+      low, high = consumer.query_watermark_offsets("watermarks_test_topic", 0, 5000)
       expect(low).to eq 0
       expect(high).to be > 0
     end
@@ -358,6 +435,22 @@ describe Rdkafka::Consumer do
     end
   end
 
+  describe "#cluster_id" do
+    it 'should return the current ClusterId' do
+      consumer.subscribe("consume_test_topic")
+      wait_for_assignment(consumer)
+      expect(consumer.cluster_id).not_to be_empty
+    end
+  end
+
+  describe "#member_id" do
+    it 'should return the current MemberId' do
+      consumer.subscribe("consume_test_topic")
+      wait_for_assignment(consumer)
+      expect(consumer.member_id).to start_with('rdkafka-')
+    end
+  end
+
   describe "#poll" do
     it "should return nil if there is no subscription" do
       expect(consumer.poll(1000)).to be_nil
@@ -395,6 +488,68 @@ describe Rdkafka::Consumer do
     end
   end
 
+  describe "#poll with headers" do
+    it "should return a message with headers" do
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: { foo: 'bar' }
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers).to include(foo: 'bar')
+    end
+
+    it "should return a message with no headers" do
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key no headers",
+        headers: nil
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key no headers')
+      expect(message.headers).to be_empty
+    end
+
+    it "should raise an error when message headers aren't readable" do
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args) { 1 }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key err headers",
+        headers: nil
+      ).wait
+
+      expect {
+        wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      }.to raise_error do |err|
+        expect(err).to be_instance_of(Rdkafka::RdkafkaError)
+        expect(err.message).to start_with("Error reading message headers")
+      end
+    end
+
+    it "should raise an error when the first message header isn't readable" do
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_header_get_all).with(any_args) { 1 }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key err headers",
+        headers: { foo: 'bar' }
+      ).wait
+
+      expect {
+        wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      }.to raise_error do |err|
+        expect(err).to be_instance_of(Rdkafka::RdkafkaError)
+        expect(err.message).to start_with("Error reading a message header at index 0")
+      end
+    end
+  end
+
   describe "#each" do
     it "should yield messages" do
       handles = []
@@ -417,4 +572,61 @@ describe Rdkafka::Consumer do
       end
     end
   end
+
+  describe "a rebalance listener" do
+    it "should get notifications" do
+      listener = Struct.new(:queue) do
+        def on_partitions_assigned(consumer, list)
+          collect(:assign, list)
+        end
+
+        def on_partitions_revoked(consumer, list)
+          collect(:revoke, list)
+        end
+
+        def collect(name, list)
+          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
+          queue << ([name] + partitions)
+        end
+      end.new([])
+
+      notify_listener(listener)
+
+      expect(listener.queue).to eq([
+        [:assign, "consume_test_topic", 0, 1, 2],
+        [:revoke, "consume_test_topic", 0, 1, 2]
+      ])
+    end
+
+    it 'should handle callback exceptions' do
+      listener = Struct.new(:queue) do
+        def on_partitions_assigned(consumer, list)
+          queue << :assigned
+          raise 'boom'
+        end
+
+        def on_partitions_revoked(consumer, list)
+          queue << :revoked
+          raise 'boom'
+        end
+      end.new([])
+
+      notify_listener(listener)
+
+      expect(listener.queue).to eq([:assigned, :revoked])
+    end
+
+    def notify_listener(listener)
+      # 1. subscribe and poll
+      config.consumer_rebalance_listener = listener
+      consumer.subscribe("consume_test_topic")
+      wait_for_assignment(consumer)
+      consumer.poll(100)
+
+      # 2. unsubscribe
+      consumer.unsubscribe
      wait_for_unassignment(consumer)
+      consumer.close
+    end
+  end
 end
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -217,6 +217,48 @@ describe Rdkafka::Producer do
     expect(message.payload).to be_nil
   end
 
+  it "should produce a message with headers" do
+    handle = producer.produce(
+      topic: "produce_test_topic",
+      payload: "payload headers",
+      key: "key headers",
+      headers: { foo: :bar, baz: :foobar }
+    )
+    report = handle.wait(5)
+
+    # Consume the message and verify its content
+    message = wait_for_message(
+      topic: "produce_test_topic",
+      delivery_report: report
+    )
+
+    expect(message.payload).to eq "payload headers"
+    expect(message.key).to eq "key headers"
+    expect(message.headers[:foo]).to eq "bar"
+    expect(message.headers[:baz]).to eq "foobar"
+    expect(message.headers[:foobar]).to be_nil
+  end
+
+  it "should produce a message with empty headers" do
+    handle = producer.produce(
+      topic: "produce_test_topic",
+      payload: "payload headers",
+      key: "key headers",
+      headers: {}
+    )
+    report = handle.wait(5)
+
+    # Consume the message and verify its content
+    message = wait_for_message(
+      topic: "produce_test_topic",
+      delivery_report: report
+    )
+
+    expect(message.payload).to eq "payload headers"
+    expect(message.key).to eq "key headers"
+    expect(message.headers).to be_empty
+  end
+
   it "should produce messages that aren't waited for and not crash" do
     5.times do
       200.times do
data/spec/spec_helper.rb CHANGED
@@ -61,3 +61,10 @@ def wait_for_assignment(consumer)
     sleep 1
   end
 end
+
+def wait_for_unassignment(consumer)
+  10.times do
+    break if consumer.assignment.empty?
+    sleep 1
+  end
+end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.4.2
+  version: 0.5.0
 platform: ruby
 authors:
 - Thijs Cadier
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-01-12 00:00:00.000000000 Z
+date: 2019-04-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -131,6 +131,7 @@ files:
 - lib/rdkafka/bindings.rb
 - lib/rdkafka/config.rb
 - lib/rdkafka/consumer.rb
+- lib/rdkafka/consumer/headers.rb
 - lib/rdkafka/consumer/message.rb
 - lib/rdkafka/consumer/partition.rb
 - lib/rdkafka/consumer/topic_partition_list.rb