rdkafka 0.14.0 → 0.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +2 -3
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +25 -0
  7. data/README.md +44 -22
  8. data/docker-compose.yml +3 -1
  9. data/ext/Rakefile +43 -26
  10. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  11. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  12. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  13. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  14. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  15. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  16. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  17. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  18. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  19. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  20. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  21. data/lib/rdkafka/admin.rb +443 -0
  22. data/lib/rdkafka/bindings.rb +125 -2
  23. data/lib/rdkafka/callbacks.rb +196 -1
  24. data/lib/rdkafka/config.rb +24 -3
  25. data/lib/rdkafka/consumer/headers.rb +1 -1
  26. data/lib/rdkafka/consumer/topic_partition_list.rb +8 -7
  27. data/lib/rdkafka/consumer.rb +80 -29
  28. data/lib/rdkafka/producer/delivery_handle.rb +12 -1
  29. data/lib/rdkafka/producer/delivery_report.rb +16 -3
  30. data/lib/rdkafka/producer.rb +42 -12
  31. data/lib/rdkafka/version.rb +3 -3
  32. data/lib/rdkafka.rb +11 -0
  33. data/rdkafka.gemspec +2 -2
  34. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  35. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  36. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  37. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  38. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  39. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  40. data/spec/rdkafka/admin_spec.rb +204 -0
  41. data/spec/rdkafka/config_spec.rb +8 -0
  42. data/spec/rdkafka/consumer_spec.rb +89 -0
  43. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -0
  44. data/spec/rdkafka/producer_spec.rb +26 -2
  45. data/spec/spec_helper.rb +3 -1
  46. data.tar.gz.sig +0 -0
  47. metadata +29 -4
  48. metadata.gz.sig +0 -0
@@ -23,6 +23,96 @@ module Rdkafka
       end
     end
 
+    class GroupResult
+      attr_reader :result_error, :error_string, :result_name
+      def initialize(group_result_pointer)
+        native_error = Rdkafka::Bindings.rd_kafka_group_result_error(group_result_pointer)
+
+        if native_error.null?
+          @result_error = 0
+          @error_string = FFI::Pointer::NULL
+        else
+          @result_error = native_error[:code]
+          @error_string = native_error[:errstr]
+        end
+
+        @result_name = Rdkafka::Bindings.rd_kafka_group_result_name(group_result_pointer)
+      end
+      def self.create_group_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_acl_result_t
+    #
+    # @private
+    class CreateAclResult
+      attr_reader :result_error, :error_string
+
+      def initialize(acl_result_pointer)
+        rd_kafka_error_pointer = Bindings.rd_kafka_acl_result_error(acl_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
+      end
+
+      def self.create_acl_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_DeleteAcls_result_response_t
+    #
+    # @private
+    class DeleteAclResult
+      attr_reader :result_error, :error_string, :matching_acls, :matching_acls_count
+
+      def initialize(acl_result_pointer)
+        @matching_acls = []
+        rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_error(acl_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
+        if @result_error == 0
+          # Get the number of matching acls
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @matching_acls = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_matching_acls(acl_result_pointer, pointer_to_size_t)
+          @matching_acls_count = pointer_to_size_t.read_int
+        end
+      end
+
+      def self.delete_acl_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_DescribeAcls_result_t
+    #
+    # @private
+    class DescribeAclResult
+      attr_reader :result_error, :error_string, :matching_acls, :matching_acls_count
+
+      def initialize(event_ptr)
+        @matching_acls = []
+        @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
+        @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
+        if @result_error == 0
+          acl_describe_result = Rdkafka::Bindings.rd_kafka_event_DescribeAcls_result(event_ptr)
+          # Get the number of matching acls
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @matching_acls = Rdkafka::Bindings.rd_kafka_DescribeAcls_result_acls(acl_describe_result, pointer_to_size_t)
+          @matching_acls_count = pointer_to_size_t.read_int
+        end
+      end
+    end
+
     # FFI Function used for Create Topic and Delete Topic callbacks
     BackgroundEventCallbackFunction = FFI::Function.new(
       :void, [:pointer, :pointer, :pointer]
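
All the `create_*_results_from_array` helpers above share one FFI idiom: librdkafka returns a C array of result pointers plus an out-parameter count, and each element is recovered by offsetting the base pointer and dereferencing it. A self-contained sketch of that walk follows; note that `FFI::Pointer#+` offsets in bytes, so a robust walk scales the index by the pointer size (the unscaled `array_pointer + (index - 1)` form above only dereferences the first element at the right address, which may be what the `# TODO fix this` marker further down refers to):

    require "ffi"

    # Build a C array of three pointers, then walk it element by element.
    strings = %w[a b c].map { |s| FFI::MemoryPointer.from_string(s) }
    array = FFI::MemoryPointer.new(:pointer, strings.size)
    array.write_array_of_pointer(strings)

    values = (0...strings.size).map do |i|
      # FFI pointer arithmetic is byte-based: scale the element index by the
      # platform pointer size before dereferencing.
      (array + i * FFI.type_size(:pointer)).read_pointer.read_string
    end

    values # => ["a", "b", "c"]
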
@@ -38,6 +128,16 @@ module Rdkafka
           process_create_topic(event_ptr)
         elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
           process_delete_topic(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS_RESULT
+          process_create_partitions(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATEACLS_RESULT
+          process_create_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEACLS_RESULT
+          process_delete_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+          process_describe_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+          process_delete_groups(event_ptr)
         end
       end
 
@@ -60,6 +160,23 @@ module Rdkafka
         end
       end
 
+      def self.process_delete_groups(event_ptr)
+        delete_groups_result = Rdkafka::Bindings.rd_kafka_event_DeleteGroups_result(event_ptr)
+
+        # Get the number of delete group results
+        pointer_to_size_t = FFI::MemoryPointer.new(:size_t)
+        delete_group_result_array = Rdkafka::Bindings.rd_kafka_DeleteGroups_result_groups(delete_groups_result, pointer_to_size_t)
+        delete_group_results = GroupResult.create_group_results_from_array(pointer_to_size_t.read_int, delete_group_result_array) # TODO fix this
+        delete_group_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if (delete_group_handle = Rdkafka::Admin::DeleteGroupsHandle.remove(delete_group_handle_ptr.address))
+          delete_group_handle[:response] = delete_group_results[0].result_error
+          delete_group_handle[:error_string] = delete_group_results[0].error_string
+          delete_group_handle[:result_name] = delete_group_results[0].result_name
+          delete_group_handle[:pending] = false
+        end
+      end
+
       def self.process_delete_topic(event_ptr)
         delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
 
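
Every `process_*` callback here resolves its pending handle the same way: the opaque pointer attached when the admin request was issued comes back on the event, and its address keys a Ruby-side registry of in-flight handles. A simplified sketch of that idiom (the real implementation lives in `Rdkafka::AbstractHandle`, which `DeleteGroupsHandle` and the other handles inherit from):

    # Hypothetical stand-in for the handle registry.
    class PendingHandles
      REGISTRY = {}

      # On dispatch: park the handle under the address of the opaque pointer
      # handed to librdkafka.
      def self.register(handle)
        REGISTRY[handle.to_ptr.address] = handle
      end

      # From the background event callback: claim the handle exactly once,
      # returning nil if it was already resolved.
      def self.remove(address)
        REGISTRY.delete(address)
      end
    end
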
@@ -76,6 +193,74 @@ module Rdkafka
           delete_topic_handle[:pending] = false
         end
       end
+
+      def self.process_create_partitions(event_ptr)
+        create_partitions_result = Rdkafka::Bindings.rd_kafka_event_CreatePartitions_result(event_ptr)
+
+        # Get the number of create partitions results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_partitions_result_array = Rdkafka::Bindings.rd_kafka_CreatePartitions_result_topics(create_partitions_result, pointer_to_size_t)
+        create_partitions_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_partitions_result_array)
+        create_partitions_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_partitions_handle = Rdkafka::Admin::CreatePartitionsHandle.remove(create_partitions_handle_ptr.address)
+          create_partitions_handle[:response] = create_partitions_results[0].result_error
+          create_partitions_handle[:error_string] = create_partitions_results[0].error_string
+          create_partitions_handle[:result_name] = create_partitions_results[0].result_name
+          create_partitions_handle[:pending] = false
+        end
+      end
+
+      def self.process_create_acl(event_ptr)
+        create_acls_result = Rdkafka::Bindings.rd_kafka_event_CreateAcls_result(event_ptr)
+
+        # Get the number of acl results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_acl_result_array = Rdkafka::Bindings.rd_kafka_CreateAcls_result_acls(create_acls_result, pointer_to_size_t)
+        create_acl_results = CreateAclResult.create_acl_results_from_array(pointer_to_size_t.read_int, create_acl_result_array)
+        create_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_acl_handle = Rdkafka::Admin::CreateAclHandle.remove(create_acl_handle_ptr.address)
+          create_acl_handle[:response] = create_acl_results[0].result_error
+          create_acl_handle[:response_string] = create_acl_results[0].error_string
+          create_acl_handle[:pending] = false
+        end
+      end
+
+      def self.process_delete_acl(event_ptr)
+        delete_acls_result = Rdkafka::Bindings.rd_kafka_event_DeleteAcls_result(event_ptr)
+
+        # Get the number of acl results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        delete_acl_result_responses = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_responses(delete_acls_result, pointer_to_size_t)
+        delete_acl_results = DeleteAclResult.delete_acl_results_from_array(pointer_to_size_t.read_int, delete_acl_result_responses)
+        delete_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if delete_acl_handle = Rdkafka::Admin::DeleteAclHandle.remove(delete_acl_handle_ptr.address)
+          delete_acl_handle[:response] = delete_acl_results[0].result_error
+          delete_acl_handle[:response_string] = delete_acl_results[0].error_string
+          delete_acl_handle[:pending] = false
+          if delete_acl_results[0].result_error == 0
+            delete_acl_handle[:matching_acls] = delete_acl_results[0].matching_acls
+            delete_acl_handle[:matching_acls_count] = delete_acl_results[0].matching_acls_count
+          end
+        end
+      end
+
+      def self.process_describe_acl(event_ptr)
+        describe_acl = DescribeAclResult.new(event_ptr)
+        describe_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if describe_acl_handle = Rdkafka::Admin::DescribeAclHandle.remove(describe_acl_handle_ptr.address)
+          describe_acl_handle[:response] = describe_acl.result_error
+          describe_acl_handle[:response_string] = describe_acl.error_string
+          describe_acl_handle[:pending] = false
+          if describe_acl.result_error == 0
+            describe_acl_handle[:acls] = describe_acl.matching_acls
+            describe_acl_handle[:acls_count] = describe_acl.matching_acls_count
+          end
+        end
+      end
     end
 
     # FFI Function used for Message Delivery callbacks
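
For orientation, here is a hypothetical end-to-end flow for the ACL admin API these callbacks serve. The diff above shows only the event plumbing; `Admin#create_acl` and its kwargs live in data/lib/rdkafka/admin.rb (not reproduced here), so treat the exact names as indicative and check them against the admin specs listed above:

    admin = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").admin

    # Issue the request; librdkafka answers asynchronously on the background queue.
    handle = admin.create_acl(
      resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC,
      resource_name: "events",
      resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
      principal: "User:alice",
      host: "*",
      operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ,
      permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
    )

    # process_create_acl above flips the handle's :pending flag; #wait blocks
    # until then and returns a CreateAclReport.
    report = handle.wait(max_wait_timeout: 15)
    admin.close
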
@@ -103,10 +288,20 @@ module Rdkafka
 
           # Call delivery callback on opaque
           if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
+            opaque.call_delivery_callback(
+              Rdkafka::Producer::DeliveryReport.new(
+                message[:partition],
+                message[:offset],
+                topic_name,
+                message[:err],
+                delivery_handle.label
+              ),
+              delivery_handle
+            )
           end
         end
       end
     end
+
   end
 end
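
The `delivery_handle.label` argument threaded through here enables a label round-trip: a value attached at produce time resurfaces on both the handle and the delivery report. A sketch, assuming the `label:` produce argument introduced in this release:

    producer = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").producer

    producer.delivery_callback = ->(report) {
      # The label attached at dispatch time comes back on the report.
      puts "delivered #{report.label} to #{report.topic}/#{report.partition}"
    }

    handle = producer.produce(topic: "events", payload: "body", label: "order-42")
    handle.wait
    handle.label # => "order-42"
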
@@ -112,6 +112,7 @@ module Rdkafka
     def initialize(config_hash = {})
       @config_hash = DEFAULT_CONFIG.merge(config_hash)
       @consumer_rebalance_listener = nil
+      @consumer_poll_set = true
     end
 
     # Set a config option.
@@ -140,6 +141,22 @@ module Rdkafka
       @consumer_rebalance_listener = listener
     end
 
+    # Should we use a single queue for the underlying consumer and events.
+    #
+    # This is an advanced API that allows for more granular control of the polling process.
+    # When this value is set to `false` (`true` by default), there will be two queues that need
+    # to be polled:
+    #   - the main librdkafka queue for events
+    #   - the consumer queue with messages and rebalances
+    #
+    # It is recommended to use the defaults and only set it to `false` in advanced multi-threaded
+    # and complex cases where granular event handling control is needed.
+    #
+    # @param poll_set [Boolean]
+    def consumer_poll_set=(poll_set)
+      @consumer_poll_set = poll_set
+    end
+
     # Creates a consumer with this configuration.
     #
     # @return [Consumer] The created consumer
@@ -158,8 +175,8 @@ module Rdkafka
       # Create native client
       kafka = native_kafka(config, :rd_kafka_consumer)
 
-      # Redirect the main queue to the consumer
-      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
+      # Redirect the main queue to the consumer queue
+      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka) if @consumer_poll_set
 
       # Return consumer with Kafka client
       Rdkafka::Consumer.new(
@@ -187,7 +204,11 @@ module Rdkafka
       # Return producer with Kafka client
       partitioner_name = self[:partitioner] || self["partitioner"]
       Rdkafka::Producer.new(
-        Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true, opaque: opaque),
+        Rdkafka::NativeKafka.new(
+          native_kafka(config, :rd_kafka_producer),
+          run_polling_thread: true,
+          opaque: opaque
+        ),
         partitioner_name
       ).tap do |producer|
         opaque.producer = producer
@@ -20,7 +20,7 @@ module Rdkafka
       #
       # @private
       #
-      # @param [librdkakfa message] native_message
+      # @param [Rdkafka::Bindings::Message] native_message
       # @return [Hash<String, String>] headers Hash for the native_message
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       def self.from_native(native_message)
@@ -36,6 +36,11 @@ module Rdkafka
       # Add a topic with optionally partitions to the list.
       # Calling this method multiple times for the same topic will overwrite the previous configuraton.
       #
+      # @param topic [String] The topic's name
+      # @param partitions [Array<Integer>, Range<Integer>, Integer] The topic's partitions or partition count
+      #
+      # @return [nil]
+      #
       # @example Add a topic with unassigned partitions
       #   tpl.add_topic("topic")
       #
@@ -45,10 +50,6 @@ module Rdkafka
       # @example Add a topic with all topics up to a count
       #   tpl.add_topic("topic", 9)
       #
-      # @param topic [String] The topic's name
-      # @param partitions [Array<Integer>, Range<Integer>, Integer] The topic's partitions or partition count
-      #
-      # @return [nil]
       def add_topic(topic, partitions=nil)
         if partitions.nil?
           @data[topic.to_s] = nil
@@ -90,11 +91,11 @@ module Rdkafka
 
       # Create a new topic partition list based of a native one.
       #
+      # @private
+      #
       # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied.
       #
       # @return [TopicPartitionList]
-      #
-      # @private
       def self.from_native_tpl(pointer)
         # Data to be moved into the tpl
         data = {}
@@ -127,8 +128,8 @@ module Rdkafka
       #
       # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
       #
-      # @return [FFI::Pointer]
       # @private
+      # @return [FFI::Pointer]
       def to_native_tpl
         tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
 
@@ -19,10 +19,6 @@ module Rdkafka
       @native_kafka = native_kafka
     end
 
-    def finalizer
-      ->(_) { close }
-    end
-
     # @return [String] consumer name
     def name
       @name ||= @native_kafka.with_inner do |inner|
@@ -30,6 +26,10 @@ module Rdkafka
       end
     end
 
+    def finalizer
+      ->(_) { close }
+    end
+
     # Close this consumer
     # @return [nil]
     def close
@@ -221,6 +221,15 @@ module Rdkafka
       ptr.free unless ptr.nil?
     end
 
+    # @return [Boolean] true if our current assignment has been lost involuntarily.
+    def assignment_lost?
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+      end
+    end
+
     # Return the current committed offset per partition for this consumer group.
     # The offset field of each requested partition will either be set to stored offset or to -1001
     # in case there was no stored offset for that partition.
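
A sketch of where `#assignment_lost?` earns its keep: when the group revokes partitions involuntarily (for example after a `max.poll.interval.ms` violation), committing is pointless because the partitions already belong to another member:

    # Inside a rebalance-aware processing flow (hypothetical handler code):
    if consumer.assignment_lost?
      # Assignment was taken away involuntarily: drop in-flight work and let
      # the next rebalance hand out partitions again.
    else
      consumer.commit
    end
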
@@ -230,7 +239,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for fetching this information.
     # @return [TopicPartitionList]
     # @raise [RdkafkaError] When getting the committed positions fails.
-    def committed(list=nil, timeout_ms=1200)
+    def committed(list=nil, timeout_ms=2000)
       closed_consumer_check(__method__)
 
       if list.nil?
@@ -259,9 +268,9 @@ module Rdkafka
     #
     # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
     #
-    # @raise [RdkafkaError] When getting the positions fails.
-    #
     # @return [TopicPartitionList]
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
     def position(list=nil)
       if list.nil?
         list = assignment
@@ -289,7 +298,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for querying the broker
     # @return [Integer] The low and high watermark
     # @raise [RdkafkaError] When querying the broker fails.
-    def query_watermark_offsets(topic, partition, timeout_ms=200)
+    def query_watermark_offsets(topic, partition, timeout_ms=1000)
       closed_consumer_check(__method__)
 
       low = FFI::MemoryPointer.new(:int64, 1)
@@ -325,7 +334,7 @@ module Rdkafka
     # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
     #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    def lag(topic_partition_list, watermark_timeout_ms=100)
+    def lag(topic_partition_list, watermark_timeout_ms=1000)
       out = {}
 
       topic_partition_list.to_h.each do |topic, partitions|
@@ -378,27 +387,26 @@ module Rdkafka
     def store_offset(message)
       closed_consumer_check(__method__)
 
-      # rd_kafka_offset_store is one of the few calls that does not support
-      # a string as the topic, so create a native topic for it.
-      native_topic = @native_kafka.with_inner do |inner|
-        Rdkafka::Bindings.rd_kafka_topic_new(
+      list = TopicPartitionList.new
+      list.add_topic_and_partitions_with_offsets(
+        message.topic,
+        message.partition => message.offset + 1
+      )
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_store(
           inner,
-          message.topic,
-          nil
+          tpl
         )
       end
-      response = Rdkafka::Bindings.rd_kafka_offset_store(
-        native_topic,
-        message.partition,
-        message.offset
-      )
+
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
-      if native_topic && !native_topic.null?
-        Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
-      end
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
     end
 
     # Seek to a particular message. The next poll on the topic/partition will return the
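
Usage sketch for the reworked `#store_offset`: with `enable.auto.offset.store` set to `false`, the application stores `message.offset + 1` (the next offset to consume) after processing, and a later commit picks that value up:

    config = Rdkafka::Config.new(
      "bootstrap.servers": "localhost:9092",
      "group.id": "example",
      "enable.auto.offset.store": false
    )
    consumer = config.consumer
    consumer.subscribe("events")

    consumer.each do |message|
      handle_message(message) # hypothetical application handler
      consumer.store_offset(message)
    end
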
@@ -438,9 +446,9 @@ module Rdkafka
     #
     # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
     #
-    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
-    #
     # @return [TopicPartitionList]
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
     def offsets_for_times(list, timeout_ms = 1000)
       closed_consumer_check(__method__)
 
@@ -531,15 +539,41 @@ module Rdkafka
       end
     end
 
+    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
+    # was set to `true`.
+    #
+    # Events will cause application-provided callbacks to be called.
+    #
+    # Events (in the context of the consumer):
+    #   - error callbacks
+    #   - stats callbacks
+    #   - any other callbacks supported by librdkafka that are not part of the consumer poll and
+    #     that would have a callback configured and activated.
+    #
+    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
+    # be called. When in use, it does **NOT** replace `#poll` but needs to run alongside it.
+    #
+    # @param timeout_ms [Integer] poll timeout. If set to 0, polling runs asynchronously; if set
+    #   to -1, it blocks until any event is available.
+    #
+    # @note This method technically should be called `#poll` and the current `#poll` should be
+    #   called `#consumer_poll`, though we keep the current naming convention to make it backwards
+    #   compatible.
+    def events_poll(timeout_ms = 0)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
+      end
+    end
+
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
     # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
     # reached, so you probably want to disable that when using this method of iteration.
     #
-    # @raise [RdkafkaError] When polling fails
     # @yieldparam message [Message] Received message
     # @return [nil]
+    # @raise [RdkafkaError] When polling fails
     def each
       loop do
         message = poll(250)
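
Putting `#events_poll` together with the `consumer_poll_set` option from config.rb above, a two-queue polling loop looks roughly like this (sketch; `process` is a hypothetical handler):

    config = Rdkafka::Config.new("bootstrap.servers": "localhost:9092", "group.id": "example")
    config.consumer_poll_set = false
    consumer = config.consumer
    consumer.subscribe("events")

    loop do
      message = consumer.poll(100) # consumer queue: messages and rebalances
      consumer.events_poll(0)      # main queue: error/stats callbacks, non-blocking
      process(message) if message
    end
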
@@ -594,14 +628,15 @@ module Rdkafka
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
-    # @raise [RdkafkaError] When polling fails
-    #
-    # @yield [messages, pending_exception]
     # @yieldparam messages [Array] An array of received Message
     # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #
+    # @yield [messages, pending_exception]
     # which will be propagated after processing of the partial batch is complete.
     #
     # @return [nil]
+    #
+    # @raise [RdkafkaError] When polling fails
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       closed_consumer_check(__method__)
       slice = []
@@ -637,6 +672,22 @@ module Rdkafka
       end
     end
 
+    # Returns pointer to the consumer group metadata. It is used only in the context of
+    # exactly-once-semantics in transactions, which is why it is never remapped to Ruby.
+    #
+    # This API is **not** usable by itself from Ruby.
+    #
+    # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`.
+    #
+    # @private
+    def consumer_group_metadata_pointer
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        Bindings.rd_kafka_consumer_group_metadata(inner)
+      end
+    end
+
     private
 
     def closed_consumer_check(method)
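
A minimal sketch of the intended lifecycle, assuming the matching destroy binding referenced in the @note above is attached in data/lib/rdkafka/bindings.rb (this API is @private and only meaningful inside the transactional, exactly-once flow):

    pointer = consumer.consumer_group_metadata_pointer
    begin
      # ... hand the pointer to the transactional producer internals ...
    ensure
      # Must be released manually; nothing on the Ruby side owns this memory.
      Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer)
    end
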
@@ -11,6 +11,9 @@ module Rdkafka
              :offset, :int64,
              :topic_name, :pointer
 
+      # @return [Object, nil] label set during message production or nil by default
+      attr_accessor :label
+
       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
         "delivery"
@@ -18,7 +21,15 @@ module Rdkafka
 
       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
+        DeliveryReport.new(
+          self[:partition],
+          self[:offset],
+          # For some errors, we will not get a topic name reference and in cases like this
+          # we should not return it
+          self[:topic_name].null? ? nil : self[:topic_name].read_string,
+          self[:response] != 0 ? RdkafkaError.new(self[:response]) : nil,
+          label
+        )
       end
     end
   end
@@ -12,21 +12,34 @@ module Rdkafka
       # @return [Integer]
       attr_reader :offset
 
-      # The name of the topic this message was produced to.
-      # @return [String]
+      # The name of the topic this message was produced to or nil in case of reports with errors
+      # where the topic was not reached.
+      #
+      # @return [String, nil]
       attr_reader :topic_name
 
       # Error in case happen during produce.
       # @return [Integer]
       attr_reader :error
 
+      # @return [Object, nil] label set during message production or nil by default
+      attr_reader :label
+
+      # We alias `#topic_name` as `#topic` to make this consistent with `Consumer::Message`,
+      # where the topic name is available via the `#topic` method. That way we have a consistent
+      # name that is present in both places.
+      #
+      # We do not remove the original `#topic_name` because of backwards compatibility.
+      alias topic topic_name
+
       private
 
-      def initialize(partition, offset, topic_name = nil, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
        @partition = partition
        @offset = offset
        @topic_name = topic_name
        @error = error
+       @label = label
      end
    end
  end
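
Reading the enriched report (sketch): `#topic` aliases `#topic_name`, both can be `nil` when delivery failed before a topic was resolved, and `#label` carries whatever object was attached at produce time:

    report = producer.produce(topic: "events", payload: "body", label: "x").wait
    report.topic # => "events" (alias of #topic_name)
    report.label # => "x"
    report.error # => nil on success
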