rdkafka 0.15.0 → 0.16.0.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +4 -7
  4. data/.gitignore +2 -0
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +25 -1
  7. data/README.md +31 -9
  8. data/docker-compose.yml +1 -1
  9. data/ext/Rakefile +51 -26
  10. data/lib/rdkafka/abstract_handle.rb +44 -20
  11. data/lib/rdkafka/admin/acl_binding_result.rb +38 -24
  12. data/lib/rdkafka/admin/create_topic_report.rb +1 -1
  13. data/lib/rdkafka/admin/delete_groups_report.rb +1 -1
  14. data/lib/rdkafka/admin/delete_topic_report.rb +1 -1
  15. data/lib/rdkafka/admin.rb +15 -0
  16. data/lib/rdkafka/bindings.rb +44 -8
  17. data/lib/rdkafka/callbacks.rb +28 -12
  18. data/lib/rdkafka/config.rb +69 -15
  19. data/lib/rdkafka/consumer.rb +39 -17
  20. data/lib/rdkafka/helpers/oauth.rb +58 -0
  21. data/lib/rdkafka/native_kafka.rb +32 -19
  22. data/lib/rdkafka/producer/delivery_handle.rb +12 -1
  23. data/lib/rdkafka/producer/delivery_report.rb +16 -3
  24. data/lib/rdkafka/producer.rb +47 -10
  25. data/lib/rdkafka/version.rb +1 -1
  26. data/lib/rdkafka.rb +1 -0
  27. data/rdkafka.gemspec +2 -2
  28. data/spec/rdkafka/abstract_handle_spec.rb +34 -21
  29. data/spec/rdkafka/admin/delete_acl_report_spec.rb +1 -0
  30. data/spec/rdkafka/admin/describe_acl_report_spec.rb +1 -0
  31. data/spec/rdkafka/admin_spec.rb +53 -0
  32. data/spec/rdkafka/bindings_spec.rb +97 -0
  33. data/spec/rdkafka/config_spec.rb +53 -0
  34. data/spec/rdkafka/consumer_spec.rb +74 -0
  35. data/spec/rdkafka/native_kafka_spec.rb +8 -1
  36. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -0
  37. data/spec/rdkafka/producer_spec.rb +69 -2
  38. data/spec/spec_helper.rb +16 -1
  39. data.tar.gz.sig +0 -0
  40. metadata +6 -4
  41. metadata.gz.sig +0 -0
data/lib/rdkafka/callbacks.rb

@@ -156,7 +156,8 @@ module Rdkafka
           create_topic_handle[:response] = create_topic_results[0].result_error
           create_topic_handle[:error_string] = create_topic_results[0].error_string
           create_topic_handle[:result_name] = create_topic_results[0].result_name
-          create_topic_handle[:pending] = false
+
+          create_topic_handle.unlock
         end
       end
 
@@ -173,7 +174,8 @@ module Rdkafka
           delete_group_handle[:response] = delete_group_results[0].result_error
           delete_group_handle[:error_string] = delete_group_results[0].error_string
           delete_group_handle[:result_name] = delete_group_results[0].result_name
-          delete_group_handle[:pending] = false
+
+          delete_group_handle.unlock
         end
       end
 
@@ -190,7 +192,8 @@ module Rdkafka
           delete_topic_handle[:response] = delete_topic_results[0].result_error
           delete_topic_handle[:error_string] = delete_topic_results[0].error_string
           delete_topic_handle[:result_name] = delete_topic_results[0].result_name
-          delete_topic_handle[:pending] = false
+
+          delete_topic_handle.unlock
         end
       end
 
@@ -207,7 +210,8 @@ module Rdkafka
           create_partitions_handle[:response] = create_partitions_results[0].result_error
           create_partitions_handle[:error_string] = create_partitions_results[0].error_string
           create_partitions_handle[:result_name] = create_partitions_results[0].result_name
-          create_partitions_handle[:pending] = false
+
+          create_partitions_handle.unlock
         end
       end
 
@@ -223,7 +227,8 @@ module Rdkafka
         if create_acl_handle = Rdkafka::Admin::CreateAclHandle.remove(create_acl_handle_ptr.address)
           create_acl_handle[:response] = create_acl_results[0].result_error
           create_acl_handle[:response_string] = create_acl_results[0].error_string
-          create_acl_handle[:pending] = false
+
+          create_acl_handle.unlock
         end
       end
 
@@ -239,11 +244,13 @@ module Rdkafka
         if delete_acl_handle = Rdkafka::Admin::DeleteAclHandle.remove(delete_acl_handle_ptr.address)
           delete_acl_handle[:response] = delete_acl_results[0].result_error
           delete_acl_handle[:response_string] = delete_acl_results[0].error_string
-          delete_acl_handle[:pending] = false
+
           if delete_acl_results[0].result_error == 0
             delete_acl_handle[:matching_acls] = delete_acl_results[0].matching_acls
             delete_acl_handle[:matching_acls_count] = delete_acl_results[0].matching_acls_count
           end
+
+          delete_acl_handle.unlock
         end
       end
 
@@ -254,18 +261,18 @@ module Rdkafka
         if describe_acl_handle = Rdkafka::Admin::DescribeAclHandle.remove(describe_acl_handle_ptr.address)
           describe_acl_handle[:response] = describe_acl.result_error
           describe_acl_handle[:response_string] = describe_acl.error_string
-          describe_acl_handle[:pending] = false
+
           if describe_acl.result_error == 0
             describe_acl_handle[:acls] = describe_acl.matching_acls
             describe_acl_handle[:acls_count] = describe_acl.matching_acls_count
           end
+
+          describe_acl_handle.unlock
         end
       end
-
     end
 
     # FFI Function used for Message Delivery callbacks
-
     DeliveryCallbackFunction = FFI::Function.new(
       :void, [:pointer, :pointer, :pointer]
     ) do |client_ptr, message_ptr, opaque_ptr|
@@ -285,15 +292,24 @@ module Rdkafka
           delivery_handle[:partition] = message[:partition]
           delivery_handle[:offset] = message[:offset]
           delivery_handle[:topic_name] = FFI::MemoryPointer.from_string(topic_name)
-          delivery_handle[:pending] = false
 
           # Call delivery callback on opaque
           if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
+            opaque.call_delivery_callback(
+              Rdkafka::Producer::DeliveryReport.new(
+                message[:partition],
+                message[:offset],
+                topic_name,
+                message[:err],
+                delivery_handle.label
+              ),
+              delivery_handle
+            )
           end
+
+          delivery_handle.unlock
         end
       end
     end
-
   end
 end
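
The delivery callback now forwards the handle's `label` into the `DeliveryReport`. A minimal sketch of how this surfaces to user code, assuming `Producer#produce` accepts the new `label:` keyword added in this release (the `producer.rb` hunk is not shown here):

```ruby
require "rdkafka"

producer = Rdkafka::Config.new({ "bootstrap.servers": "localhost:9092" }).producer

# The label travels on the handle and comes back on both the report passed
# to the delivery callback and the report returned by #wait
handle = producer.produce(topic: "events", payload: "ping", label: "order-42")
report = handle.wait(max_wait_timeout: 10)
report.label # => "order-42"
```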
data/lib/rdkafka/config.rb

@@ -15,13 +15,13 @@ module Rdkafka
     @@opaques = ObjectSpace::WeakMap.new
     # @private
     @@log_queue = Queue.new
-
-    Thread.start do
-      loop do
-        severity, msg = @@log_queue.pop
-        @@logger.add(severity, msg)
-      end
-    end
+    # We memoize thread on the first log flush
+    # This allows us also to restart logger thread on forks
+    @@log_thread = nil
+    # @private
+    @@log_mutex = Mutex.new
+    # @private
+    @@oauthbearer_token_refresh_callback = nil
 
     # Returns the current logger, by default this is a logger to stdout.
     #
@@ -30,6 +30,24 @@ module Rdkafka
       @@logger
     end
 
+    # Makes sure that there is a thread for consuming logs
+    # We do not spawn thread immediately and we need to check if it operates to support forking
+    def self.ensure_log_thread
+      return if @@log_thread && @@log_thread.alive?
+
+      @@log_mutex.synchronize do
+        # Restart if dead (fork, crash)
+        @@log_thread = nil if @@log_thread && !@@log_thread.alive?
+
+        @@log_thread ||= Thread.start do
+          loop do
+            severity, msg = @@log_queue.pop
+            @@logger.add(severity, msg)
+          end
+        end
+      end
+    end
+
     # Returns a queue whose contents will be passed to the configured logger. Each entry
     # should follow the format [Logger::Severity, String]. The benefit over calling the
     # logger directly is that this is safe to use from trap contexts.
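A sketch of what the lazy thread buys us; `ensure_log_thread` is normally invoked internally before a log line is flushed, so the explicit call below is only illustrative:

```ruby
require "logger"

# In a forked child the inherited logger thread is dead; the next flush
# (or this normally-internal call) respawns it under @@log_mutex
fork do
  Rdkafka::Config.ensure_log_thread
  Rdkafka::Config.log_queue << [Logger::INFO, "rdkafka: hello from the child"]
end
```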
@@ -87,6 +105,24 @@ module Rdkafka
       @@error_callback
     end
 
+    # Sets the SASL/OAUTHBEARER token refresh callback.
+    # This callback will be triggered when it is time to refresh the client's OAUTHBEARER token
+    #
+    # @param callback [Proc, #call] The callback
+    #
+    # @return [nil]
+    def self.oauthbearer_token_refresh_callback=(callback)
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call) || callback == nil
+      @@oauthbearer_token_refresh_callback = callback
+    end
+
+    # Returns the current oauthbearer_token_refresh_callback callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    def self.oauthbearer_token_refresh_callback
+      @@oauthbearer_token_refresh_callback
+    end
+
     # @private
     def self.opaques
       @@opaques
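
A hedged usage sketch; the exact arguments librdkafka passes to the callback are wired up in `bindings.rb` (its hunk is not shown here), so the parameter list below is an assumption:

```ruby
Rdkafka::Config.oauthbearer_token_refresh_callback = lambda do |_config, client_name|
  # Look up or mint a token for `client_name` here, then install it on the
  # matching client via the new Helpers::OAuth API (see helpers/oauth.rb below)
end
```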
@@ -159,11 +195,13 @@ module Rdkafka
 
     # Creates a consumer with this configuration.
     #
+    # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
+    #   automatically. Defaults to true. Set to false only when doing complex initialization.
     # @return [Consumer] The created consumer
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def consumer
+    def consumer(native_kafka_auto_start: true)
       opaque = Opaque.new
       config = native_config(opaque)
 
@@ -183,18 +221,21 @@ module Rdkafka
         Rdkafka::NativeKafka.new(
           kafka,
           run_polling_thread: false,
-          opaque: opaque
+          opaque: opaque,
+          auto_start: native_kafka_auto_start
         )
       )
     end
 
     # Create a producer with this configuration.
     #
+    # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
+    #   automatically. Defaults to true. Set to false only when doing complex initialization.
     # @return [Producer] The created producer
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def producer
+    def producer(native_kafka_auto_start: true)
       # Create opaque
       opaque = Opaque.new
       # Create Kafka config
@@ -203,11 +244,15 @@ module Rdkafka
       Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
       partitioner_name = self[:partitioner] || self["partitioner"]
+
+      kafka = native_kafka(config, :rd_kafka_producer)
+
       Rdkafka::Producer.new(
         Rdkafka::NativeKafka.new(
-          native_kafka(config, :rd_kafka_producer),
+          kafka,
           run_polling_thread: true,
-          opaque: opaque
+          opaque: opaque,
+          auto_start: native_kafka_auto_start
         ),
         partitioner_name
       ).tap do |producer|
@@ -217,19 +262,25 @@ module Rdkafka
 
     # Creates an admin instance with this configuration.
     #
+    # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
+    #   automatically. Defaults to true. Set to false only when doing complex initialization.
     # @return [Admin] The created admin instance
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def admin
+    def admin(native_kafka_auto_start: true)
       opaque = Opaque.new
       config = native_config(opaque)
       Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
+
+      kafka = native_kafka(config, :rd_kafka_producer)
+
       Rdkafka::Admin.new(
         Rdkafka::NativeKafka.new(
-          native_kafka(config, :rd_kafka_producer),
+          kafka,
           run_polling_thread: true,
-          opaque: opaque
+          opaque: opaque,
+          auto_start: native_kafka_auto_start
         )
       )
     end
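
All three factory methods now accept `native_kafka_auto_start:`. A sketch of deferred startup, assuming `Producer` and `Admin` expose the same `#start` delegation that `Consumer` gains below:

```ruby
config = Rdkafka::Config.new({ "bootstrap.servers": "localhost:9092" })

# Defer native polling so the client can be fully wired up (for example,
# an OAUTHBEARER token installed) before the first callbacks fire
producer = config.producer(native_kafka_auto_start: false)
# ... complex initialization happens here ...
producer.start
```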
@@ -283,6 +334,9 @@ module Rdkafka
 
       # Set error callback
       Rdkafka::Bindings.rd_kafka_conf_set_error_cb(config, Rdkafka::Bindings::ErrorCallback)
+
+      # Set oauth callback
+      Rdkafka::Bindings.rd_kafka_conf_set_oauthbearer_token_refresh_cb(config, Rdkafka::Bindings::OAuthbearerTokenRefreshCallback)
     end
   end
 
data/lib/rdkafka/consumer.rb

@@ -13,14 +13,17 @@ module Rdkafka
   class Consumer
     include Enumerable
     include Helpers::Time
+    include Helpers::OAuth
 
     # @private
     def initialize(native_kafka)
      @native_kafka = native_kafka
     end
 
-    def finalizer
-      ->(_) { close }
+    # Starts the native Kafka polling thread and kicks off the init polling
+    # @note Not needed to run unless explicit start was disabled
+    def start
+      @native_kafka.start
     end
 
     # @return [String] consumer name
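
Because `Consumer` now includes `Helpers::OAuth`, a token can be installed before polling begins. A sketch (the token variable is a placeholder):

```ruby
consumer = config.consumer(native_kafka_auto_start: false)
consumer.oauthbearer_set_token(
  token: jwt_string, # placeholder for a serialized JWS token from your provider
  lifetime_ms: (Time.now.to_f * 1000).to_i + 300_000,
  principal_name: "kafka-client"
)
consumer.start
```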
@@ -30,6 +33,10 @@ module Rdkafka
       end
     end
 
+    def finalizer
+      ->(_) { close }
+    end
+
     # Close this consumer
     # @return [nil]
     def close
@@ -239,7 +246,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for fetching this information.
     # @return [TopicPartitionList]
     # @raise [RdkafkaError] When getting the committed positions fails.
-    def committed(list=nil, timeout_ms=1200)
+    def committed(list=nil, timeout_ms=2000)
       closed_consumer_check(__method__)
 
       if list.nil?
@@ -387,27 +394,26 @@ module Rdkafka
     def store_offset(message)
       closed_consumer_check(__method__)
 
-      # rd_kafka_offset_store is one of the few calls that does not support
-      # a string as the topic, so create a native topic for it.
-      native_topic = @native_kafka.with_inner do |inner|
-        Rdkafka::Bindings.rd_kafka_topic_new(
+      list = TopicPartitionList.new
+      list.add_topic_and_partitions_with_offsets(
+        message.topic,
+        message.partition => message.offset + 1
+      )
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_store(
           inner,
-          message.topic,
-          nil
+          tpl
         )
       end
-      response = Rdkafka::Bindings.rd_kafka_offset_store(
-        native_topic,
-        message.partition,
-        message.offset
-      )
+
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
-      if native_topic && !native_topic.null?
-        Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
-      end
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
     end
 
     # Seek to a particular message. The next poll on the topic/partition will return the
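
Usage is unchanged even though the implementation now goes through a topic partition list; note that, as before, the stored offset is `message.offset + 1`, i.e. the next message to consume:

```ruby
# enable.auto.offset.store should be set to false for manual offset storage
consumer.each do |message|
  handle_message(message) # hypothetical processing method
  consumer.store_offset(message)
end
```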
@@ -673,6 +679,22 @@ module Rdkafka
       end
     end
 
+    # Returns pointer to the consumer group metadata. It is used only in the context of
+    # exactly-once-semantics in transactions, this is why it is never remapped to Ruby
+    #
+    # This API is **not** usable by itself from Ruby
+    #
+    # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`
+    #
+    # @private
+    def consumer_group_metadata_pointer
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        Bindings.rd_kafka_consumer_group_metadata(inner)
+      end
+    end
+
     private
 
     def closed_consumer_check(method)
data/lib/rdkafka/helpers/oauth.rb

@@ -0,0 +1,58 @@
+module Rdkafka
+  module Helpers
+
+    module OAuth
+
+      # Set the OAuthBearer token
+      #
+      # @param token [String] the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1.
+      # @param lifetime_ms [Integer] when the token expires, in terms of the number of milliseconds since the epoch. See https://currentmillis.com/.
+      # @param principal_name [String] the mandatory Kafka principal name associated with the token.
+      # @param extensions [Hash] optional SASL extensions key-value pairs to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1.
+      # @return [Integer] 0 on success
+      def oauthbearer_set_token(token:, lifetime_ms:, principal_name:, extensions: nil)
+        error_buffer = FFI::MemoryPointer.from_string(" " * 256)
+
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token(
+            inner, token, lifetime_ms, principal_name,
+            flatten_extensions(extensions), extension_size(extensions), error_buffer, 256
+          )
+        end
+
+        return response if response.zero?
+
+        oauthbearer_set_token_failure("Failed to set token: #{error_buffer.read_string}")
+
+        response
+      end
+
+      # Marks failed oauth token acquire in librdkafka
+      #
+      # @param reason [String] human readable error reason for failing to acquire token
+      def oauthbearer_set_token_failure(reason)
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token_failure(
+            inner,
+            reason
+          )
+        end
+      end
+
+      private
+
+      # Flatten the extensions hash into a string according to the spec, https://datatracker.ietf.org/doc/html/rfc7628#section-3.1
+      def flatten_extensions(extensions)
+        return nil unless extensions
+        "\x01#{extensions.map { |e| e.join("=") }.join("\x01")}"
+      end
+
+      # extension_size is the number of keys + values which should be a non-negative even number
+      # https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
+      def extension_size(extensions)
+        return 0 unless extensions
+        extensions.size * 2
+      end
+    end
+  end
+end
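
The module is mixed into the clients, so the token is set directly on a producer, consumer, or admin instance. A usage sketch (token value is a placeholder); per `flatten_extensions`, the extensions hash below is encoded as "\x01traceId=123" with an `extension_size` of 2 (one key plus one value):

```ruby
producer.oauthbearer_set_token(
  token: token, # placeholder for a serialized JWS token from your provider
  lifetime_ms: (Time.now.to_f * 1000).to_i + 300_000,
  principal_name: "kafka-client",
  extensions: { "traceId" => "123" }
)
```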
data/lib/rdkafka/native_kafka.rb

@@ -4,7 +4,7 @@ module Rdkafka
  # @private
  # A wrapper around a native kafka that polls and cleanly exits
  class NativeKafka
-    def initialize(inner, run_polling_thread:, opaque:)
+    def initialize(inner, run_polling_thread:, opaque:, auto_start: true)
      @inner = inner
      @opaque = opaque
      # Lock around external access
@@ -28,30 +28,43 @@ module Rdkafka
      # counter for operations in progress using inner
      @operations_in_progress = 0
 
-      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
-      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+      @run_polling_thread = run_polling_thread
 
-      if run_polling_thread
-        # Start thread to poll client for delivery callbacks,
-        # not used in consumer.
-        @polling_thread = Thread.new do
-          loop do
-            @poll_mutex.synchronize do
-              Rdkafka::Bindings.rd_kafka_poll(inner, 100)
-            end
+      start if auto_start
 
-            # Exit thread if closing and the poll queue is empty
-            if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
-              break
+      @closing = false
+    end
+
+    def start
+      synchronize do
+        return if @started
+
+        @started = true
+
+        # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+        Rdkafka::Bindings.rd_kafka_poll(@inner, 0)
+
+        if @run_polling_thread
+          # Start thread to poll client for delivery callbacks,
+          # not used in consumer.
+          @polling_thread = Thread.new do
+            loop do
+              @poll_mutex.synchronize do
+                Rdkafka::Bindings.rd_kafka_poll(@inner, 100)
+              end
+
+              # Exit thread if closing and the poll queue is empty
+              if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(@inner) == 0
+                break
+              end
            end
          end
-        end
 
-        @polling_thread.abort_on_exception = true
-        @polling_thread[:closing] = false
+          @polling_thread.name = "rdkafka.native_kafka##{Rdkafka::Bindings.rd_kafka_name(@inner).gsub('rdkafka', '')}"
+          @polling_thread.abort_on_exception = true
+          @polling_thread[:closing] = false
+        end
      end
-
-      @closing = false
    end
 
    def with_inner
data/lib/rdkafka/producer/delivery_handle.rb

@@ -11,6 +11,9 @@ module Rdkafka
              :offset, :int64,
              :topic_name, :pointer
 
+      # @return [Object, nil] label set during message production or nil by default
+      attr_accessor :label
+
       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
         "delivery"
@@ -18,7 +21,15 @@ module Rdkafka
 
       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
+        DeliveryReport.new(
+          self[:partition],
+          self[:offset],
+          # For part of errors, we will not get a topic name reference and in cases like this
+          # we should not return it
+          self[:topic_name].null? ? nil : self[:topic_name].read_string,
+          self[:response] != 0 ? RdkafkaError.new(self[:response]) : nil,
+          label
+        )
       end
     end
   end
data/lib/rdkafka/producer/delivery_report.rb

@@ -12,21 +12,34 @@ module Rdkafka
       # @return [Integer]
       attr_reader :offset
 
-      # The name of the topic this message was produced to.
-      # @return [String]
+      # The name of the topic this message was produced to or nil in case of reports with errors
+      # where topic was not reached.
+      #
+      # @return [String, nil]
       attr_reader :topic_name
 
       # Error in case happen during produce.
       # @return [Integer]
       attr_reader :error
 
+      # @return [Object, nil] label set during message production or nil by default
+      attr_reader :label
+
+      # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`
+      # where the topic name is under `#topic` method. That way we have a consistent name that
+      # is present in both places
+      #
+      # We do not remove the original `#topic_name` because of backwards compatibility
+      alias topic topic_name
+
       private
 
-      def initialize(partition, offset, topic_name = nil, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
        @partition = partition
        @offset = offset
        @topic_name = topic_name
        @error = error
+        @label = label
       end
     end
   end
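
Putting the report changes together, a sketch of inspecting a report in 0.16.0.beta1. Note that the unchanged `@return [Integer]` doc on `#error` is stale for the `#wait` path: `DeliveryHandle#create_result` above now passes an `RdkafkaError` (or nil) instead of a raw response code:

```ruby
report = delivery_handle.wait(max_wait_timeout: 10)

report.topic # alias of #topic_name; may be nil when delivery failed before
             # a topic reference was available
report.label # whatever object was attached at produce time, or nil
raise report.error if report.error
```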