rdkafka 0.13.0 → 0.15.1

Files changed (75)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +57 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +155 -111
  10. data/{LICENSE → MIT-LICENSE} +2 -1
  11. data/README.md +60 -39
  12. data/certs/cert_chain.pem +26 -0
  13. data/docker-compose.yml +18 -15
  14. data/ext/README.md +1 -1
  15. data/ext/Rakefile +43 -26
  16. data/lib/rdkafka/abstract_handle.rb +40 -26
  17. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  18. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  19. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  20. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  21. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  22. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  23. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  24. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  25. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  26. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin.rb +449 -7
  29. data/lib/rdkafka/bindings.rb +133 -7
  30. data/lib/rdkafka/callbacks.rb +196 -1
  31. data/lib/rdkafka/config.rb +53 -19
  32. data/lib/rdkafka/consumer/headers.rb +2 -4
  33. data/lib/rdkafka/consumer/topic_partition_list.rb +11 -8
  34. data/lib/rdkafka/consumer.rb +164 -74
  35. data/lib/rdkafka/helpers/time.rb +14 -0
  36. data/lib/rdkafka/metadata.rb +22 -1
  37. data/lib/rdkafka/native_kafka.rb +6 -1
  38. data/lib/rdkafka/producer/delivery_handle.rb +12 -1
  39. data/lib/rdkafka/producer/delivery_report.rb +16 -3
  40. data/lib/rdkafka/producer.rb +121 -13
  41. data/lib/rdkafka/version.rb +3 -3
  42. data/lib/rdkafka.rb +21 -1
  43. data/rdkafka.gemspec +19 -5
  44. data/renovate.json +6 -0
  45. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  46. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  47. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  48. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  49. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  50. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  51. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  52. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  53. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  54. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  55. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  56. data/spec/rdkafka/admin_spec.rb +205 -2
  57. data/spec/rdkafka/bindings_spec.rb +0 -1
  58. data/spec/rdkafka/callbacks_spec.rb +0 -2
  59. data/spec/rdkafka/config_spec.rb +8 -2
  60. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  61. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  62. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  63. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +19 -2
  64. data/spec/rdkafka/consumer_spec.rb +232 -39
  65. data/spec/rdkafka/error_spec.rb +0 -2
  66. data/spec/rdkafka/metadata_spec.rb +2 -3
  67. data/spec/rdkafka/native_kafka_spec.rb +2 -3
  68. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  69. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
  70. data/spec/rdkafka/producer_spec.rb +183 -3
  71. data/spec/spec_helper.rb +3 -1
  72. data.tar.gz.sig +0 -0
  73. metadata +78 -14
  74. metadata.gz.sig +0 -0
  75. data/.semaphore/semaphore.yml +0 -27
data/lib/rdkafka/consumer.rb
@@ -12,12 +12,20 @@ module Rdkafka
   # `each_slice` to consume batches of messages.
   class Consumer
     include Enumerable
+    include Helpers::Time

     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
     end

+    # @return [String] consumer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
+    end
+
     def finalizer
       ->(_) { close }
     end
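The new `#name` method exposes librdkafka's handle name via `rd_kafka_name` and memoizes it. A minimal usage sketch (broker address, group id and topic names in the sketches below are illustrative):

  require "rdkafka"

  consumer = Rdkafka::Config.new(
    "bootstrap.servers" => "localhost:9092", # assumed local broker
    "group.id" => "example-group"
  ).consumer

  puts consumer.name # e.g. "rdkafka#consumer-1"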
@@ -40,13 +48,11 @@ module Rdkafka
       @native_kafka.closed?
     end

-    # Subscribe to one or more topics letting Kafka handle partition assignments.
+    # Subscribes to one or more topics letting Kafka handle partition assignments.
     #
     # @param topics [Array<String>] One or more topic names
-    #
-    # @raise [RdkafkaError] When subscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When subscribing fails
     def subscribe(*topics)
       closed_consumer_check(__method__)

@@ -70,9 +76,8 @@ module Rdkafka

     # Unsubscribe from all subscribed topics.
     #
-    # @raise [RdkafkaError] When unsubscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When unsubscribing fails
     def unsubscribe
       closed_consumer_check(__method__)

@@ -87,10 +92,8 @@ module Rdkafka
     # Pause producing or consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
     def pause(list)
       closed_consumer_check(__method__)

@@ -114,13 +117,11 @@ module Rdkafka
       end
     end

-    # Resume producing consumption for the provided list of partitions
+    # Resumes producing consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaError] When resume subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When resume subscription fails.
     def resume(list)
       closed_consumer_check(__method__)

@@ -142,11 +143,10 @@ module Rdkafka
       end
     end

-    # Return the current subscription to topics and partitions
-    #
-    # @raise [RdkafkaError] When getting the subscription fails.
+    # Returns the current subscription to topics and partitions
     #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the subscription fails.
     def subscription
       closed_consumer_check(__method__)

@@ -171,7 +171,6 @@ module Rdkafka
     # Atomic assignment of partitions to consume
     #
     # @param list [TopicPartitionList] The topic with partitions to assign
-    #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
       closed_consumer_check(__method__)
@@ -196,9 +195,8 @@ module Rdkafka

     # Returns the current partition assignment.
     #
-    # @raise [RdkafkaError] When getting the assignment fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the assignment fails.
     def assignment
       closed_consumer_check(__method__)

@@ -223,16 +221,25 @@ module Rdkafka
       ptr.free unless ptr.nil?
     end

+    # @return [Boolean] true if our current assignment has been lost involuntarily.
+    def assignment_lost?
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+      end
+    end
+
     # Return the current committed offset per partition for this consumer group.
-    # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
+    # The offset field of each requested partition will either be set to stored offset or to -1001
+    # in case there was no stored offset for that partition.
     #
-    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
+    #   to use the current subscription.
     # @param timeout_ms [Integer] The timeout for fetching this information.
-    #
-    # @raise [RdkafkaError] When getting the committed positions fails.
-    #
     # @return [TopicPartitionList]
-    def committed(list=nil, timeout_ms=1200)
+    # @raise [RdkafkaError] When getting the committed positions fails.
+    def committed(list=nil, timeout_ms=2000)
       closed_consumer_check(__method__)

       if list.nil?
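`#assignment_lost?` wraps `rd_kafka_assignment_lost`, which reports whether the current assignment was lost involuntarily (e.g. after a session timeout or a `max.poll.interval.ms` violation) rather than revoked in an orderly rebalance. A sketch of how error handling might branch on it, reusing the `consumer` from the sketch above; `process` is a hypothetical handler:

  loop do
    message = consumer.poll(250)
    process(message) if message
  rescue Rdkafka::RdkafkaError
    if consumer.assignment_lost?
      # The partitions may already belong to another group member;
      # do not commit offsets for them.
    else
      consumer.commit(nil, true) # best-effort async commit
    end
  end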
@@ -256,16 +263,42 @@ module Rdkafka
       end
     end

+    # Return the current positions (offsets) for topics and partitions.
+    # The offset field of each requested partition will be set to the offset of the last consumed message + 1, or nil in case there was no previous message.
+    #
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
+    def position(list=nil)
+      if list.nil?
+        list = assignment
+      elsif !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be nil or a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_position(inner, tpl)
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    end
+
     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
     #
     # @param topic [String] The topic to query
     # @param partition [Integer] The partition to query
     # @param timeout_ms [Integer] The timeout for querying the broker
-    #
-    # @raise [RdkafkaError] When querying the broker fails.
-    #
     # @return [Integer] The low and high watermark
-    def query_watermark_offsets(topic, partition, timeout_ms=200)
+    # @raise [RdkafkaError] When querying the broker fails.
+    def query_watermark_offsets(topic, partition, timeout_ms=1000)
       closed_consumer_check(__method__)

       low = FFI::MemoryPointer.new(:int64, 1)
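The new `#position` complements `#committed`: position is the next offset the consumer will read (last consumed + 1, tracked client-side), while committed is whatever was last persisted to the broker. A sketch listing positions for the current assignment:

  consumer.position.to_h.each do |topic, partitions|
    partitions.each do |partition|
      # partition.offset is nil when nothing has been consumed yet
      puts "#{topic}/#{partition.partition} next offset: #{partition.offset.inspect}"
    end
  end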
@@ -298,11 +331,10 @@ module Rdkafka
     #
     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
-    #
+    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
+    #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    #
-    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
-    def lag(topic_partition_list, watermark_timeout_ms=100)
+    def lag(topic_partition_list, watermark_timeout_ms=1000)
       out = {}

       topic_partition_list.to_h.each do |topic, partitions|
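`#lag` issues one `#query_watermark_offsets` call per partition, so the raised default (`watermark_timeout_ms` 100 → 1000) applies to each query separately. Typical use against the committed offsets:

  lags = consumer.lag(consumer.committed)
  lags.each do |topic, partitions|
    partitions.each do |partition, lag|
      puts "#{topic}/#{partition}: #{lag} messages behind"
    end
  end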
@@ -350,44 +382,39 @@ module Rdkafka
     # When using this `enable.auto.offset.store` should be set to `false` in the config.
     #
     # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
-    #
-    # @raise [RdkafkaError] When storing the offset fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When storing the offset fails
     def store_offset(message)
       closed_consumer_check(__method__)

-      # rd_kafka_offset_store is one of the few calls that does not support
-      # a string as the topic, so create a native topic for it.
-      native_topic = @native_kafka.with_inner do |inner|
-        Rdkafka::Bindings.rd_kafka_topic_new(
+      list = TopicPartitionList.new
+      list.add_topic_and_partitions_with_offsets(
+        message.topic,
+        message.partition => message.offset + 1
+      )
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_store(
           inner,
-          message.topic,
-          nil
+          tpl
         )
       end
-      response = Rdkafka::Bindings.rd_kafka_offset_store(
-        native_topic,
-        message.partition,
-        message.offset
-      )
+
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
-      if native_topic && !native_topic.null?
-        Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
-      end
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
     end

     # Seek to a particular message. The next poll on the topic/partition will return the
     # message at the given offset.
     #
     # @param message [Rdkafka::Consumer::Message] The message to which to seek
-    #
-    # @raise [RdkafkaError] When seeking fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When seeking fails
     def seek(message)
       closed_consumer_check(__method__)

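The reworked `#store_offset` drops the temporary native topic and stores `message.offset + 1` through `rd_kafka_offsets_store`, which is the offset librdkafka expects to commit next. The calling pattern is unchanged; a sketch with manual offset storage (topic name and handler are illustrative):

  consumer = Rdkafka::Config.new(
    "bootstrap.servers" => "localhost:9092",
    "group.id" => "example-group",
    "enable.auto.offset.store" => false # required for manual storing
  ).consumer

  consumer.subscribe("example-topic")

  consumer.each do |message|
    handle(message) # hypothetical processing
    # Only mark the message once it is fully processed; auto-commit
    # will later persist the stored offset.
    consumer.store_offset(message)
  end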
@@ -415,6 +442,39 @@ module Rdkafka
       end
     end

+    # Lookup offset for the given partitions by timestamp.
+    #
+    # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
+    def offsets_for_times(list, timeout_ms = 1000)
+      closed_consumer_check(__method__)
+
+      if !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_for_times(
+          inner,
+          tpl,
+          timeout_ms # timeout
+        )
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+    end
+
     # Manually commit the current offsets of this consumer.
     #
     # To use this set `enable.auto.commit`to `false` to disable automatic triggering
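In `#offsets_for_times` the input list carries millisecond timestamps in the offset slots, and the returned list carries the earliest offset at or after each timestamp. A sketch that finds where partition 0 of a hypothetical topic was one hour ago:

  list = Rdkafka::Consumer::TopicPartitionList.new
  one_hour_ago_ms = ((Time.now.to_f - 3600) * 1000).to_i
  # The "offset" given here is interpreted as a timestamp
  list.add_topic_and_partitions_with_offsets("example-topic", 0 => one_hour_ago_ms)

  resolved = consumer.offsets_for_times(list, 2000)
  resolved.to_h.fetch("example-topic").each do |partition|
    puts "partition #{partition.partition} -> offset #{partition.offset}"
  end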
@@ -426,10 +486,8 @@ module Rdkafka
     #
     # @param list [TopicPartitionList,nil] The topic with partitions to commit
     # @param async [Boolean] Whether to commit async or wait for the commit to finish
-    #
-    # @raise [RdkafkaError] When committing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When committing fails
     def commit(list=nil, async=false)
       closed_consumer_check(__method__)

@@ -454,10 +512,8 @@ module Rdkafka
     # Poll for the next message on one of the subscribed topics
     #
     # @param timeout_ms [Integer] Timeout of this poll
-    #
-    # @raise [RdkafkaError] When polling fails
-    #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
+    # @raise [RdkafkaError] When polling fails
     def poll(timeout_ms)
       closed_consumer_check(__method__)

@@ -483,18 +539,41 @@ module Rdkafka
       end
     end

+    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
+    # was set to `true`.
+    #
+    # Events will cause application-provided callbacks to be called.
+    #
+    # Events (in the context of the consumer):
+    #   - error callbacks
+    #   - stats callbacks
+    #   - any other callbacks supported by librdkafka that are not part of the consumer_poll, that
+    #     would have a callback configured and activated.
+    #
+    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
+    # be called. When in use, does **NOT** replace `#poll` but needs to run complementary with it.
+    #
+    # @param timeout_ms [Integer] poll timeout. If set to 0 will run async, when set to -1 will
+    #   block until any events available.
+    #
+    # @note This method technically should be called `#poll` and the current `#poll` should be
+    #   called `#consumer_poll` though we keep the current naming convention to make it backward
+    #   compatible.
+    def events_poll(timeout_ms = 0)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
+      end
+    end
+
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
-    # If `enable.partition.eof` is turned on in the config this will raise an
-    # error when an eof is reached, so you probably want to disable that when
-    # using this method of iteration.
-    #
-    # @raise [RdkafkaError] When polling fails
+    # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
+    # reached, so you probably want to disable that when using this method of iteration.
     #
     # @yieldparam message [Message] Received message
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When polling fails
     def each
       loop do
         message = poll(250)
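Because `#events_poll` serves the main queue while `#poll` serves the consumer queue, a consumer that registers error or statistics callbacks needs to call both. One way to interleave them (illustrative):

  loop do
    message = consumer.poll(250)
    handle(message) if message # hypothetical processing
    # Serve pending error/stats callbacks; timeout 0 returns immediately
    consumer.events_poll(0)
  end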
@@ -546,19 +625,18 @@ module Rdkafka
     # that you may or may not see again.
     #
     # @param max_items [Integer] Maximum size of the yielded array of messages
-    #
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    #
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
-    # @raise [RdkafkaError] When polling fails
-    #
-    # @yield [messages, pending_exception]
     # @yieldparam messages [Array] An array of received Message
     # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #
+    # @yield [messages, pending_exception]
     # which will be propagated after processing of the partial batch is complete.
     #
     # @return [nil]
+    #
+    # @raise [RdkafkaError] When polling fails
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       closed_consumer_check(__method__)
       slice = []
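For reference, a minimal `each_batch` invocation with the documented keywords; a batch is yielded once `max_items` or `bytes_threshold` is reached, or `timeout_ms` elapses:

  consumer.each_batch(max_items: 500, bytes_threshold: 1_000_000, timeout_ms: 1_000) do |messages, pending_exception|
    # pending_exception is nil unless yield_on_error: true was given and
    # polling raised mid-batch
    bulk_insert(messages) # hypothetical bulk handler
  end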
@@ -594,12 +672,24 @@ module Rdkafka
       end
     end

-    private
-    def monotonic_now
-      # needed because Time.now can go backwards
-      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    # Returns pointer to the consumer group metadata. It is used only in the context of
+    # exactly-once-semantics in transactions, this is why it is never remapped to Ruby
+    #
+    # This API is **not** usable by itself from Ruby
+    #
+    # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`
+    #
+    # @private
+    def consumer_group_metadata_pointer
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        Bindings.rd_kafka_consumer_group_metadata(inner)
+      end
     end

+    private
+
     def closed_consumer_check(method)
       raise Rdkafka::ClosedConsumerError.new(method) if closed?
     end
data/lib/rdkafka/helpers/time.rb (new file)
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # Namespace for some small utilities used in multiple components
+  module Helpers
+    # Time related methods used across Karafka
+    module Time
+      # @return [Float] current monotonic time in seconds with microsecond precision
+      def monotonic_now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
+  end
+end
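This extraction replaces the consumer's private `monotonic_now` (removed above) with a shared mixin; the monotonic clock is used because `Time.now` can move backwards under system clock adjustments. A sketch of the mixin used in isolation (`BatchTimer` is a hypothetical class):

  class BatchTimer
    include Rdkafka::Helpers::Time

    # Returns the elapsed time of the block in seconds,
    # immune to wall-clock jumps
    def measure
      started_at = monotonic_now
      yield
      monotonic_now - started_at
    end
  end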
data/lib/rdkafka/metadata.rb
@@ -4,7 +4,18 @@ module Rdkafka
   class Metadata
     attr_reader :brokers, :topics

-    def initialize(native_client, topic_name = nil, timeout_ms = 250)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -22,6 +33,16 @@ module Rdkafka
       raise Rdkafka::RdkafkaError.new(result) unless result.zero?

       metadata_from_native(ptr.read_pointer)
+    rescue ::Rdkafka::RdkafkaError => e
+      raise unless RETRIED_ERRORS.include?(e.code)
+      raise if attempt > 10
+
+      backoff_factor = 2**attempt
+      timeout = backoff_factor * 0.1
+
+      sleep(timeout)
+
+      retry
     ensure
       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
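With `backoff_factor = 2**attempt` and `timeout = backoff_factor * 0.1`, the sleep before each retry doubles, from 0.2 s after the first failed attempt up to 102.4 s before the tenth and final retry; afterwards the error propagates. The schedule, computed the same way:

  (1..10).map { |attempt| (2**attempt) * 0.1 }
  # => [0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 102.4]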
data/lib/rdkafka/native_kafka.rb
@@ -4,8 +4,9 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:)
+    def initialize(inner, run_polling_thread:, opaque:)
       @inner = inner
+      @opaque = opaque
       # Lock around external access
       @access_mutex = Mutex.new
       # Lock around internal polling
@@ -27,6 +28,9 @@ module Rdkafka
       # counter for operations in progress using inner
       @operations_in_progress = 0

+      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+
       if run_polling_thread
         # Start thread to poll client for delivery callbacks,
         # not used in consumer.
@@ -109,6 +113,7 @@ module Rdkafka

         Rdkafka::Bindings.rd_kafka_destroy(@inner)
         @inner = nil
+        @opaque = nil
       end
     end
   end
data/lib/rdkafka/producer/delivery_handle.rb
@@ -11,6 +11,9 @@ module Rdkafka
              :offset, :int64,
              :topic_name, :pointer

+      # @return [Object, nil] label set during message production or nil by default
+      attr_accessor :label
+
       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
         "delivery"
@@ -18,7 +21,15 @@ module Rdkafka

       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
+        DeliveryReport.new(
+          self[:partition],
+          self[:offset],
+          # For part of errors, we will not get a topic name reference and in cases like this
+          # we should not return it
+          self[:topic_name].null? ? nil : self[:topic_name].read_string,
+          self[:response] != 0 ? RdkafkaError.new(self[:response]) : nil,
+          label
+        )
       end
     end
   end
data/lib/rdkafka/producer/delivery_report.rb
@@ -12,21 +12,34 @@ module Rdkafka
       # @return [Integer]
       attr_reader :offset

-      # The name of the topic this message was produced to.
-      # @return [String]
+      # The name of the topic this message was produced to or nil in case of reports with errors
+      # where topic was not reached.
+      #
+      # @return [String, nil]
       attr_reader :topic_name

       # Error in case happen during produce.
       # @return [Integer]
       attr_reader :error

+      # @return [Object, nil] label set during message production or nil by default
+      attr_reader :label
+
+      # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`
+      # where the topic name is under `#topic` method. That way we have a consistent name that
+      # is present in both places
+      #
+      # We do not remove the original `#topic_name` because of backwards compatibility
+      alias topic topic_name
+
       private

-      def initialize(partition, offset, topic_name = nil, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
         @partition = partition
         @offset = offset
         @topic_name = topic_name
         @error = error
+        @label = label
       end
     end
   end
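Together, the handle's `label` accessor and the extra `DeliveryReport.new` arguments let an arbitrary object travel from the produce call to the delivery report, even for failed deliveries. A sketch, assuming the 0.15 producer accepts a `label:` keyword (the `producer.rb` hunks are not shown in this section):

  producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

  handle = producer.produce(
    topic: "example-topic",
    payload: "body",
    label: { request_id: 42 } # any Ruby object, stored on the handle
  )

  report = handle.wait(max_wait_timeout: 5)
  report.label # => { request_id: 42 }
  report.topic # alias of #topic_name; nil when the topic was never reached
  report.error # nil on success, RdkafkaError otherwise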