rdkafka 0.13.1 → 0.14.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +1 -0
  3. data/.github/workflows/ci.yml +58 -0
  4. data/.gitignore +4 -0
  5. data/.rspec +1 -0
  6. data/.ruby-gemset +1 -0
  7. data/.ruby-version +1 -0
  8. data/CHANGELOG.md +39 -24
  9. data/{LICENSE → MIT-LICENSE} +2 -1
  10. data/README.md +19 -20
  11. data/certs/cert_chain.pem +26 -0
  12. data/docker-compose.yml +16 -15
  13. data/ext/README.md +1 -1
  14. data/ext/Rakefile +26 -53
  15. data/lib/rdkafka/abstract_handle.rb +37 -24
  16. data/lib/rdkafka/admin.rb +6 -7
  17. data/lib/rdkafka/bindings.rb +8 -5
  18. data/lib/rdkafka/config.rb +30 -17
  19. data/lib/rdkafka/consumer/headers.rb +2 -4
  20. data/lib/rdkafka/consumer/topic_partition_list.rb +3 -1
  21. data/lib/rdkafka/consumer.rb +92 -53
  22. data/lib/rdkafka/helpers/time.rb +14 -0
  23. data/lib/rdkafka/metadata.rb +22 -1
  24. data/lib/rdkafka/native_kafka.rb +6 -1
  25. data/lib/rdkafka/producer.rb +85 -7
  26. data/lib/rdkafka/version.rb +3 -3
  27. data/lib/rdkafka.rb +10 -1
  28. data/rdkafka.gemspec +17 -3
  29. data/renovate.json +6 -0
  30. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  31. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  32. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  33. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  34. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  35. data/spec/rdkafka/admin_spec.rb +1 -2
  36. data/spec/rdkafka/bindings_spec.rb +0 -1
  37. data/spec/rdkafka/callbacks_spec.rb +0 -2
  38. data/spec/rdkafka/config_spec.rb +0 -2
  39. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  40. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  41. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  42. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +19 -2
  43. data/spec/rdkafka/consumer_spec.rb +143 -39
  44. data/spec/rdkafka/error_spec.rb +0 -2
  45. data/spec/rdkafka/metadata_spec.rb +2 -3
  46. data/spec/rdkafka/native_kafka_spec.rb +2 -3
  47. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  48. data/spec/rdkafka/producer/delivery_report_spec.rb +0 -2
  49. data/spec/rdkafka/producer_spec.rb +157 -1
  50. data.tar.gz.sig +0 -0
  51. metadata +51 -13
  52. metadata.gz.sig +0 -0
  53. data/.semaphore/semaphore.yml +0 -27
  54. data/dist/librdkafka_2.0.2.tar.gz +0 -0
data/lib/rdkafka/bindings.rb

@@ -1,9 +1,5 @@
 # frozen_string_literal: true
 
-require "ffi"
-require "json"
-require "logger"
-
 module Rdkafka
   # @private
   module Bindings
@@ -35,12 +31,13 @@ module Rdkafka
 
     # Polling
 
-    attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
+    attach_function :rd_kafka_flush, [:pointer, :int], :int, blocking: true
     attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
     attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
 
     # Metadata
 
+    attach_function :rd_kafka_name, [:pointer], :string, blocking: true
     attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
     attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
     attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
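The flush binding's return type changed from :void to :int, so librdkafka's response code is no longer discarded. A hypothetical direct call (`native` stands in for a raw librdkafka producer handle, which these hunks do not show how to obtain):

```ruby
# Sketch only: inspect the response code that rd_kafka_flush now returns.
response = Rdkafka::Bindings.rd_kafka_flush(native, 5_000) # wait up to 5s
raise Rdkafka::RdkafkaError.new(response) unless response.zero?
```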
@@ -157,6 +154,7 @@ module Rdkafka
     ) do |_client_prr, err_code, reason, _opaque|
       if Rdkafka::Config.error_callback
         error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
+        error.set_backtrace(caller)
         Rdkafka::Config.error_callback.call(error)
       end
     end
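With `set_backtrace(caller)`, errors handed to the error callback now carry a Ruby backtrace. A small sketch of an application-side callback that takes advantage of this (the callback API itself is unchanged):

```ruby
Rdkafka::Config.error_callback = lambda do |error|
  # error is an Rdkafka::RdkafkaError; its backtrace is now populated
  warn "rdkafka error #{error.code}: #{error.backtrace&.first}"
end
```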
@@ -190,6 +188,8 @@ module Rdkafka
     attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
     attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
     attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
+    attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
+    attach_function :rd_kafka_position, [:pointer, :pointer], :int, blocking: true
 
     # Headers
     attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -251,10 +251,13 @@ module Rdkafka
     RD_KAFKA_VTYPE_TIMESTAMP = 8
     RD_KAFKA_VTYPE_HEADER = 9
     RD_KAFKA_VTYPE_HEADERS = 10
+    RD_KAFKA_PURGE_F_QUEUE = 1
+    RD_KAFKA_PURGE_F_INFLIGHT = 2
 
     RD_KAFKA_MSG_F_COPY = 0x2
 
     attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
+    attach_function :rd_kafka_purge, [:pointer, :int], :int, blocking: true
     callback :delivery_cb, [:pointer, :pointer, :pointer], :void
     attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void

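`rd_kafka_purge` takes a bitmask of the two new purge flags. The producer-level wrapper lives in producer.rb (not shown among these hunks); a hypothetical direct call against a raw producer handle `native` would look like:

```ruby
# Sketch only: purge both queued and in-flight messages.
flags = Rdkafka::Bindings::RD_KAFKA_PURGE_F_QUEUE |
        Rdkafka::Bindings::RD_KAFKA_PURGE_F_INFLIGHT
response = Rdkafka::Bindings.rd_kafka_purge(native, flags)
raise Rdkafka::RdkafkaError.new(response) unless response.zero?
```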
data/lib/rdkafka/config.rb

@@ -1,11 +1,9 @@
 # frozen_string_literal: true
 
-require "logger"
-
 module Rdkafka
   # Configuration for a Kafka consumer or producer. You can create an instance and use
   # the consumer and producer methods to create a client. Documentation of the available
-  # configuration options is available on https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md.
+  # configuration options is available on https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md.
   class Config
     # @private
     @@logger = Logger.new(STDOUT)
@@ -14,7 +12,7 @@ module Rdkafka
     # @private
     @@error_callback = nil
     # @private
-    @@opaques = {}
+    @@opaques = ObjectSpace::WeakMap.new
     # @private
     @@log_queue = Queue.new
 
@@ -53,13 +51,13 @@ module Rdkafka
 
     # Set a callback that will be called every time the underlying client emits statistics.
     # You can configure if and how often this happens using `statistics.interval.ms`.
-    # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
+    # The callback is called with a hash that's documented here: https://github.com/confluentinc/librdkafka/blob/master/STATISTICS.md
     #
     # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def self.statistics_callback=(callback)
-      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call) || callback == nil
       @@statistics_callback = callback
     end
 
@@ -142,12 +140,12 @@ module Rdkafka
       @consumer_rebalance_listener = listener
     end
 
-    # Create a consumer with this configuration.
+    # Creates a consumer with this configuration.
+    #
+    # @return [Consumer] The created consumer
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    #
-    # @return [Consumer] The created consumer
     def consumer
       opaque = Opaque.new
       config = native_config(opaque)
@@ -164,15 +162,21 @@ module Rdkafka
       Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
 
       # Return consumer with Kafka client
-      Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
+      Rdkafka::Consumer.new(
+        Rdkafka::NativeKafka.new(
+          kafka,
+          run_polling_thread: false,
+          opaque: opaque
+        )
+      )
     end
 
     # Create a producer with this configuration.
     #
+    # @return [Producer] The created producer
+    #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    #
-    # @return [Producer] The created producer
     def producer
       # Create opaque
       opaque = Opaque.new
@@ -182,22 +186,31 @@ module Rdkafka
       Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
       partitioner_name = self[:partitioner] || self["partitioner"]
-      Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
+      Rdkafka::Producer.new(
+        Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true, opaque: opaque),
+        partitioner_name
+      ).tap do |producer|
         opaque.producer = producer
       end
     end
 
-    # Create an admin instance with this configuration.
+    # Creates an admin instance with this configuration.
+    #
+    # @return [Admin] The created admin instance
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    #
-    # @return [Admin] The created admin instance
     def admin
       opaque = Opaque.new
       config = native_config(opaque)
       Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-      Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
+      Rdkafka::Admin.new(
+        Rdkafka::NativeKafka.new(
+          native_kafka(config, :rd_kafka_producer),
+          run_polling_thread: true,
+          opaque: opaque
+        )
+      )
     end
 
     # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
data/lib/rdkafka/consumer/headers.rb

@@ -18,13 +18,11 @@ module Rdkafka
 
       # Reads a librdkafka native message's headers and returns them as a Ruby Hash
       #
-      # @param [librdkakfa message] native_message
+      # @private
       #
+      # @param [librdkakfa message] native_message
      # @return [Hash<String, String>] headers Hash for the native_message
-      #
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
-      #
-      # @private
       def self.from_native(native_message)
         headers_ptrptr = FFI::MemoryPointer.new(:pointer)
         err = Rdkafka::Bindings.rd_kafka_message_headers(native_message, headers_ptrptr)
data/lib/rdkafka/consumer/topic_partition_list.rb

@@ -142,11 +142,13 @@ module Rdkafka
           )
 
           if p.offset
+            offset = p.offset.is_a?(Time) ? p.offset.to_f * 1_000 : p.offset
+
             Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
               tpl,
               topic,
               p.partition,
-              p.offset
+              offset
             )
           end
         end
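Offsets in a TopicPartitionList may now be `Time` instances; they are converted to millisecond timestamps when the native list is built, which is the representation `rd_kafka_offsets_for_times` expects. A sketch using the existing list API (the topic name is made up):

```ruby
list = Rdkafka::Consumer::TopicPartitionList.new
# Partition 0 carries a timestamp, partition 1 a plain integer offset
list.add_topic_and_partitions_with_offsets("events", 0 => Time.now - 3600, 1 => 42)
```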
data/lib/rdkafka/consumer.rb

@@ -12,6 +12,7 @@ module Rdkafka
   # `each_slice` to consume batches of messages.
   class Consumer
     include Enumerable
+    include Helpers::Time
 
     # @private
     def initialize(native_kafka)
@@ -22,6 +23,13 @@ module Rdkafka
       ->(_) { close }
     end
 
+    # @return [String] consumer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
+    end
+
     # Close this consumer
     # @return [nil]
     def close
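`Consumer#name` exposes librdkafka's client name, useful for logging and debugging. For example:

```ruby
config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092", "group.id" => "example")
consumer = config.consumer
consumer.name #=> e.g. "rdkafka#consumer-1" (the exact string comes from librdkafka)
```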
@@ -40,13 +48,11 @@ module Rdkafka
       @native_kafka.closed?
     end
 
-    # Subscribe to one or more topics letting Kafka handle partition assignments.
+    # Subscribes to one or more topics letting Kafka handle partition assignments.
     #
     # @param topics [Array<String>] One or more topic names
-    #
-    # @raise [RdkafkaError] When subscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When subscribing fails
     def subscribe(*topics)
       closed_consumer_check(__method__)
 
@@ -70,9 +76,8 @@
 
     # Unsubscribe from all subscribed topics.
     #
-    # @raise [RdkafkaError] When unsubscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When unsubscribing fails
     def unsubscribe
       closed_consumer_check(__method__)
 
@@ -87,10 +92,8 @@
     # Pause producing or consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
     def pause(list)
       closed_consumer_check(__method__)
 
@@ -114,13 +117,11 @@
       end
     end
 
-    # Resume producing consumption for the provided list of partitions
+    # Resumes producing consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaError] When resume subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When resume subscription fails.
     def resume(list)
       closed_consumer_check(__method__)
 
@@ -142,11 +143,10 @@
       end
     end
 
-    # Return the current subscription to topics and partitions
-    #
-    # @raise [RdkafkaError] When getting the subscription fails.
+    # Returns the current subscription to topics and partitions
     #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the subscription fails.
     def subscription
       closed_consumer_check(__method__)
 
@@ -171,7 +171,6 @@
     # Atomic assignment of partitions to consume
     #
     # @param list [TopicPartitionList] The topic with partitions to assign
-    #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
       closed_consumer_check(__method__)
@@ -196,9 +195,8 @@
 
     # Returns the current partition assignment.
     #
-    # @raise [RdkafkaError] When getting the assignment fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the assignment fails.
     def assignment
       closed_consumer_check(__method__)
 
@@ -224,14 +222,14 @@
     end
 
     # Return the current committed offset per partition for this consumer group.
-    # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
+    # The offset field of each requested partition will either be set to stored offset or to -1001
+    # in case there was no stored offset for that partition.
     #
-    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
+    #   to use the current subscription.
     # @param timeout_ms [Integer] The timeout for fetching this information.
-    #
-    # @raise [RdkafkaError] When getting the committed positions fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the committed positions fails.
     def committed(list=nil, timeout_ms=1200)
       closed_consumer_check(__method__)
 
@@ -256,15 +254,41 @@
       end
     end
 
+    # Return the current positions (offsets) for topics and partitions.
+    # The offset field of each requested partition will be set to the offset of the last consumed message + 1, or nil in case there was no previous message.
+    #
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
+    #
+    # @return [TopicPartitionList]
+    def position(list=nil)
+      if list.nil?
+        list = assignment
+      elsif !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be nil or a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_position(inner, tpl)
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    end
+
     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
     #
     # @param topic [String] The topic to query
     # @param partition [Integer] The partition to query
     # @param timeout_ms [Integer] The timeout for querying the broker
-    #
-    # @raise [RdkafkaError] When querying the broker fails.
-    #
     # @return [Integer] The low and high watermark
+    # @raise [RdkafkaError] When querying the broker fails.
     def query_watermark_offsets(topic, partition, timeout_ms=200)
       closed_consumer_check(__method__)
 
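`Consumer#position` defaults to the current assignment, so after consuming some messages you can inspect where the consumer stands. A sketch, reusing the consumer from the earlier example and assuming `TopicPartitionList#to_h` from the existing API:

```ruby
consumer.position.to_h.each do |topic, partitions|
  partitions.each do |p|
    # p.offset is the last consumed offset + 1, or nil if nothing was consumed yet
    puts "#{topic}/#{p.partition}: #{p.offset.inspect}"
  end
end
```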
@@ -298,10 +322,9 @@
     #
     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
-    #
+    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
+    #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    #
-    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
     def lag(topic_partition_list, watermark_timeout_ms=100)
       out = {}
 
@@ -350,10 +373,8 @@
     # When using this `enable.auto.offset.store` should be set to `false` in the config.
     #
     # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
-    #
-    # @raise [RdkafkaError] When storing the offset fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When storing the offset fails
     def store_offset(message)
       closed_consumer_check(__method__)
 
@@ -384,10 +405,8 @@
     # message at the given offset.
     #
     # @param message [Rdkafka::Consumer::Message] The message to which to seek
-    #
-    # @raise [RdkafkaError] When seeking fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When seeking fails
     def seek(message)
       closed_consumer_check(__method__)
 
@@ -415,6 +434,39 @@
       end
     end
 
+    # Lookup offset for the given partitions by timestamp.
+    #
+    # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
+    #
+    # @return [TopicPartitionList]
+    def offsets_for_times(list, timeout_ms = 1000)
+      closed_consumer_check(__method__)
+
+      if !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_for_times(
+          inner,
+          tpl,
+          timeout_ms # timeout
+        )
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+    end
+
     # Manually commit the current offsets of this consumer.
     #
     # To use this set `enable.auto.commit`to `false` to disable automatic triggering
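Together with Time offsets in TopicPartitionList, the new `offsets_for_times` resolves each timestamp to the earliest offset at or after it. A sketch building on the hypothetical "events" list from earlier:

```ruby
list = Rdkafka::Consumer::TopicPartitionList.new
list.add_topic_and_partitions_with_offsets("events", 0 => Time.now - 3600)
resolved = consumer.offsets_for_times(list, 2_000)
# resolved now holds concrete offsets, e.g. for seeking to "one hour ago"
```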
@@ -426,10 +478,8 @@
     #
     # @param list [TopicPartitionList,nil] The topic with partitions to commit
     # @param async [Boolean] Whether to commit async or wait for the commit to finish
-    #
-    # @raise [RdkafkaError] When committing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When committing fails
     def commit(list=nil, async=false)
       closed_consumer_check(__method__)
 
@@ -454,10 +504,8 @@
     # Poll for the next message on one of the subscribed topics
     #
     # @param timeout_ms [Integer] Timeout of this poll
-    #
-    # @raise [RdkafkaError] When polling fails
-    #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
+    # @raise [RdkafkaError] When polling fails
     def poll(timeout_ms)
       closed_consumer_check(__method__)
 
@@ -486,14 +534,11 @@
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
-    # If `enable.partition.eof` is turned on in the config this will raise an
-    # error when an eof is reached, so you probably want to disable that when
-    # using this method of iteration.
+    # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
+    # reached, so you probably want to disable that when using this method of iteration.
     #
     # @raise [RdkafkaError] When polling fails
-    #
     # @yieldparam message [Message] Received message
-    #
     # @return [nil]
     def each
       loop do
@@ -546,9 +591,7 @@
     # that you may or may not see again.
     #
     # @param max_items [Integer] Maximum size of the yielded array of messages
-    #
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    #
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
     # @raise [RdkafkaError] When polling fails
@@ -595,10 +638,6 @@
     end
 
     private
-    def monotonic_now
-      # needed because Time.now can go backwards
-      Process.clock_gettime(Process::CLOCK_MONOTONIC)
-    end
 
     def closed_consumer_check(method)
       raise Rdkafka::ClosedConsumerError.new(method) if closed?
data/lib/rdkafka/helpers/time.rb (new file)

@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # Namespace for some small utilities used in multiple components
+  module Helpers
+    # Time related methods used across Karafka
+    module Time
+      # @return [Float] current monotonic time in seconds with microsecond precision
+      def monotonic_now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
+  end
+end
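`monotonic_now` moved out of Consumer into this shared mixin. Unlike `Time.now`, the monotonic clock cannot go backwards (e.g. on NTP adjustments), which makes it safe for measuring elapsed time:

```ruby
include Rdkafka::Helpers::Time

started = monotonic_now
sleep(0.1) # stand-in for real work
puts monotonic_now - started #=> roughly 0.1
```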
data/lib/rdkafka/metadata.rb

@@ -4,7 +4,18 @@ module Rdkafka
   class Metadata
     attr_reader :brokers, :topics
 
-    def initialize(native_client, topic_name = nil, timeout_ms = 250)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -22,6 +33,16 @@ module Rdkafka
       raise Rdkafka::RdkafkaError.new(result) unless result.zero?
 
       metadata_from_native(ptr.read_pointer)
+    rescue ::Rdkafka::RdkafkaError => e
+      raise unless RETRIED_ERRORS.include?(e.code)
+      raise if attempt > 10
+
+      backoff_factor = 2**attempt
+      timeout = backoff_factor * 0.1
+
+      sleep(timeout)
+
+      retry
     ensure
       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
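The retry logic uses exponential backoff: attempt n sleeps 2**n * 0.1 seconds, and the fetch gives up after ten retries, so the full sleep schedule is:

```ruby
(1..10).map { |attempt| (2**attempt) * 0.1 }
#=> [0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 102.4]
```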
data/lib/rdkafka/native_kafka.rb

@@ -4,8 +4,9 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:)
+    def initialize(inner, run_polling_thread:, opaque:)
       @inner = inner
+      @opaque = opaque
       # Lock around external access
       @access_mutex = Mutex.new
       # Lock around internal polling
@@ -27,6 +28,9 @@ module Rdkafka
       # counter for operations in progress using inner
       @operations_in_progress = 0
 
+      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+
       if run_polling_thread
         # Start thread to poll client for delivery callbacks,
         # not used in consumer.
@@ -109,6 +113,7 @@
 
       Rdkafka::Bindings.rd_kafka_destroy(@inner)
       @inner = nil
+      @opaque = nil
     end
   end
 end