karafka-rdkafka 0.12.3 → 0.13.0.beta2

Files changed (57)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/CHANGELOG.md +23 -0
  5. data/Gemfile +2 -0
  6. data/README.md +26 -0
  7. data/Rakefile +2 -0
  8. data/ext/Rakefile +2 -0
  9. data/karafka-rdkafka.gemspec +2 -0
  10. data/lib/rdkafka/abstract_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  15. data/lib/rdkafka/admin.rb +95 -73
  16. data/lib/rdkafka/bindings.rb +52 -37
  17. data/lib/rdkafka/callbacks.rb +2 -0
  18. data/lib/rdkafka/config.rb +13 -10
  19. data/lib/rdkafka/consumer/headers.rb +24 -7
  20. data/lib/rdkafka/consumer/message.rb +3 -1
  21. data/lib/rdkafka/consumer/partition.rb +2 -0
  22. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  23. data/lib/rdkafka/consumer.rb +100 -44
  24. data/lib/rdkafka/error.rb +9 -0
  25. data/lib/rdkafka/metadata.rb +25 -2
  26. data/lib/rdkafka/native_kafka.rb +81 -0
  27. data/lib/rdkafka/producer/delivery_handle.rb +2 -0
  28. data/lib/rdkafka/producer/delivery_report.rb +3 -1
  29. data/lib/rdkafka/producer.rb +75 -12
  30. data/lib/rdkafka/version.rb +3 -1
  31. data/lib/rdkafka.rb +3 -1
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +2 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +124 -22
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +2 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
  51. data/spec/rdkafka/producer_spec.rb +118 -17
  52. data/spec/spec_helper.rb +17 -1
  53. data.tar.gz.sig +0 -0
  54. metadata +10 -10
  55. metadata.gz.sig +0 -0
  56. data/bin/console +0 -11
  57. data/lib/rdkafka/producer/client.rb +0 -47
data/lib/rdkafka/config.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "logger"
 
 module Rdkafka
@@ -30,7 +32,6 @@ module Rdkafka
       @@logger
     end
 
-
     # Returns a queue whose contents will be passed to the configured logger. Each entry
     # should follow the format [Logger::Severity, String]. The benefit over calling the
     # logger directly is that this is safe to use from trap contexts.
@@ -47,7 +48,7 @@ module Rdkafka
    # @return [nil]
    def self.logger=(logger)
      raise NoLoggerError if logger.nil?
-     @@logger=logger
+     @@logger = logger
    end
 
    # Set a callback that will be called every time the underlying client emits statistics.
@@ -156,13 +157,14 @@ module Rdkafka
        Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
      end
 
+     # Create native client
      kafka = native_kafka(config, :rd_kafka_consumer)
 
      # Redirect the main queue to the consumer
      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
 
      # Return consumer with Kafka client
-     Rdkafka::Consumer.new(kafka)
+     Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
    end
 
    # Create a producer with this configuration.
@@ -179,7 +181,8 @@ module Rdkafka
      # Set callback to receive delivery reports on config
      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
      # Return producer with Kafka client
-     Rdkafka::Producer.new(Rdkafka::Producer::Client.new(native_kafka(config, :rd_kafka_producer)), self[:partitioner]).tap do |producer|
+     partitioner_name = self[:partitioner] || self["partitioner"]
+     Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
        opaque.producer = producer
      end
    end
@@ -194,7 +197,7 @@ module Rdkafka
      opaque = Opaque.new
      config = native_config(opaque)
      Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-     Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+     Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
    end
 
    # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
@@ -210,7 +213,7 @@ module Rdkafka
 
    # This method is only intended to be used to create a client,
    # using it in another way will leak memory.
-   def native_config(opaque=nil)
+   def native_config(opaque = nil)
      Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
        # Create config
        @config_hash.merge(REQUIRED_CONFIG).each do |key, value|
@@ -282,18 +285,18 @@ module Rdkafka
      producer.call_delivery_callback(delivery_report, delivery_handle) if producer
    end
 
-   def call_on_partitions_assigned(consumer, list)
+   def call_on_partitions_assigned(list)
      return unless consumer_rebalance_listener
      return unless consumer_rebalance_listener.respond_to?(:on_partitions_assigned)
 
-     consumer_rebalance_listener.on_partitions_assigned(consumer, list)
+     consumer_rebalance_listener.on_partitions_assigned(list)
    end
 
-   def call_on_partitions_revoked(consumer, list)
+   def call_on_partitions_revoked(list)
      return unless consumer_rebalance_listener
      return unless consumer_rebalance_listener.respond_to?(:on_partitions_revoked)
 
-     consumer_rebalance_listener.on_partitions_revoked(consumer, list)
+     consumer_rebalance_listener.on_partitions_revoked(list)
    end
  end
 end
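
The producer factory above now accepts the partitioner under either a Symbol or a String key (`self[:partitioner] || self["partitioner"]`). A minimal sketch of what this permits; the broker address is a placeholder and `murmur2` is one of librdkafka's partitioner names:

```ruby
require "rdkafka"

# Both spellings of the key now resolve to the same partitioner setting.
config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092", # placeholder broker
  "partitioner" => "murmur2"               # :partitioner => "murmur2" works too
)

# The returned producer is backed by Rdkafka::NativeKafka with a polling thread.
producer = config.producer
```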
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -1,10 +1,26 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
-    # A message headers
-    class Headers
-      # Reads a native kafka's message header into ruby's hash
+    # Interface to return headers for a consumer message
+    module Headers
+      class HashWithSymbolKeysTreatedLikeStrings < Hash
+        def [](key)
+          if key.is_a?(Symbol)
+            Kernel.warn("rdkafka deprecation warning: header access with Symbol key #{key.inspect} treated as a String. " \
+                        "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+            super(key.to_s)
+          else
+            super
+          end
+        end
+      end
+
+      # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      #
+      # @param [librdkafka message] native_message
       #
-      # @return [Hash<String, String>] a message headers
+      # @return [Hash<String, String>] headers Hash for the native_message
       #
       # @raise [Rdkafka::RdkafkaError] when it fails to read headers
       #
@@ -24,7 +40,8 @@ module Rdkafka
        name_ptrptr = FFI::MemoryPointer.new(:pointer)
        value_ptrptr = FFI::MemoryPointer.new(:pointer)
        size_ptr = Rdkafka::Bindings::SizePtr.new
-       headers = {}
+
+       headers = HashWithSymbolKeysTreatedLikeStrings.new
 
        idx = 0
        loop do
@@ -51,12 +68,12 @@ module Rdkafka
 
          value = value_ptr.read_string(size)
 
-         headers[name.to_sym] = value
+         headers[name] = value
 
          idx += 1
        end
 
-       headers
+       headers.freeze
      end
    end
  end
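
With the change above, consumed headers are keyed by String and the returned hash is frozen; Symbol access still resolves but warns. A sketch of the new behavior, assuming `message` is a consumed `Rdkafka::Consumer::Message` carrying a hypothetical "request-id" header:

```ruby
headers = message.headers

headers["request-id"] # => "42" — the supported String-key access

# Still works in 0.13, but emits the deprecation warning shown above
# and will stop working in version 1:
headers[:"request-id"]

# The hash is frozen, so it can no longer be mutated in place:
headers["other"] = "x" # raises FrozenError
```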
data/lib/rdkafka/consumer/message.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A message that was consumed from a topic.
@@ -18,7 +20,7 @@ module Rdkafka
      # @return [String, nil]
      attr_reader :key
 
-     # This message's offset in it's partition
+     # This message's offset in its partition
      # @return [Integer]
      attr_reader :offset
 
data/lib/rdkafka/consumer/partition.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # Information about a partition, used in {TopicPartitionList}.
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A list of topics with their partition information
data/lib/rdkafka/consumer.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
   # brokers automatically assign partitions and load balance partitions over consumers that
@@ -14,18 +16,33 @@ module Rdkafka
    # @private
    def initialize(native_kafka)
      @native_kafka = native_kafka
-     @closing = false
+   end
+
+   # @return [String] consumer name
+   def name
+     @name ||= @native_kafka.with_inner do |inner|
+       ::Rdkafka::Bindings.rd_kafka_name(inner)
+     end
+   end
+
+   def finalizer
+     ->(_) { close }
    end
 
    # Close this consumer
    # @return [nil]
    def close
-     return unless @native_kafka
+     return if closed?
+     ObjectSpace.undefine_finalizer(self)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_consumer_close(inner)
+     end
+     @native_kafka.close
+   end
 
-     @closing = true
-     Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
-     Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-     @native_kafka = nil
+   # Whether this consumer has closed
+   def closed?
+     @native_kafka.closed?
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -46,7 +63,9 @@ module Rdkafka
      end
 
      # Subscribe to topic partition list and check this was successful
-     response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
@@ -62,7 +81,9 @@ module Rdkafka
    def unsubscribe
      closed_consumer_check(__method__)
 
-     response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -85,7 +106,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
+       end
 
        if response != 0
          list = TopicPartitionList.from_native_tpl(tpl)
@@ -113,7 +136,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
        end
@@ -131,7 +156,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
+     end
 
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
@@ -161,7 +188,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
        end
@@ -179,7 +208,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -197,6 +228,15 @@ module Rdkafka
      ptr.free unless ptr.nil?
    end
 
+   # @return [Boolean] true if our current assignment has been lost involuntarily.
+   def assignment_lost?
+     closed_consumer_check(__method__)
+
+     @native_kafka.with_inner do |inner|
+       !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+     end
+   end
+
    # Return the current committed offset per partition for this consumer group.
    # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
    #
@@ -218,7 +258,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -243,14 +285,16 @@ module Rdkafka
      low = FFI::MemoryPointer.new(:int64, 1)
      high = FFI::MemoryPointer.new(:int64, 1)
 
-     response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
-       @native_kafka,
-       topic,
-       partition,
-       low,
-       high,
-       timeout_ms,
-     )
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
+         inner,
+         topic,
+         partition,
+         low,
+         high,
+         timeout_ms,
+       )
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
@@ -298,7 +342,9 @@ module Rdkafka
    # @return [String, nil]
    def cluster_id
      closed_consumer_check(__method__)
-     Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_clusterid(inner)
+     end
    end
 
    # Returns this client's broker-assigned group member id
@@ -308,7 +354,9 @@ module Rdkafka
    # @return [String, nil]
    def member_id
      closed_consumer_check(__method__)
-     Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_memberid(inner)
+     end
    end
 
    # Store offset of a message to be used in the next commit of this consumer
@@ -325,11 +373,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-     native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-       @native_kafka,
-       message.topic,
-       nil
-     )
+     native_topic = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_topic_new(
+         inner,
+         message.topic,
+         nil
+       )
+     end
      response = Rdkafka::Bindings.rd_kafka_offset_store(
        native_topic,
        message.partition,
@@ -357,11 +407,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-     native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-       @native_kafka,
-       message.topic,
-       nil
-     )
+     native_topic = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_topic_new(
+         inner,
+         message.topic,
+         nil
+       )
+     end
      response = Rdkafka::Bindings.rd_kafka_seek(
        native_topic,
        message.partition,
@@ -402,7 +454,9 @@ module Rdkafka
      tpl = list ? list.to_native_tpl : nil
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -421,7 +475,9 @@ module Rdkafka
    def poll(timeout_ms)
      closed_consumer_check(__method__)
 
-     message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+     message_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
+     end
      if message_ptr.null?
        nil
      else
@@ -436,7 +492,7 @@ module Rdkafka
      end
    ensure
      # Clean up rdkafka message if there is one
-     if !message_ptr.nil? && !message_ptr.null?
+     if message_ptr && !message_ptr.null?
        Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
      end
    end
@@ -459,7 +515,7 @@ module Rdkafka
        if message
          yield(message)
        else
-         if @closing
+         if closed?
            break
          else
            next
@@ -468,10 +524,6 @@ module Rdkafka
        end
      end
 
-   def closed_consumer_check(method)
-     raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
-   end
-
    # Poll for new messages and yield them in batches that may contain
    # messages from more than one partition.
    #
@@ -527,7 +579,7 @@ module Rdkafka
      bytes = 0
      end_time = monotonic_now + timeout_ms / 1000.0
      loop do
-       break if @closing
+       break if closed?
        max_wait = end_time - monotonic_now
        max_wait_ms = if max_wait <= 0
          0 # should not block, but may retrieve a message
@@ -545,7 +597,7 @@ module Rdkafka
        end
        if message
          slice << message
-         bytes += message.payload.bytesize
+         bytes += message.payload.bytesize if message.payload
        end
        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
          yield slice.dup, nil
@@ -561,5 +613,9 @@ module Rdkafka
      # needed because Time.now can go backwards
      Process.clock_gettime(Process::CLOCK_MONOTONIC)
    end
+
+   def closed_consumer_check(method)
+     raise Rdkafka::ClosedConsumerError.new(method) if closed?
+   end
  end
 end
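
Taken together, the consumer changes replace the `@closing` flag with state delegated to `NativeKafka` and add `name` and `assignment_lost?`. A sketch of a poll loop using the new predicates; the topic is a placeholder and `config` is assumed to be a configured `Rdkafka::Config` with a group id:

```ruby
consumer = config.consumer
consumer.subscribe("example_topic")

loop do
  break if consumer.closed?

  # New in 0.13: check whether the group assignment was lost involuntarily
  # (e.g. the session timed out) and react before polling again.
  warn "#{consumer.name}: assignment lost" if consumer.assignment_lost?

  message = consumer.poll(250)
  puts "#{message.topic}/#{message.partition}@#{message.offset}" if message
end
```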
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # Base error class.
   class BaseError < RuntimeError; end
@@ -83,4 +85,11 @@ module Rdkafka
      super("Illegal call to #{method.to_s} on a closed producer")
    end
  end
+
+  # Error class for public admin method calls on a closed admin.
+  class ClosedAdminError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed admin")
+    end
+  end
 end
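
`ClosedAdminError` mirrors the existing consumer and producer variants. A sketch of where it surfaces; the topic name and counts are placeholders and `config` is assumed to be a configured `Rdkafka::Config`:

```ruby
admin = config.admin
admin.close

begin
  # create_topic(name, partition_count, replication_factor)
  admin.create_topic("example_topic", 3, 1)
rescue Rdkafka::ClosedAdminError => e
  puts e.message # => "Illegal call to create_topic on a closed admin"
end
```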
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,8 +1,21 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Metadata
     attr_reader :brokers, :topics
 
-    def initialize(native_client, topic_name = nil)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -14,12 +27,22 @@ module Rdkafka
      topic_flag = topic_name.nil? ? 1 : 0
 
      # Retrieve the Metadata
-     result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 2_000)
+     result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)
 
      # Error Handling
      raise Rdkafka::RdkafkaError.new(result) unless result.zero?
 
      metadata_from_native(ptr.read_pointer)
+   rescue ::Rdkafka::RdkafkaError => e
+     raise unless RETRIED_ERRORS.include?(e.code)
+     raise if attempt > 10
+
+     backoff_factor = 2**attempt
+     timeout = backoff_factor * 0.1
+
+     sleep(timeout)
+
+     retry
    ensure
      Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
      Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
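
The rescue/retry added above turns transient `timed_out` and `leader_not_available` errors into an exponential backoff. A quick illustration (not part of the gem) of the sleep schedule `2**attempt * 0.1` produces:

```ruby
(1..10).map { |attempt| (2**attempt) * 0.1 }
# => [0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 102.4]
# A sleep follows each of the first ten failed attempts; an eleventh
# failure (attempt > 10) re-raises the RdkafkaError to the caller.
```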
data/lib/rdkafka/native_kafka.rb ADDED
@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # @private
+  # A wrapper around a native kafka that polls and cleanly exits
+  class NativeKafka
+    def initialize(inner, run_polling_thread:)
+      @inner = inner
+      # Lock around external access
+      @access_mutex = Mutex.new
+      # Lock around internal polling
+      @poll_mutex = Mutex.new
+
+      if run_polling_thread
+        # Start thread to poll client for delivery callbacks,
+        # not used in consumer.
+        @polling_thread = Thread.new do
+          loop do
+            @poll_mutex.synchronize do
+              Rdkafka::Bindings.rd_kafka_poll(inner, 100)
+            end
+
+            # Exit thread if closing and the poll queue is empty
+            if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+              break
+            end
+          end
+        end
+
+        @polling_thread.abort_on_exception = true
+        @polling_thread[:closing] = false
+      end
+
+      @closing = false
+    end
+
+    def with_inner
+      return if @inner.nil?
+
+      yield @inner
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    def closed?
+      @closing || @inner.nil?
+    end
+
+    def close(object_id=nil)
+      return if closed?
+
+      @access_mutex.lock
+
+      # Indicate to the outside world that we are closing
+      @closing = true
+
+      if @polling_thread
+        # Indicate to polling thread that we're closing
+        @polling_thread[:closing] = true
+
+        # Wait for the polling thread to finish up,
+        # this can be aborted in practice if this
+        # code runs from a finalizer.
+        @polling_thread.join
+      end
+
+      # Destroy the client after locking both mutexes
+      @poll_mutex.lock
+
+      # This check prevents a race condition, where we would enter the close in two threads
+      # and after unlocking the primary one that holds the lock but finished, ours would be unlocked
+      # and would continue to run, trying to destroy inner twice
+      return unless @inner
+
+      Rdkafka::Bindings.rd_kafka_destroy(@inner)
+      @inner = nil
+    end
+  end
+end
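
`NativeKafka` is marked `@private`, so the sketch below only illustrates the lifecycle the wrapper gives Consumer, Producer, and Admin; `raw` stands in for a handle obtained from `Config#native_kafka`:

```ruby
native = Rdkafka::NativeKafka.new(raw, run_polling_thread: true)

# FFI calls are funneled through with_inner, which yields the raw
# librdkafka handle and becomes a no-op once the wrapper is closed.
name = native.with_inner { |inner| Rdkafka::Bindings.rd_kafka_name(inner) }

# close is guarded against double-destroy: it flags the polling thread,
# joins it once the out queue drains, then destroys the handle exactly once.
native.close
native.closed? # => true
```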
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Delivery report for a successfully produced message.
@@ -15,7 +17,7 @@ module Rdkafka
      attr_reader :topic_name
 
      # Error in case one happened during produce.
-     # @return [String]
+     # @return [Integer]
      attr_reader :error
 
      private
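
The doc fix above records that `error` carries an integer error code rather than a String. A sketch of reading it; `producer` is assumed to be configured and "events" is a placeholder topic:

```ruby
handle = producer.produce(topic: "events", payload: "hello")
report = handle.wait(max_wait_timeout: 5)

report.partition # => partition the message landed on
report.offset    # => offset within that partition
report.error     # => librdkafka error code (Integer) when delivery failed
```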