rdkafka 0.11.1 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +7 -3
  3. data/CHANGELOG.md +23 -2
  4. data/Gemfile +2 -0
  5. data/README.md +26 -0
  6. data/Rakefile +2 -0
  7. data/dist/librdkafka_2.0.2.tar.gz +0 -0
  8. data/ext/Rakefile +56 -27
  9. data/lib/rdkafka/abstract_handle.rb +2 -0
  10. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  12. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  14. data/lib/rdkafka/admin.rb +50 -33
  15. data/lib/rdkafka/bindings.rb +59 -39
  16. data/lib/rdkafka/callbacks.rb +7 -1
  17. data/lib/rdkafka/config.rb +15 -12
  18. data/lib/rdkafka/consumer/headers.rb +24 -7
  19. data/lib/rdkafka/consumer/message.rb +3 -1
  20. data/lib/rdkafka/consumer/partition.rb +2 -0
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  22. data/lib/rdkafka/consumer.rb +86 -44
  23. data/lib/rdkafka/error.rb +15 -0
  24. data/lib/rdkafka/metadata.rb +4 -2
  25. data/lib/rdkafka/native_kafka.rb +115 -0
  26. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  27. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  28. data/lib/rdkafka/producer.rb +56 -38
  29. data/lib/rdkafka/version.rb +5 -3
  30. data/lib/rdkafka.rb +3 -0
  31. data/rdkafka.gemspec +2 -0
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +9 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +123 -27
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/native_kafka_spec.rb +124 -0
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  51. data/spec/rdkafka/producer_spec.rb +103 -24
  52. data/spec/spec_helper.rb +17 -1
  53. metadata +13 -9
  54. data/bin/console +0 -11
data/lib/rdkafka/config.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "logger"
 
 module Rdkafka
@@ -30,7 +32,6 @@ module Rdkafka
       @@logger
     end
 
-
     # Returns a queue whose contents will be passed to the configured logger. Each entry
     # should follow the format [Logger::Severity, String]. The benefit over calling the
     # logger directly is that this is safe to use from trap contexts.
@@ -47,7 +48,7 @@ module Rdkafka
    # @return [nil]
    def self.logger=(logger)
      raise NoLoggerError if logger.nil?
-      @@logger=logger
+      @@logger = logger
    end
 
    # Set a callback that will be called every time the underlying client emits statistics.
@@ -156,13 +157,14 @@ module Rdkafka
        Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
      end
 
+      # Create native client
      kafka = native_kafka(config, :rd_kafka_consumer)
 
      # Redirect the main queue to the consumer
      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
 
      # Return consumer with Kafka client
-      Rdkafka::Consumer.new(kafka)
+      Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
    end
 
    # Create a producer with this configuration.
@@ -179,7 +181,8 @@ module Rdkafka
      # Set callback to receive delivery reports on config
      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
      # Return producer with Kafka client
-      Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
+      partitioner_name = self[:partitioner] || self["partitioner"]
+      Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
        opaque.producer = producer
      end
    end
@@ -194,7 +197,7 @@ module Rdkafka
      opaque = Opaque.new
      config = native_config(opaque)
      Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-      Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+      Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
    end
 
    # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
@@ -210,7 +213,7 @@ module Rdkafka
 
    # This method is only intended to be used to create a client,
    # using it in another way will leak memory.
-    def native_config(opaque=nil)
+    def native_config(opaque = nil)
      Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
        # Create config
        @config_hash.merge(REQUIRED_CONFIG).each do |key, value|
@@ -278,22 +281,22 @@ module Rdkafka
    attr_accessor :producer
    attr_accessor :consumer_rebalance_listener
 
-    def call_delivery_callback(delivery_handle)
-      producer.call_delivery_callback(delivery_handle) if producer
+    def call_delivery_callback(delivery_report, delivery_handle)
+      producer.call_delivery_callback(delivery_report, delivery_handle) if producer
    end
 
-    def call_on_partitions_assigned(consumer, list)
+    def call_on_partitions_assigned(list)
      return unless consumer_rebalance_listener
      return unless consumer_rebalance_listener.respond_to?(:on_partitions_assigned)
 
-      consumer_rebalance_listener.on_partitions_assigned(consumer, list)
+      consumer_rebalance_listener.on_partitions_assigned(list)
    end
 
-    def call_on_partitions_revoked(consumer, list)
+    def call_on_partitions_revoked(list)
      return unless consumer_rebalance_listener
      return unless consumer_rebalance_listener.respond_to?(:on_partitions_revoked)
 
-      consumer_rebalance_listener.on_partitions_revoked(consumer, list)
+      consumer_rebalance_listener.on_partitions_revoked(list)
    end
  end
end
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -1,10 +1,26 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
-    # A message headers
-    class Headers
-      # Reads a native kafka's message header into ruby's hash
+    # Interface to return headers for a consumer message
+    module Headers
+      class HashWithSymbolKeysTreatedLikeStrings < Hash
+        def [](key)
+          if key.is_a?(Symbol)
+            Kernel.warn("rdkafka deprecation warning: header access with Symbol key #{key.inspect} treated as a String. " \
+              "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+            super(key.to_s)
+          else
+            super
+          end
+        end
+      end
+
+      # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      #
+      # @param [librdkakfa message] native_message
       #
-      # @return [Hash<String, String>] a message headers
+      # @return [Hash<String, String>] headers Hash for the native_message
       #
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       #
@@ -24,7 +40,8 @@ module Rdkafka
         name_ptrptr = FFI::MemoryPointer.new(:pointer)
         value_ptrptr = FFI::MemoryPointer.new(:pointer)
         size_ptr = Rdkafka::Bindings::SizePtr.new
-        headers = {}
+
+        headers = HashWithSymbolKeysTreatedLikeStrings.new
 
         idx = 0
         loop do
@@ -51,12 +68,12 @@ module Rdkafka
 
           value = value_ptr.read_string(size)
 
-          headers[name.to_sym] = value
+          headers[name] = value
 
           idx += 1
         end
 
-        headers
+        headers.freeze
      end
    end
  end
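The consumer-facing effect of this rewrite, sketched below: header hashes now use String keys and come back frozen, while Symbol lookups keep working but emit the deprecation warning defined above. The consumer variable and header name are hypothetical:

    message = consumer.poll(250)
    message.headers["request-id"]  # => "abc-123"
    message.headers[:"request-id"] # same value, plus a deprecation warning on stderr
    message.headers.frozen?        # => true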
data/lib/rdkafka/consumer/message.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A message that was consumed from a topic.
@@ -18,7 +20,7 @@ module Rdkafka
       # @return [String, nil]
       attr_reader :key
 
-      # This message's offset in it's partition
+      # This message's offset in its partition
       # @return [Integer]
       attr_reader :offset
 
data/lib/rdkafka/consumer/partition.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # Information about a partition, used in {TopicPartitionList}.
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A list of topics with their partition information
data/lib/rdkafka/consumer.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
   # brokers automatically assign partitions and load balance partitions over consumers that
@@ -14,18 +16,28 @@ module Rdkafka
    # @private
    def initialize(native_kafka)
      @native_kafka = native_kafka
-      @closing = false
+    end
+
+    def finalizer
+      ->(_) { close }
    end
 
    # Close this consumer
    # @return [nil]
    def close
-      return unless @native_kafka
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+
+      @native_kafka.synchronize do |inner|
+        Rdkafka::Bindings.rd_kafka_consumer_close(inner)
+      end
 
-      @closing = true
-      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+      @native_kafka.close
+    end
+
+    # Whether this consumer has closed
+    def closed?
+      @native_kafka.closed?
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
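Lifecycle state now lives in the NativeKafka wrapper rather than a local @closing flag. A minimal lifecycle sketch, assuming a local broker:

    config = Rdkafka::Config.new(
      "bootstrap.servers": "localhost:9092",
      "group.id": "example-group"
    )
    consumer = config.consumer
    consumer.closed? # => false
    consumer.close   # rd_kafka_consumer_close under #synchronize, then NativeKafka#close
    consumer.closed? # => true, delegated to the wrapper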
@@ -46,7 +58,9 @@ module Rdkafka
      end
 
      # Subscribe to topic partition list and check this was successful
-      response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
+      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
@@ -62,7 +76,9 @@ module Rdkafka
    def unsubscribe
      closed_consumer_check(__method__)
 
-      response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
+      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -85,7 +101,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-        response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
+        end
 
        if response != 0
          list = TopicPartitionList.from_native_tpl(tpl)
@@ -113,7 +131,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-        response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
+        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
        end
@@ -131,7 +151,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
+      end
 
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
@@ -161,7 +183,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-        response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
+        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
        end
@@ -179,7 +203,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
+      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -218,7 +244,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-        response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
+        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -243,14 +271,16 @@ module Rdkafka
      low = FFI::MemoryPointer.new(:int64, 1)
      high = FFI::MemoryPointer.new(:int64, 1)
 
-      response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
-        @native_kafka,
-        topic,
-        partition,
-        low,
-        high,
-        timeout_ms,
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
+          inner,
+          topic,
+          partition,
+          low,
+          high,
+          timeout_ms,
+        )
+      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
@@ -298,7 +328,9 @@ module Rdkafka
    # @return [String, nil]
    def cluster_id
      closed_consumer_check(__method__)
-      Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_clusterid(inner)
+      end
    end
 
    # Returns this client's broker-assigned group member id
@@ -308,7 +340,9 @@ module Rdkafka
    # @return [String, nil]
    def member_id
      closed_consumer_check(__method__)
-      Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_memberid(inner)
+      end
    end
 
    # Store offset of a message to be used in the next commit of this consumer
@@ -325,11 +359,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-        @native_kafka,
-        message.topic,
-        nil
-      )
+      native_topic = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_topic_new(
+          inner,
+          message.topic,
+          nil
+        )
+      end
      response = Rdkafka::Bindings.rd_kafka_offset_store(
        native_topic,
        message.partition,
@@ -357,11 +393,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-        @native_kafka,
-        message.topic,
-        nil
-      )
+      native_topic = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_topic_new(
+          inner,
+          message.topic,
+          nil
+        )
+      end
      response = Rdkafka::Bindings.rd_kafka_seek(
        native_topic,
        message.partition,
@@ -402,7 +440,9 @@ module Rdkafka
      tpl = list ? list.to_native_tpl : nil
 
      begin
-        response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
+        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -421,7 +461,9 @@ module Rdkafka
    def poll(timeout_ms)
      closed_consumer_check(__method__)
 
-      message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+      message_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
+      end
      if message_ptr.null?
        nil
      else
@@ -436,7 +478,7 @@ module Rdkafka
      end
    ensure
      # Clean up rdkafka message if there is one
-      if !message_ptr.nil? && !message_ptr.null?
+      if message_ptr && !message_ptr.null?
        Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
      end
    end
@@ -459,7 +501,7 @@ module Rdkafka
        if message
          yield(message)
        else
-          if @closing
+          if closed?
            break
          else
            next
@@ -468,10 +510,6 @@ module Rdkafka
        end
      end
    end
 
-    def closed_consumer_check(method)
-      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
-    end
-
    # Poll for new messages and yield them in batches that may contain
    # messages from more than one partition.
    #
@@ -527,7 +565,7 @@ module Rdkafka
      bytes = 0
      end_time = monotonic_now + timeout_ms / 1000.0
      loop do
-        break if @closing
+        break if closed?
        max_wait = end_time - monotonic_now
        max_wait_ms = if max_wait <= 0
                        0 # should not block, but may retrieve a message
@@ -545,7 +583,7 @@ module Rdkafka
        end
        if message
          slice << message
-          bytes += message.payload.bytesize
+          bytes += message.payload.bytesize if message.payload
        end
        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
          yield slice.dup, nil
@@ -561,5 +599,9 @@ module Rdkafka
      # needed because Time.now can go backwards
      Process.clock_gettime(Process::CLOCK_MONOTONIC)
    end
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if closed?
+    end
  end
end
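One behavioral fix worth calling out from the each_batch hunk: tombstone messages (payload == nil) no longer break the byte-threshold accounting. A short usage sketch; the topic name and handler are hypothetical:

    consumer.subscribe("events")
    consumer.each_batch(max_items: 100, timeout_ms: 250) do |messages|
      messages.each do |message|
        next if message.payload.nil? # tombstones are now safe to receive
        handle_event(message)        # hypothetical application callback
      end
    end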
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # Base error class.
   class BaseError < RuntimeError; end
@@ -83,4 +85,17 @@ module Rdkafka
      super("Illegal call to #{method.to_s} on a closed producer")
    end
  end
+
+  # Error class for public consumer method calls on a closed admin.
+  class ClosedAdminError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed admin")
+    end
+  end
+
+  class ClosedInnerError < BaseError
+    def initialize
+      super("Illegal call to a closed inner librdkafka instance")
+    end
+  end
 end
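All of these closed-handle errors derive from Rdkafka::BaseError, so the new admin and inner variants can be rescued the same way as the existing consumer and producer ones. A hypothetical guard around a racy shutdown:

    begin
      consumer.poll(100)
    rescue Rdkafka::ClosedConsumerError => e
      warn(e.message) # "Illegal call to poll on a closed consumer"
    end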
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,8 +1,10 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Metadata
     attr_reader :brokers, :topics
 
-    def initialize(native_client, topic_name = nil)
+    def initialize(native_client, topic_name = nil, timeout_ms = 250)
      native_topic = if topic_name
        Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
      end
@@ -14,7 +16,7 @@ module Rdkafka
      topic_flag = topic_name.nil? ? 1 : 0
 
      # Retrieve the Metadata
-      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)
 
      # Error Handling
      raise Rdkafka::RdkafkaError.new(result) unless result.zero?
data/lib/rdkafka/native_kafka.rb ADDED
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # @private
+  # A wrapper around a native kafka that polls and cleanly exits
+  class NativeKafka
+    def initialize(inner, run_polling_thread:)
+      @inner = inner
+      # Lock around external access
+      @access_mutex = Mutex.new
+      # Lock around internal polling
+      @poll_mutex = Mutex.new
+      # Lock around decrementing the operations in progress counter
+      # We have two mutexes - one for increment (`@access_mutex`) and one for decrement mutex
+      # because they serve different purposes:
+      #
+      # - `@access_mutex` allows us to lock the execution and make sure that any operation within
+      #   the `#synchronize` is the only one running and that there are no other running
+      #   operations.
+      # - `@decrement_mutex` ensures, that our decrement operation is thread-safe for any Ruby
+      #   implementation.
+      #
+      # We do not use the same mutex, because it could create a deadlock when an already
+      # incremented operation cannot decrement because `@access_lock` is now owned by a different
+      # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
+      @decrement_mutex = Mutex.new
+      # counter for operations in progress using inner
+      @operations_in_progress = 0
+
+      if run_polling_thread
+        # Start thread to poll client for delivery callbacks,
+        # not used in consumer.
+        @polling_thread = Thread.new do
+          loop do
+            @poll_mutex.synchronize do
+              Rdkafka::Bindings.rd_kafka_poll(inner, 100)
+            end
+
+            # Exit thread if closing and the poll queue is empty
+            if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+              break
+            end
+          end
+        end
+
+        @polling_thread.abort_on_exception = true
+        @polling_thread[:closing] = false
+      end
+
+      @closing = false
+    end
+
+    def with_inner
+      if @access_mutex.owned?
+        @operations_in_progress += 1
+      else
+        @access_mutex.synchronize { @operations_in_progress += 1 }
+      end
+
+      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+    ensure
+      @decrement_mutex.synchronize { @operations_in_progress -= 1 }
+    end
+
+    def synchronize(&block)
+      @access_mutex.synchronize do
+        # Wait for any commands using the inner to finish
+        # This can take a while on blocking operations like polling but is essential not to proceed
+        # with certain types of operations like resources destruction as it can cause the process
+        # to hang or crash
+        sleep(0.01) until @operations_in_progress.zero?
+
+        with_inner(&block)
+      end
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    def closed?
+      @closing || @inner.nil?
+    end
+
+    def close(object_id=nil)
+      return if closed?
+
+      synchronize do
+        # Indicate to the outside world that we are closing
+        @closing = true
+
+        if @polling_thread
+          # Indicate to polling thread that we're closing
+          @polling_thread[:closing] = true
+
+          # Wait for the polling thread to finish up,
+          # this can be aborted in practice if this
+          # code runs from a finalizer.
+          @polling_thread.join
+        end
+
+        # Destroy the client after locking both mutexes
+        @poll_mutex.lock
+
+        # This check prevents a race condition, where we would enter the close in two threads
+        # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
+        # and would continue to run, trying to destroy inner twice
+        return unless @inner
+
+        Rdkafka::Bindings.rd_kafka_destroy(@inner)
+        @inner = nil
+      end
+    end
+  end
+end
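The wrapper's contract, roughly: routine librdkafka calls go through #with_inner, which bumps an in-progress counter under @access_mutex, while destructive operations use #synchronize, which holds the mutex and spins until in-flight calls drain. A sketch of the internal calling pattern; this is a @private class, the raw pointer variable is hypothetical, and the bindings shown are the ones used in the diffs above:

    # `raw` would be a librdkafka client pointer, e.g. from Config#native_kafka.
    native = Rdkafka::NativeKafka.new(raw, run_polling_thread: false)

    # Counted, concurrent access for ordinary calls:
    native.with_inner { |inner| Rdkafka::Bindings.rd_kafka_outq_len(inner) }

    # Exclusive access for operations that must not overlap anything else:
    native.synchronize { |inner| Rdkafka::Bindings.rd_kafka_consumer_close(inner) }

    native.close # sets @closing, joins the polling thread if any, destroys the handle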
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
@@ -6,7 +8,8 @@ module Rdkafka
      layout :pending, :bool,
             :response, :int,
             :partition, :int,
-             :offset, :int64
+             :offset, :int64,
+             :topic_name, :pointer
 
      # @return [String] the name of the operation (e.g. "delivery")
      def operation_name
@@ -15,7 +18,7 @@ module Rdkafka
 
      # @return [DeliveryReport] a report on the delivery of the message
      def create_result
-        DeliveryReport.new(self[:partition], self[:offset])
+        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
      end
    end
  end
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Delivery report for a successfully produced message.
@@ -10,15 +12,20 @@ module Rdkafka
      # @return [Integer]
      attr_reader :offset
 
-      # Error in case happen during produce.
+      # The name of the topic this message was produced to.
      # @return [String]
+      attr_reader :topic_name
+
+      # Error in case happen during produce.
+      # @return [Integer]
      attr_reader :error
 
      private
 
-      def initialize(partition, offset, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil)
        @partition = partition
        @offset = offset
+        @topic_name = topic_name
        @error = error
      end
    end
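End-to-end, the topic name now flows from the native delivery handle into the report. A minimal sketch, assuming a local broker and topic:

    producer = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").producer
    handle = producer.produce(topic: "events", payload: "hello")
    report = handle.wait(max_wait_timeout: 5)
    report.topic_name # => "events" (new in this release)
    report.partition  # => assigned partition
    report.offset     # => assigned offset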