rdkafka 0.12.0 → 0.13.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (54) hide show
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +7 -3
  3. data/CHANGELOG.md +18 -0
  4. data/Gemfile +2 -0
  5. data/README.md +26 -0
  6. data/Rakefile +2 -0
  7. data/ext/Rakefile +2 -0
  8. data/lib/rdkafka/abstract_handle.rb +2 -0
  9. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  10. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  11. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin.rb +48 -31
  14. data/lib/rdkafka/bindings.rb +50 -37
  15. data/lib/rdkafka/callbacks.rb +7 -1
  16. data/lib/rdkafka/config.rb +13 -10
  17. data/lib/rdkafka/consumer/headers.rb +24 -7
  18. data/lib/rdkafka/consumer/message.rb +3 -1
  19. data/lib/rdkafka/consumer/partition.rb +2 -0
  20. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  21. data/lib/rdkafka/consumer.rb +86 -44
  22. data/lib/rdkafka/error.rb +15 -0
  23. data/lib/rdkafka/metadata.rb +4 -2
  24. data/lib/rdkafka/native_kafka.rb +115 -0
  25. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  26. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  27. data/lib/rdkafka/producer.rb +35 -13
  28. data/lib/rdkafka/version.rb +5 -3
  29. data/lib/rdkafka.rb +3 -1
  30. data/rdkafka.gemspec +2 -0
  31. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  32. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  34. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  36. data/spec/rdkafka/admin_spec.rb +4 -3
  37. data/spec/rdkafka/bindings_spec.rb +2 -0
  38. data/spec/rdkafka/callbacks_spec.rb +2 -0
  39. data/spec/rdkafka/config_spec.rb +17 -2
  40. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  41. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  42. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  44. data/spec/rdkafka/consumer_spec.rb +120 -22
  45. data/spec/rdkafka/error_spec.rb +2 -0
  46. data/spec/rdkafka/metadata_spec.rb +2 -0
  47. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  48. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  49. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  50. data/spec/rdkafka/producer_spec.rb +51 -19
  51. data/spec/spec_helper.rb +17 -1
  52. metadata +12 -12
  53. data/bin/console +0 -11
  54. data/lib/rdkafka/producer/client.rb +0 -47
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Consumer
3
5
  # A message that was consumed from a topic.
@@ -18,7 +20,7 @@ module Rdkafka
18
20
  # @return [String, nil]
19
21
  attr_reader :key
20
22
 
21
- # This message's offset in it's partition
23
+ # This message's offset in its partition
22
24
  # @return [Integer]
23
25
  attr_reader :offset
24
26
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Consumer
3
5
  # Information about a partition, used in {TopicPartitionList}.
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Consumer
3
5
  # A list of topics with their partition information
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
3
5
  # brokers automatically assign partitions and load balance partitions over consumers that
@@ -14,18 +16,28 @@ module Rdkafka
14
16
  # @private
15
17
  def initialize(native_kafka)
16
18
  @native_kafka = native_kafka
17
- @closing = false
19
+ end
20
+
21
+ def finalizer
22
+ ->(_) { close }
18
23
  end
19
24
 
20
25
  # Close this consumer
21
26
  # @return [nil]
22
27
  def close
23
- return unless @native_kafka
28
+ return if closed?
29
+ ObjectSpace.undefine_finalizer(self)
30
+
31
+ @native_kafka.synchronize do |inner|
32
+ Rdkafka::Bindings.rd_kafka_consumer_close(inner)
33
+ end
24
34
 
25
- @closing = true
26
- Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
27
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
28
- @native_kafka = nil
35
+ @native_kafka.close
36
+ end
37
+
38
+ # Whether this consumer has closed
39
+ def closed?
40
+ @native_kafka.closed?
29
41
  end
30
42
 
31
43
  # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -46,7 +58,9 @@ module Rdkafka
46
58
  end
47
59
 
48
60
  # Subscribe to topic partition list and check this was successful
49
- response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
61
+ response = @native_kafka.with_inner do |inner|
62
+ Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
63
+ end
50
64
  if response != 0
51
65
  raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
52
66
  end
@@ -62,7 +76,9 @@ module Rdkafka
62
76
  def unsubscribe
63
77
  closed_consumer_check(__method__)
64
78
 
65
- response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
79
+ response = @native_kafka.with_inner do |inner|
80
+ Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
81
+ end
66
82
  if response != 0
67
83
  raise Rdkafka::RdkafkaError.new(response)
68
84
  end
@@ -85,7 +101,9 @@ module Rdkafka
85
101
  tpl = list.to_native_tpl
86
102
 
87
103
  begin
88
- response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
104
+ response = @native_kafka.with_inner do |inner|
105
+ Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
106
+ end
89
107
 
90
108
  if response != 0
91
109
  list = TopicPartitionList.from_native_tpl(tpl)
@@ -113,7 +131,9 @@ module Rdkafka
113
131
  tpl = list.to_native_tpl
114
132
 
115
133
  begin
116
- response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
134
+ response = @native_kafka.with_inner do |inner|
135
+ Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
136
+ end
117
137
  if response != 0
118
138
  raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
119
139
  end
@@ -131,7 +151,9 @@ module Rdkafka
131
151
  closed_consumer_check(__method__)
132
152
 
133
153
  ptr = FFI::MemoryPointer.new(:pointer)
134
- response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
154
+ response = @native_kafka.with_inner do |inner|
155
+ Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
156
+ end
135
157
 
136
158
  if response != 0
137
159
  raise Rdkafka::RdkafkaError.new(response)
@@ -161,7 +183,9 @@ module Rdkafka
161
183
  tpl = list.to_native_tpl
162
184
 
163
185
  begin
164
- response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
186
+ response = @native_kafka.with_inner do |inner|
187
+ Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
188
+ end
165
189
  if response != 0
166
190
  raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
167
191
  end
@@ -179,7 +203,9 @@ module Rdkafka
179
203
  closed_consumer_check(__method__)
180
204
 
181
205
  ptr = FFI::MemoryPointer.new(:pointer)
182
- response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
206
+ response = @native_kafka.with_inner do |inner|
207
+ Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
208
+ end
183
209
  if response != 0
184
210
  raise Rdkafka::RdkafkaError.new(response)
185
211
  end
@@ -218,7 +244,9 @@ module Rdkafka
218
244
  tpl = list.to_native_tpl
219
245
 
220
246
  begin
221
- response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
247
+ response = @native_kafka.with_inner do |inner|
248
+ Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
249
+ end
222
250
  if response != 0
223
251
  raise Rdkafka::RdkafkaError.new(response)
224
252
  end
@@ -243,14 +271,16 @@ module Rdkafka
243
271
  low = FFI::MemoryPointer.new(:int64, 1)
244
272
  high = FFI::MemoryPointer.new(:int64, 1)
245
273
 
246
- response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
247
- @native_kafka,
248
- topic,
249
- partition,
250
- low,
251
- high,
252
- timeout_ms,
253
- )
274
+ response = @native_kafka.with_inner do |inner|
275
+ Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
276
+ inner,
277
+ topic,
278
+ partition,
279
+ low,
280
+ high,
281
+ timeout_ms,
282
+ )
283
+ end
254
284
  if response != 0
255
285
  raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
256
286
  end
@@ -298,7 +328,9 @@ module Rdkafka
298
328
  # @return [String, nil]
299
329
  def cluster_id
300
330
  closed_consumer_check(__method__)
301
- Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
331
+ @native_kafka.with_inner do |inner|
332
+ Rdkafka::Bindings.rd_kafka_clusterid(inner)
333
+ end
302
334
  end
303
335
 
304
336
  # Returns this client's broker-assigned group member id
@@ -308,7 +340,9 @@ module Rdkafka
308
340
  # @return [String, nil]
309
341
  def member_id
310
342
  closed_consumer_check(__method__)
311
- Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
343
+ @native_kafka.with_inner do |inner|
344
+ Rdkafka::Bindings.rd_kafka_memberid(inner)
345
+ end
312
346
  end
313
347
 
314
348
  # Store offset of a message to be used in the next commit of this consumer
@@ -325,11 +359,13 @@ module Rdkafka
325
359
 
326
360
  # rd_kafka_offset_store is one of the few calls that does not support
327
361
  # a string as the topic, so create a native topic for it.
328
- native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
329
- @native_kafka,
330
- message.topic,
331
- nil
332
- )
362
+ native_topic = @native_kafka.with_inner do |inner|
363
+ Rdkafka::Bindings.rd_kafka_topic_new(
364
+ inner,
365
+ message.topic,
366
+ nil
367
+ )
368
+ end
333
369
  response = Rdkafka::Bindings.rd_kafka_offset_store(
334
370
  native_topic,
335
371
  message.partition,
@@ -357,11 +393,13 @@ module Rdkafka
357
393
 
358
394
  # rd_kafka_offset_store is one of the few calls that does not support
359
395
  # a string as the topic, so create a native topic for it.
360
- native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
361
- @native_kafka,
362
- message.topic,
363
- nil
364
- )
396
+ native_topic = @native_kafka.with_inner do |inner|
397
+ Rdkafka::Bindings.rd_kafka_topic_new(
398
+ inner,
399
+ message.topic,
400
+ nil
401
+ )
402
+ end
365
403
  response = Rdkafka::Bindings.rd_kafka_seek(
366
404
  native_topic,
367
405
  message.partition,
@@ -402,7 +440,9 @@ module Rdkafka
402
440
  tpl = list ? list.to_native_tpl : nil
403
441
 
404
442
  begin
405
- response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
443
+ response = @native_kafka.with_inner do |inner|
444
+ Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
445
+ end
406
446
  if response != 0
407
447
  raise Rdkafka::RdkafkaError.new(response)
408
448
  end
@@ -421,7 +461,9 @@ module Rdkafka
421
461
  def poll(timeout_ms)
422
462
  closed_consumer_check(__method__)
423
463
 
424
- message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
464
+ message_ptr = @native_kafka.with_inner do |inner|
465
+ Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
466
+ end
425
467
  if message_ptr.null?
426
468
  nil
427
469
  else
@@ -436,7 +478,7 @@ module Rdkafka
436
478
  end
437
479
  ensure
438
480
  # Clean up rdkafka message if there is one
439
- if !message_ptr.nil? && !message_ptr.null?
481
+ if message_ptr && !message_ptr.null?
440
482
  Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
441
483
  end
442
484
  end
@@ -459,7 +501,7 @@ module Rdkafka
459
501
  if message
460
502
  yield(message)
461
503
  else
462
- if @closing
504
+ if closed?
463
505
  break
464
506
  else
465
507
  next
@@ -468,10 +510,6 @@ module Rdkafka
468
510
  end
469
511
  end
470
512
 
471
- def closed_consumer_check(method)
472
- raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
473
- end
474
-
475
513
  # Poll for new messages and yield them in batches that may contain
476
514
  # messages from more than one partition.
477
515
  #
@@ -527,7 +565,7 @@ module Rdkafka
527
565
  bytes = 0
528
566
  end_time = monotonic_now + timeout_ms / 1000.0
529
567
  loop do
530
- break if @closing
568
+ break if closed?
531
569
  max_wait = end_time - monotonic_now
532
570
  max_wait_ms = if max_wait <= 0
533
571
  0 # should not block, but may retrieve a message
@@ -545,7 +583,7 @@ module Rdkafka
545
583
  end
546
584
  if message
547
585
  slice << message
548
- bytes += message.payload.bytesize
586
+ bytes += message.payload.bytesize if message.payload
549
587
  end
550
588
  if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
551
589
  yield slice.dup, nil
@@ -561,5 +599,9 @@ module Rdkafka
561
599
  # needed because Time.now can go backwards
562
600
  Process.clock_gettime(Process::CLOCK_MONOTONIC)
563
601
  end
602
+
603
+ def closed_consumer_check(method)
604
+ raise Rdkafka::ClosedConsumerError.new(method) if closed?
605
+ end
564
606
  end
565
607
  end
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  # Base error class.
3
5
  class BaseError < RuntimeError; end
@@ -83,4 +85,17 @@ module Rdkafka
83
85
  super("Illegal call to #{method.to_s} on a closed producer")
84
86
  end
85
87
  end
88
+
89
+ # Error class for public admin method calls on a closed admin.
90
+ class ClosedAdminError < BaseError
91
+ def initialize(method)
92
+ super("Illegal call to #{method.to_s} on a closed admin")
93
+ end
94
+ end
95
+
96
+ class ClosedInnerError < BaseError
97
+ def initialize
98
+ super("Illegal call to a closed inner librdkafka instance")
99
+ end
100
+ end
86
101
  end
@@ -1,8 +1,10 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Metadata
3
5
  attr_reader :brokers, :topics
4
6
 
5
- def initialize(native_client, topic_name = nil)
7
+ def initialize(native_client, topic_name = nil, timeout_ms = 250)
6
8
  native_topic = if topic_name
7
9
  Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
8
10
  end
@@ -14,7 +16,7 @@ module Rdkafka
14
16
  topic_flag = topic_name.nil? ? 1 : 0
15
17
 
16
18
  # Retrieve the Metadata
17
- result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
19
+ result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)
18
20
 
19
21
  # Error Handling
20
22
  raise Rdkafka::RdkafkaError.new(result) unless result.zero?
@@ -0,0 +1,115 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rdkafka
4
+ # @private
5
+ # A wrapper around a native kafka that polls and cleanly exits
6
+ class NativeKafka
7
+ def initialize(inner, run_polling_thread:)
8
+ @inner = inner
9
+ # Lock around external access
10
+ @access_mutex = Mutex.new
11
+ # Lock around internal polling
12
+ @poll_mutex = Mutex.new
13
+ # Lock around decrementing the operations in progress counter
14
+ # We have two mutexes - one for increment (`@access_mutex`) and one for decrement (`@decrement_mutex`)
15
+ # because they serve different purposes:
16
+ #
17
+ # - `@access_mutex` allows us to lock the execution and make sure that any operation within
18
+ # the `#synchronize` is the only one running and that there are no other running
19
+ # operations.
20
+ # - `@decrement_mutex` ensures, that our decrement operation is thread-safe for any Ruby
21
+ # implementation.
22
+ #
23
+ # We do not use the same mutex, because it could create a deadlock when an already
24
+ # incremented operation cannot decrement because `@access_mutex` is now owned by a different
25
+ # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
26
+ @decrement_mutex = Mutex.new
27
+ # counter for operations in progress using inner
28
+ @operations_in_progress = 0
29
+
30
+ if run_polling_thread
31
+ # Start thread to poll client for delivery callbacks,
32
+ # not used in consumer.
33
+ @polling_thread = Thread.new do
34
+ loop do
35
+ @poll_mutex.synchronize do
36
+ Rdkafka::Bindings.rd_kafka_poll(inner, 100)
37
+ end
38
+
39
+ # Exit thread if closing and the poll queue is empty
40
+ if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
41
+ break
42
+ end
43
+ end
44
+ end
45
+
46
+ @polling_thread.abort_on_exception = true
47
+ @polling_thread[:closing] = false
48
+ end
49
+
50
+ @closing = false
51
+ end
52
+
53
+ def with_inner
54
+ if @access_mutex.owned?
55
+ @operations_in_progress += 1
56
+ else
57
+ @access_mutex.synchronize { @operations_in_progress += 1 }
58
+ end
59
+
60
+ @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
61
+ ensure
62
+ @decrement_mutex.synchronize { @operations_in_progress -= 1 }
63
+ end
64
+
65
+ def synchronize(&block)
66
+ @access_mutex.synchronize do
67
+ # Wait for any commands using the inner to finish
68
+ # This can take a while on blocking operations like polling but is essential not to proceed
69
+ # with certain types of operations like resources destruction as it can cause the process
70
+ # to hang or crash
71
+ sleep(0.01) until @operations_in_progress.zero?
72
+
73
+ with_inner(&block)
74
+ end
75
+ end
76
+
77
+ def finalizer
78
+ ->(_) { close }
79
+ end
80
+
81
+ def closed?
82
+ @closing || @inner.nil?
83
+ end
84
+
85
+ def close(object_id=nil)
86
+ return if closed?
87
+
88
+ synchronize do
89
+ # Indicate to the outside world that we are closing
90
+ @closing = true
91
+
92
+ if @polling_thread
93
+ # Indicate to polling thread that we're closing
94
+ @polling_thread[:closing] = true
95
+
96
+ # Wait for the polling thread to finish up,
97
+ # this can be aborted in practice if this
98
+ # code runs from a finalizer.
99
+ @polling_thread.join
100
+ end
101
+
102
+ # Destroy the client after locking both mutexes
103
+ @poll_mutex.lock
104
+
105
+ # This check prevents a race condition, where we would enter the close in two threads
106
+ # and after unlocking the primary one that held the lock and finished, ours would be unlocked
107
+ # and would continue to run, trying to destroy inner twice
108
+ return unless @inner
109
+
110
+ Rdkafka::Bindings.rd_kafka_destroy(@inner)
111
+ @inner = nil
112
+ end
113
+ end
114
+ end
115
+ end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Producer
3
5
  # Handle to wait for a delivery report which is returned when
@@ -6,7 +8,8 @@ module Rdkafka
6
8
  layout :pending, :bool,
7
9
  :response, :int,
8
10
  :partition, :int,
9
- :offset, :int64
11
+ :offset, :int64,
12
+ :topic_name, :pointer
10
13
 
11
14
  # @return [String] the name of the operation (e.g. "delivery")
12
15
  def operation_name
@@ -15,7 +18,7 @@ module Rdkafka
15
18
 
16
19
  # @return [DeliveryReport] a report on the delivery of the message
17
20
  def create_result
18
- DeliveryReport.new(self[:partition], self[:offset])
21
+ DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
19
22
  end
20
23
  end
21
24
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Producer
3
5
  # Delivery report for a successfully produced message.
@@ -10,15 +12,20 @@ module Rdkafka
10
12
  # @return [Integer]
11
13
  attr_reader :offset
12
14
 
13
- # Error in case happen during produce.
15
+ # The name of the topic this message was produced to.
14
16
  # @return [String]
17
+ attr_reader :topic_name
18
+
19
+ # Error that occurred during produce, if any.
20
+ # @return [Integer]
15
21
  attr_reader :error
16
22
 
17
23
  private
18
24
 
19
- def initialize(partition, offset, error = nil)
25
+ def initialize(partition, offset, topic_name = nil, error = nil)
20
26
  @partition = partition
21
27
  @offset = offset
28
+ @topic_name = topic_name
22
29
  @error = error
23
30
  end
24
31
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "objspace"
2
4
 
3
5
  module Rdkafka
@@ -16,12 +18,12 @@ module Rdkafka
16
18
  attr_reader :delivery_callback_arity
17
19
 
18
20
  # @private
19
- def initialize(client, partitioner_name)
20
- @client = client
21
+ def initialize(native_kafka, partitioner_name)
22
+ @native_kafka = native_kafka
21
23
  @partitioner_name = partitioner_name || "consistent_random"
22
24
 
23
- # Makes sure, that the producer gets closed before it gets GCed by Ruby
24
- ObjectSpace.define_finalizer(self, client.finalizer)
25
+ # Makes sure, that native kafka gets closed before it gets GCed by Ruby
26
+ ObjectSpace.define_finalizer(self, native_kafka.finalizer)
25
27
  end
26
28
 
27
29
  # Set a callback that will be called every time a message is successfully produced.
@@ -38,9 +40,26 @@ module Rdkafka
38
40
 
39
41
  # Close this producer and wait for the internal poll queue to empty.
40
42
  def close
43
+ return if closed?
41
44
  ObjectSpace.undefine_finalizer(self)
45
+ @native_kafka.close
46
+ end
47
+
48
+ # Whether this producer has closed
49
+ def closed?
50
+ @native_kafka.closed?
51
+ end
52
+
53
+ # Wait until all outstanding producer requests are completed, with the given timeout
54
+ # in milliseconds. Call this before closing a producer to ensure delivery of all messages.
55
+ #
56
+ # @param timeout_ms [Integer] how long to wait (in milliseconds) for all messages to flush
57
+ def flush(timeout_ms=5_000)
58
+ closed_producer_check(__method__)
42
59
 
43
- @client.close
60
+ @native_kafka.with_inner do |inner|
61
+ Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
62
+ end
44
63
  end
45
64
 
46
65
  # Partition count for a given topic.
@@ -49,10 +68,11 @@ module Rdkafka
49
68
  # @param topic [String] The topic name.
50
69
  #
51
70
  # @return partition count [Integer,nil]
52
- #
53
71
  def partition_count(topic)
54
72
  closed_producer_check(__method__)
55
- Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
73
+ @native_kafka.with_inner do |inner|
74
+ Rdkafka::Metadata.new(inner, topic).topics&.first[:partition_count]
75
+ end
56
76
  end
57
77
 
58
78
  # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -143,10 +163,12 @@ module Rdkafka
143
163
  args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
144
164
 
145
165
  # Produce the message
146
- response = Rdkafka::Bindings.rd_kafka_producev(
147
- @client.native,
148
- *args
149
- )
166
+ response = @native_kafka.with_inner do |inner|
167
+ Rdkafka::Bindings.rd_kafka_producev(
168
+ inner,
169
+ *args
170
+ )
171
+ end
150
172
 
151
173
  # Raise error if the produce call was not successful
152
174
  if response != 0
@@ -157,7 +179,6 @@ module Rdkafka
157
179
  delivery_handle
158
180
  end
159
181
 
160
- # @private
161
182
  def call_delivery_callback(delivery_report, delivery_handle)
162
183
  return unless @delivery_callback
163
184
 
@@ -171,8 +192,9 @@ module Rdkafka
171
192
  callback.method(:call).arity
172
193
  end
173
194
 
195
+ private
174
196
  def closed_producer_check(method)
175
- raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
197
+ raise Rdkafka::ClosedProducerError.new(method) if closed?
176
198
  end
177
199
  end
178
200
  end
@@ -1,5 +1,7 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
- VERSION = "0.12.0"
3
- LIBRDKAFKA_VERSION = "1.9.0"
4
- LIBRDKAFKA_SOURCE_SHA256 = "59b6088b69ca6cf278c3f9de5cd6b7f3fd604212cd1c59870bc531c54147e889"
4
+ VERSION = "0.13.0"
5
+ LIBRDKAFKA_VERSION = "2.0.2"
6
+ LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
5
7
  end
data/lib/rdkafka.rb CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "rdkafka/version"
2
4
 
3
5
  require "rdkafka/abstract_handle"
@@ -16,7 +18,7 @@ require "rdkafka/consumer/partition"
16
18
  require "rdkafka/consumer/topic_partition_list"
17
19
  require "rdkafka/error"
18
20
  require "rdkafka/metadata"
21
+ require "rdkafka/native_kafka"
19
22
  require "rdkafka/producer"
20
- require "rdkafka/producer/client"
21
23
  require "rdkafka/producer/delivery_handle"
22
24
  require "rdkafka/producer/delivery_report"
data/rdkafka.gemspec CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require File.expand_path('lib/rdkafka/version', __dir__)
2
4
 
3
5
  Gem::Specification.new do |gem|
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "spec_helper"
2
4
 
3
5
  describe Rdkafka::AbstractHandle do
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "spec_helper"
2
4
 
3
5
  describe Rdkafka::Admin::CreateTopicHandle do
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "spec_helper"
2
4
 
3
5
  describe Rdkafka::Admin::CreateTopicReport do