rdkafka 0.13.0 → 0.15.0

Files changed (73)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +2 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +58 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +141 -111
  10. data/{LICENSE → MIT-LICENSE} +2 -1
  11. data/README.md +48 -39
  12. data/certs/cert_chain.pem +26 -0
  13. data/docker-compose.yml +18 -15
  14. data/ext/README.md +1 -1
  15. data/ext/Rakefile +1 -1
  16. data/lib/rdkafka/abstract_handle.rb +40 -26
  17. data/lib/rdkafka/admin/acl_binding_result.rb +37 -0
  18. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  19. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  20. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  21. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  22. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  23. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  24. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  25. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  26. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin.rb +449 -7
  29. data/lib/rdkafka/bindings.rb +127 -5
  30. data/lib/rdkafka/callbacks.rb +187 -0
  31. data/lib/rdkafka/config.rb +53 -19
  32. data/lib/rdkafka/consumer/headers.rb +2 -4
  33. data/lib/rdkafka/consumer/topic_partition_list.rb +11 -8
  34. data/lib/rdkafka/consumer.rb +134 -59
  35. data/lib/rdkafka/helpers/time.rb +14 -0
  36. data/lib/rdkafka/metadata.rb +22 -1
  37. data/lib/rdkafka/native_kafka.rb +6 -1
  38. data/lib/rdkafka/producer.rb +87 -9
  39. data/lib/rdkafka/version.rb +3 -3
  40. data/lib/rdkafka.rb +21 -1
  41. data/rdkafka.gemspec +17 -3
  42. data/renovate.json +6 -0
  43. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  44. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  45. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  46. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  47. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  48. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  49. data/spec/rdkafka/admin/delete_acl_report_spec.rb +71 -0
  50. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  51. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  52. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  53. data/spec/rdkafka/admin/describe_acl_report_spec.rb +72 -0
  54. data/spec/rdkafka/admin_spec.rb +205 -2
  55. data/spec/rdkafka/bindings_spec.rb +0 -1
  56. data/spec/rdkafka/callbacks_spec.rb +0 -2
  57. data/spec/rdkafka/config_spec.rb +8 -2
  58. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  59. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  60. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  61. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +19 -2
  62. data/spec/rdkafka/consumer_spec.rb +212 -39
  63. data/spec/rdkafka/error_spec.rb +0 -2
  64. data/spec/rdkafka/metadata_spec.rb +2 -3
  65. data/spec/rdkafka/native_kafka_spec.rb +2 -3
  66. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  67. data/spec/rdkafka/producer/delivery_report_spec.rb +0 -2
  68. data/spec/rdkafka/producer_spec.rb +157 -1
  69. data/spec/spec_helper.rb +3 -1
  70. data.tar.gz.sig +3 -0
  71. metadata +76 -13
  72. metadata.gz.sig +3 -0
  73. data/.semaphore/semaphore.yml +0 -27
data/lib/rdkafka/consumer.rb CHANGED
@@ -12,6 +12,7 @@ module Rdkafka
   # `each_slice` to consume batches of messages.
   class Consumer
     include Enumerable
+    include Helpers::Time

     # @private
     def initialize(native_kafka)
@@ -22,6 +23,13 @@ module Rdkafka
       ->(_) { close }
     end

+    # @return [String] consumer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
+    end
+
     # Close this consumer
     # @return [nil]
     def close
@@ -40,13 +48,11 @@ module Rdkafka
       @native_kafka.closed?
     end

-    # Subscribe to one or more topics letting Kafka handle partition assignments.
+    # Subscribes to one or more topics letting Kafka handle partition assignments.
     #
     # @param topics [Array<String>] One or more topic names
-    #
-    # @raise [RdkafkaError] When subscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When subscribing fails
     def subscribe(*topics)
       closed_consumer_check(__method__)

@@ -70,9 +76,8 @@ module Rdkafka

     # Unsubscribe from all subscribed topics.
     #
-    # @raise [RdkafkaError] When unsubscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When unsubscribing fails
     def unsubscribe
       closed_consumer_check(__method__)

@@ -87,10 +92,8 @@ module Rdkafka
     # Pause producing or consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
     def pause(list)
       closed_consumer_check(__method__)

@@ -114,13 +117,11 @@ module Rdkafka
       end
     end

-    # Resume producing consumption for the provided list of partitions
+    # Resumes producing consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaError] When resume subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When resume subscription fails.
     def resume(list)
       closed_consumer_check(__method__)

@@ -142,11 +143,10 @@ module Rdkafka
       end
     end

-    # Return the current subscription to topics and partitions
-    #
-    # @raise [RdkafkaError] When getting the subscription fails.
+    # Returns the current subscription to topics and partitions
     #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the subscription fails.
     def subscription
       closed_consumer_check(__method__)

@@ -171,7 +171,6 @@ module Rdkafka
     # Atomic assignment of partitions to consume
     #
     # @param list [TopicPartitionList] The topic with partitions to assign
-    #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
       closed_consumer_check(__method__)
@@ -196,9 +195,8 @@ module Rdkafka

     # Returns the current partition assignment.
     #
-    # @raise [RdkafkaError] When getting the assignment fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the assignment fails.
     def assignment
       closed_consumer_check(__method__)

@@ -223,15 +221,24 @@ module Rdkafka
       ptr.free unless ptr.nil?
     end

+    # @return [Boolean] true if our current assignment has been lost involuntarily.
+    def assignment_lost?
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+      end
+    end
+
     # Return the current committed offset per partition for this consumer group.
-    # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
+    # The offset field of each requested partition will either be set to stored offset or to -1001
+    # in case there was no stored offset for that partition.
     #
-    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
+    #   to use the current subscription.
     # @param timeout_ms [Integer] The timeout for fetching this information.
-    #
-    # @raise [RdkafkaError] When getting the committed positions fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the committed positions fails.
     def committed(list=nil, timeout_ms=1200)
       closed_consumer_check(__method__)

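The new `assignment_lost?` predicate maps librdkafka's `rd_kafka_assignment_lost` to a Boolean. A minimal usage sketch (editorial illustration, not part of the diff; the broker address, group id, and topic name are assumptions):

```ruby
require "rdkafka"

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092", # assumed broker
  "group.id" => "example-group"            # assumed consumer group
)
consumer = config.consumer
consumer.subscribe("events")

# After a rebalance, check whether our partitions were revoked involuntarily
# before trusting locally tracked progress.
if consumer.assignment_lost?
  puts "assignment lost, skipping commit"
else
  puts consumer.committed.to_h.inspect # committed offsets per topic/partition
end
```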
@@ -256,16 +263,42 @@ module Rdkafka
       end
     end

+    # Return the current positions (offsets) for topics and partitions.
+    # The offset field of each requested partition will be set to the offset of the last consumed message + 1, or nil in case there was no previous message.
+    #
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
+    def position(list=nil)
+      if list.nil?
+        list = assignment
+      elsif !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be nil or a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_position(inner, tpl)
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    end
+
     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
     #
     # @param topic [String] The topic to query
     # @param partition [Integer] The partition to query
     # @param timeout_ms [Integer] The timeout for querying the broker
-    #
-    # @raise [RdkafkaError] When querying the broker fails.
-    #
     # @return [Integer] The low and high watermark
-    def query_watermark_offsets(topic, partition, timeout_ms=200)
+    # @raise [RdkafkaError] When querying the broker fails.
+    def query_watermark_offsets(topic, partition, timeout_ms=1000)
       closed_consumer_check(__method__)

       low = FFI::MemoryPointer.new(:int64, 1)
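The new `#position` reads the local consumer position (last consumed offset + 1) without a broker round trip, unlike `#committed`. A usage sketch, continuing the assumed consumer/topic from the earlier example and noting the watermark timeout default raised from 200ms to 1000ms:

```ruby
# Positions for the currently assigned partitions (list defaults to #assignment)
positions = consumer.position

positions.to_h.each do |topic, partitions|
  partitions.each do |p|
    puts "#{topic}/#{p.partition}: position=#{p.offset.inspect}"
  end
end

# Compare against broker watermarks
low, high = consumer.query_watermark_offsets("events", 0, 1_000)
puts "watermarks for events/0: #{low}..#{high}"
```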
@@ -298,11 +331,10 @@ module Rdkafka
     #
     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
-    #
+    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
+    #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    #
-    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
-    def lag(topic_partition_list, watermark_timeout_ms=100)
+    def lag(topic_partition_list, watermark_timeout_ms=1000)
       out = {}

       topic_partition_list.to_h.each do |topic, partitions|
@@ -350,10 +382,8 @@ module Rdkafka
     # When using this `enable.auto.offset.store` should be set to `false` in the config.
     #
     # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
-    #
-    # @raise [RdkafkaError] When storing the offset fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When storing the offset fails
     def store_offset(message)
       closed_consumer_check(__method__)

@@ -384,10 +414,8 @@ module Rdkafka
     # message at the given offset.
     #
     # @param message [Rdkafka::Consumer::Message] The message to which to seek
-    #
-    # @raise [RdkafkaError] When seeking fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When seeking fails
     def seek(message)
       closed_consumer_check(__method__)

@@ -415,6 +443,39 @@ module Rdkafka
       end
     end

+    # Lookup offset for the given partitions by timestamp.
+    #
+    # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
+    def offsets_for_times(list, timeout_ms = 1000)
+      closed_consumer_check(__method__)
+
+      if !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_for_times(
+          inner,
+          tpl,
+          timeout_ms # timeout
+        )
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+    end
+
     # Manually commit the current offsets of this consumer.
     #
     # To use this set `enable.auto.commit` to `false` to disable automatic triggering
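A sketch of looking up the first offsets at or after a point in time with the new `#offsets_for_times`. Offsets in the input list are interpreted as timestamps, which librdkafka expects in milliseconds; the topic, partition, and time window here are illustrative assumptions:

```ruby
one_hour_ago_ms = ((Time.now - 3600).to_f * 1000).to_i

list = Rdkafka::Consumer::TopicPartitionList.new
list.add_topic_and_partitions_with_offsets("events", 0 => one_hour_ago_ms)

result = consumer.offsets_for_times(list, 2_000)
result.to_h.fetch("events").each do |p|
  puts "events/#{p.partition}: first offset at/after timestamp => #{p.offset}"
end
```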
@@ -426,10 +487,8 @@ module Rdkafka
     #
     # @param list [TopicPartitionList,nil] The topic with partitions to commit
     # @param async [Boolean] Whether to commit async or wait for the commit to finish
-    #
-    # @raise [RdkafkaError] When committing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When committing fails
     def commit(list=nil, async=false)
       closed_consumer_check(__method__)

@@ -454,10 +513,8 @@ module Rdkafka
     # Poll for the next message on one of the subscribed topics
     #
     # @param timeout_ms [Integer] Timeout of this poll
-    #
-    # @raise [RdkafkaError] When polling fails
-    #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
+    # @raise [RdkafkaError] When polling fails
     def poll(timeout_ms)
       closed_consumer_check(__method__)

@@ -483,18 +540,41 @@
       end
     end

+    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
+    # was set to `true`.
+    #
+    # Events will cause application-provided callbacks to be called.
+    #
+    # Events (in the context of the consumer):
+    #   - error callbacks
+    #   - stats callbacks
+    #   - any other callbacks supported by librdkafka that are not part of the consumer_poll, that
+    #     would have a callback configured and activated.
+    #
+    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
+    # be called. When in use, does **NOT** replace `#poll` but needs to run complementary with it.
+    #
+    # @param timeout_ms [Integer] poll timeout. If set to 0 will run async, when set to -1 will
+    #   block until any events available.
+    #
+    # @note This method technically should be called `#poll` and the current `#poll` should be
+    #   called `#consumer_poll` though we keep the current naming convention to make it backward
+    #   compatible.
+    def events_poll(timeout_ms = 0)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
+      end
+    end
+
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
-    # If `enable.partition.eof` is turned on in the config this will raise an
-    # error when an eof is reached, so you probably want to disable that when
-    # using this method of iteration.
-    #
-    # @raise [RdkafkaError] When polling fails
+    # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
+    # reached, so you probably want to disable that when using this method of iteration.
     #
     # @yieldparam message [Message] Received message
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When polling fails
     def each
       loop do
         message = poll(250)
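Since `#events_poll` complements rather than replaces `#poll`, a consumer loop that also serves queued error/stats callbacks could look like this (illustrative sketch; assumes the relevant callbacks were configured on the config):

```ruby
loop do
  message = consumer.poll(250) # consumer queue: messages
  puts message.payload if message

  consumer.events_poll(0)      # main queue: error/stats callbacks, non-blocking
end
```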
@@ -546,19 +626,18 @@
     # that you may or may not see again.
     #
     # @param max_items [Integer] Maximum size of the yielded array of messages
-    #
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    #
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
-    # @raise [RdkafkaError] When polling fails
-    #
-    # @yield [messages, pending_exception]
     # @yieldparam messages [Array] An array of received Message
     # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #
+    # @yield [messages, pending_exception]
     # which will be propagated after processing of the partial batch is complete.
     #
     # @return [nil]
+    #
+    # @raise [RdkafkaError] When polling fails
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       closed_consumer_check(__method__)
       slice = []
@@ -595,10 +674,6 @@ module Rdkafka
     end

     private
-    def monotonic_now
-      # needed because Time.now can go backwards
-      Process.clock_gettime(Process::CLOCK_MONOTONIC)
-    end

     def closed_consumer_check(method)
       raise Rdkafka::ClosedConsumerError.new(method) if closed?
data/lib/rdkafka/helpers/time.rb ADDED
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # Namespace for some small utilities used in multiple components
+  module Helpers
+    # Time related methods used across Karafka
+    module Time
+      # @return [Float] current monotonic time in seconds with microsecond precision
+      def monotonic_now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
+  end
+end
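This helper (extracted from the consumer's former private `monotonic_now`) exists because `Time.now` follows the wall clock, which can jump backwards under NTP adjustments, making it unsafe for measuring durations. A small sketch of mixing it in (the `ElapsedTimer` class is hypothetical):

```ruby
class ElapsedTimer
  include Rdkafka::Helpers::Time

  # Returns the duration of the block in seconds, immune to wall-clock jumps
  def measure
    started_at = monotonic_now
    yield
    monotonic_now - started_at
  end
end

puts ElapsedTimer.new.measure { sleep(0.25) } # => ~0.25
```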
data/lib/rdkafka/metadata.rb CHANGED
@@ -4,7 +4,18 @@ module Rdkafka
   class Metadata
     attr_reader :brokers, :topics

-    def initialize(native_client, topic_name = nil, timeout_ms = 250)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -22,6 +33,16 @@ module Rdkafka
       raise Rdkafka::RdkafkaError.new(result) unless result.zero?

       metadata_from_native(ptr.read_pointer)
+    rescue ::Rdkafka::RdkafkaError => e
+      raise unless RETRIED_ERRORS.include?(e.code)
+      raise if attempt > 10
+
+      backoff_factor = 2**attempt
+      timeout = backoff_factor * 0.1
+
+      sleep(timeout)
+
+      retry
     ensure
       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
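The rescue above retries `timed_out` and `leader_not_available` with exponential backoff: it sleeps `2**attempt * 0.1` seconds and re-raises once `attempt > 10`. A quick sketch of the resulting schedule:

```ruby
(1..10).each do |attempt|
  printf("attempt %2d: sleep %.1fs\n", attempt, 2**attempt * 0.1)
end
# attempt 1: 0.2s, attempt 2: 0.4s, ..., attempt 10: 102.4s
# (worst case ~204.6s of sleeping in total before giving up)
```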
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -4,8 +4,9 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:)
+    def initialize(inner, run_polling_thread:, opaque:)
       @inner = inner
+      @opaque = opaque
       # Lock around external access
       @access_mutex = Mutex.new
       # Lock around internal polling
@@ -27,6 +28,9 @@
       # counter for operations in progress using inner
       @operations_in_progress = 0

+      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+
       if run_polling_thread
         # Start thread to poll client for delivery callbacks,
         # not used in consumer.
@@ -109,6 +113,7 @@

       Rdkafka::Bindings.rd_kafka_destroy(@inner)
       @inner = nil
+      @opaque = nil
     end
   end
 end
data/lib/rdkafka/producer.rb CHANGED
@@ -1,10 +1,15 @@
 # frozen_string_literal: true

-require "objspace"
-
 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
+    include Helpers::Time
+
+    # Cache partitions count for 30 seconds
+    PARTITIONS_COUNT_TTL = 30
+
+    private_constant :PARTITIONS_COUNT_TTL
+
     # @private
     # Returns the current delivery callback, by default this is nil.
     #
@@ -24,6 +29,26 @@ module Rdkafka

       # Makes sure, that native kafka gets closed before it gets GCed by Ruby
       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+
+      @_partitions_count_cache = Hash.new do |cache, topic|
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        cache[topic] = [
+          monotonic_now,
+          topic_metadata ? topic_metadata[:partition_count] : nil
+        ]
+      end
+    end
+
+    # @return [String] producer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
     end

     # Set a callback that will be called every time a message is successfully produced.
@@ -54,25 +79,77 @@ module Rdkafka
     # in seconds. Call this before closing a producer to ensure delivery of all messages.
     #
     # @param timeout_ms [Integer] how long should we wait for flush of all messages
+    # @return [Boolean] true if no more data and all was flushed, false in case there are still
+    #   outgoing messages after the timeout
+    #
+    # @note We raise an exception for other errors because based on the librdkafka docs, there
+    #   should be no other errors.
+    #
+    # @note For `timed_out` we do not raise an error to keep it backwards compatible
     def flush(timeout_ms=5_000)
       closed_producer_check(__method__)

+      code = nil
+
       @native_kafka.with_inner do |inner|
-        Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+        code = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
       end
+
+      # Early skip not to build the error message
+      return true if code.zero?
+
+      error = Rdkafka::RdkafkaError.new(code)
+
+      return false if error.code == :timed_out
+
+      raise(error)
+    end
+
+    # Purges the outgoing queue and releases all resources.
+    #
+    # Useful when closing the producer with outgoing messages to unstable clusters or when for
+    # any other reasons waiting cannot go on anymore. This purges both the queue and all the
+    # inflight requests + updates the delivery handles statuses so they can be materialized into
+    # `purge_queue` errors.
+    def purge
+      closed_producer_check(__method__)
+
+      code = nil
+
+      @native_kafka.with_inner do |inner|
+        code = Bindings.rd_kafka_purge(
+          inner,
+          Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+        )
+      end
+
+      code.zero? || raise(Rdkafka::RdkafkaError.new(code))
+
+      # Wait for the purge to affect everything
+      sleep(0.001) until flush(100)
+
+      true
     end

     # Partition count for a given topic.
-    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
     #
     # @param topic [String] The topic name.
+    # @return [Integer] partition count for a given topic
+    #
+    # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
+    #   auto-created after returning nil.
     #
-    # @return partition count [Integer,nil]
+    # @note We cache the partition count for a given topic for given time.
+    #   This prevents us in case someone uses `partition_key` from querying for the count with
+    #   each message. Instead we query once every 30 seconds at most
     def partition_count(topic)
       closed_producer_check(__method__)
-      @native_kafka.with_inner do |inner|
-        Rdkafka::Metadata.new(inner, topic).topics&.first[:partition_count]
+
+      @_partitions_count_cache.delete_if do |_, cached|
+        monotonic_now - cached.first > PARTITIONS_COUNT_TTL
       end
+
+      @_partitions_count_cache[topic].last
     end

     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
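Taken together, the new `#flush` return value and `#purge` allow a bounded shutdown: flush now returns `false` (instead of raising) when messages remain queued after the timeout, and purge then fails their delivery handles with `purge_queue` errors. A shutdown sketch (broker address and topic name are assumptions):

```ruby
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
producer.produce(topic: "events", payload: "bye")

unless producer.flush(5_000)
  # Still undelivered after 5s: drop queued + in-flight messages so that
  # pending DeliveryHandle#wait calls fail fast instead of hanging.
  producer.purge
end

producer.close
```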
@@ -88,9 +165,9 @@ module Rdkafka
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
     #
-    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
-    #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
+    #
+    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
     def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
       closed_producer_check(__method__)

@@ -193,6 +270,7 @@ module Rdkafka
     end

     private
+
     def closed_producer_check(method)
       raise Rdkafka::ClosedProducerError.new(method) if closed?
     end
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 module Rdkafka
-  VERSION = "0.13.0"
-  LIBRDKAFKA_VERSION = "2.0.2"
-  LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
+  VERSION = "0.15.0"
+  LIBRDKAFKA_VERSION = "2.3.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
data/lib/rdkafka.rb CHANGED
@@ -1,13 +1,29 @@
 # frozen_string_literal: true

-require "rdkafka/version"
+require "logger"
+require "objspace"
+require "ffi"
+require "json"

+require "rdkafka/version"
+require "rdkafka/helpers/time"
 require "rdkafka/abstract_handle"
 require "rdkafka/admin"
 require "rdkafka/admin/create_topic_handle"
 require "rdkafka/admin/create_topic_report"
+require "rdkafka/admin/delete_groups_handle"
+require "rdkafka/admin/delete_groups_report"
 require "rdkafka/admin/delete_topic_handle"
 require "rdkafka/admin/delete_topic_report"
+require "rdkafka/admin/create_partitions_handle"
+require "rdkafka/admin/create_partitions_report"
+require "rdkafka/admin/create_acl_handle"
+require "rdkafka/admin/create_acl_report"
+require "rdkafka/admin/delete_acl_handle"
+require "rdkafka/admin/delete_acl_report"
+require "rdkafka/admin/describe_acl_handle"
+require "rdkafka/admin/describe_acl_report"
+require "rdkafka/admin/acl_binding_result"
 require "rdkafka/bindings"
 require "rdkafka/callbacks"
 require "rdkafka/config"
@@ -22,3 +38,7 @@ require "rdkafka/native_kafka"
 require "rdkafka/producer"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
+
+# Main Rdkafka namespace of this gem
+module Rdkafka
+end
data/rdkafka.gemspec CHANGED
@@ -4,11 +4,10 @@ require File.expand_path('lib/rdkafka/version', __dir__)

 Gem::Specification.new do |gem|
   gem.authors = ['Thijs Cadier']
-  gem.email = ["thijs@appsignal.com"]
+  gem.email = ["contact@karafka.io"]
   gem.description = "Modern Kafka client library for Ruby based on librdkafka"
   gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.4+."
   gem.license = 'MIT'
-  gem.homepage = 'https://github.com/thijsc/rdkafka-ruby'

   gem.files = `git ls-files`.split($\)
   gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
@@ -16,8 +15,13 @@ Gem::Specification.new do |gem|
   gem.name = 'rdkafka'
   gem.require_paths = ['lib']
   gem.version = Rdkafka::VERSION
-  gem.required_ruby_version = '>= 2.6'
+  gem.required_ruby_version = '>= 2.7'
   gem.extensions = %w(ext/Rakefile)
+  gem.cert_chain = %w[certs/cert_chain.pem]
+
+  if $PROGRAM_NAME.end_with?('gem')
+    gem.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
+  end

   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -29,4 +33,14 @@ Gem::Specification.new do |gem|
   gem.add_development_dependency 'simplecov'
   gem.add_development_dependency 'guard'
   gem.add_development_dependency 'guard-rspec'
+
+  gem.metadata = {
+    'funding_uri' => 'https://karafka.io/#become-pro',
+    'homepage_uri' => 'https://karafka.io',
+    'changelog_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/CHANGELOG.md',
+    'bug_tracker_uri' => 'https://github.com/karafka/rdkafka-ruby/issues',
+    'source_code_uri' => 'https://github.com/karafka/rdkafka-ruby',
+    'documentation_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/README.md',
+    'rubygems_mfa_required' => 'true'
+  }
 end
data/renovate.json ADDED
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ]
+}