rdkafka 0.12.0 → 0.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +57 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +155 -93
  10. data/Gemfile +2 -0
  11. data/{LICENSE → MIT-LICENSE} +2 -1
  12. data/README.md +76 -29
  13. data/Rakefile +2 -0
  14. data/certs/cert_chain.pem +26 -0
  15. data/docker-compose.yml +18 -15
  16. data/ext/README.md +1 -1
  17. data/ext/Rakefile +46 -27
  18. data/lib/rdkafka/abstract_handle.rb +41 -25
  19. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  20. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  21. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  22. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  23. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  24. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  25. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  26. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  29. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  30. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  31. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  32. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  33. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  34. data/lib/rdkafka/admin.rb +494 -35
  35. data/lib/rdkafka/bindings.rb +180 -41
  36. data/lib/rdkafka/callbacks.rb +202 -1
  37. data/lib/rdkafka/config.rb +62 -25
  38. data/lib/rdkafka/consumer/headers.rb +24 -9
  39. data/lib/rdkafka/consumer/message.rb +3 -1
  40. data/lib/rdkafka/consumer/partition.rb +2 -0
  41. data/lib/rdkafka/consumer/topic_partition_list.rb +13 -8
  42. data/lib/rdkafka/consumer.rb +243 -111
  43. data/lib/rdkafka/error.rb +15 -0
  44. data/lib/rdkafka/helpers/time.rb +14 -0
  45. data/lib/rdkafka/metadata.rb +25 -2
  46. data/lib/rdkafka/native_kafka.rb +120 -0
  47. data/lib/rdkafka/producer/delivery_handle.rb +16 -2
  48. data/lib/rdkafka/producer/delivery_report.rb +22 -2
  49. data/lib/rdkafka/producer.rb +151 -21
  50. data/lib/rdkafka/version.rb +5 -3
  51. data/lib/rdkafka.rb +24 -2
  52. data/rdkafka.gemspec +21 -5
  53. data/renovate.json +6 -0
  54. data/spec/rdkafka/abstract_handle_spec.rb +1 -1
  55. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  56. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  57. data/spec/rdkafka/admin/create_topic_handle_spec.rb +1 -1
  58. data/spec/rdkafka/admin/create_topic_report_spec.rb +1 -1
  59. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  60. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  61. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +1 -1
  62. data/spec/rdkafka/admin/delete_topic_report_spec.rb +1 -1
  63. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  64. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  65. data/spec/rdkafka/admin_spec.rb +209 -5
  66. data/spec/rdkafka/bindings_spec.rb +2 -1
  67. data/spec/rdkafka/callbacks_spec.rb +1 -1
  68. data/spec/rdkafka/config_spec.rb +24 -3
  69. data/spec/rdkafka/consumer/headers_spec.rb +60 -0
  70. data/spec/rdkafka/consumer/message_spec.rb +1 -1
  71. data/spec/rdkafka/consumer/partition_spec.rb +1 -1
  72. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +20 -1
  73. data/spec/rdkafka/consumer_spec.rb +352 -61
  74. data/spec/rdkafka/error_spec.rb +1 -1
  75. data/spec/rdkafka/metadata_spec.rb +4 -3
  76. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -35
  77. data/spec/rdkafka/producer/delivery_handle_spec.rb +4 -1
  78. data/spec/rdkafka/producer/delivery_report_spec.rb +11 -3
  79. data/spec/rdkafka/producer_spec.rb +234 -22
  80. data/spec/spec_helper.rb +20 -2
  81. data.tar.gz.sig +0 -0
  82. metadata +81 -17
  83. metadata.gz.sig +0 -0
  84. data/.semaphore/semaphore.yml +0 -23
  85. data/bin/console +0 -11
  86. data/lib/rdkafka/producer/client.rb +0 -47
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,8 +1,21 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Metadata
     attr_reader :brokers, :topics

-    def initialize(native_client, topic_name = nil)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -14,12 +27,22 @@ module Rdkafka
       topic_flag = topic_name.nil? ? 1 : 0

       # Retrieve the Metadata
-      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)

       # Error Handling
       raise Rdkafka::RdkafkaError.new(result) unless result.zero?

       metadata_from_native(ptr.read_pointer)
+    rescue ::Rdkafka::RdkafkaError => e
+      raise unless RETRIED_ERRORS.include?(e.code)
+      raise if attempt > 10
+
+      backoff_factor = 2**attempt
+      timeout = backoff_factor * 0.1
+
+      sleep(timeout)
+
+      retry
     ensure
       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
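
Note: the retry added above is bounded. Only `timed_out` and `leader_not_available` are retried, at most 10 attempts, sleeping `2**attempt * 0.1` seconds between tries. A quick sketch of the schedule this arithmetic produces (illustrative only, not part of the diff):

```ruby
# Backoff schedule of the metadata retry above: attempt 1 retries after
# 0.2s, attempt 10 after 102.4s; past 10 attempts the error is re-raised.
(1..10).each do |attempt|
  backoff = (2**attempt) * 0.1
  puts format("attempt %2d -> sleep %.1fs", attempt, backoff)
end
```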
data/lib/rdkafka/native_kafka.rb ADDED
@@ -0,0 +1,120 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # @private
+  # A wrapper around a native kafka that polls and cleanly exits
+  class NativeKafka
+    def initialize(inner, run_polling_thread:, opaque:)
+      @inner = inner
+      @opaque = opaque
+      # Lock around external access
+      @access_mutex = Mutex.new
+      # Lock around internal polling
+      @poll_mutex = Mutex.new
+      # Lock around decrementing the operations in progress counter
+      # We have two mutexes - one for increment (`@access_mutex`) and one for decrement
+      # (`@decrement_mutex`) because they serve different purposes:
+      #
+      # - `@access_mutex` allows us to lock the execution and make sure that any operation within
+      #   the `#synchronize` is the only one running and that there are no other running
+      #   operations.
+      # - `@decrement_mutex` ensures that our decrement operation is thread-safe for any Ruby
+      #   implementation.
+      #
+      # We do not use the same mutex, because it could create a deadlock when an already
+      # incremented operation cannot decrement because `@access_mutex` is now owned by a different
+      # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
+      @decrement_mutex = Mutex.new
+      # counter for operations in progress using inner
+      @operations_in_progress = 0
+
+      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+
+      if run_polling_thread
+        # Start thread to poll client for delivery callbacks,
+        # not used in consumer.
+        @polling_thread = Thread.new do
+          loop do
+            @poll_mutex.synchronize do
+              Rdkafka::Bindings.rd_kafka_poll(inner, 100)
+            end
+
+            # Exit thread if closing and the poll queue is empty
+            if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+              break
+            end
+          end
+        end
+
+        @polling_thread.abort_on_exception = true
+        @polling_thread[:closing] = false
+      end
+
+      @closing = false
+    end
+
+    def with_inner
+      if @access_mutex.owned?
+        @operations_in_progress += 1
+      else
+        @access_mutex.synchronize { @operations_in_progress += 1 }
+      end
+
+      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+    ensure
+      @decrement_mutex.synchronize { @operations_in_progress -= 1 }
+    end
+
+    def synchronize(&block)
+      @access_mutex.synchronize do
+        # Wait for any commands using the inner to finish
+        # This can take a while on blocking operations like polling but is essential not to proceed
+        # with certain types of operations like resource destruction as it can cause the process
+        # to hang or crash
+        sleep(0.01) until @operations_in_progress.zero?
+
+        with_inner(&block)
+      end
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    def closed?
+      @closing || @inner.nil?
+    end
+
+    def close(object_id=nil)
+      return if closed?
+
+      synchronize do
+        # Indicate to the outside world that we are closing
+        @closing = true
+
+        if @polling_thread
+          # Indicate to polling thread that we're closing
+          @polling_thread[:closing] = true
+
+          # Wait for the polling thread to finish up,
+          # this can be aborted in practice if this
+          # code runs from a finalizer.
+          @polling_thread.join
+        end
+
+        # Destroy the client after locking both mutexes
+        @poll_mutex.lock
+
+        # This check prevents a race condition, where we would enter the close in two threads
+        # and after unlocking the primary one that held the lock but finished, ours would be
+        # unlocked and would continue to run, trying to destroy inner twice
+        return unless @inner
+
+        Rdkafka::Bindings.rd_kafka_destroy(@inner)
+        @inner = nil
+        @opaque = nil
+      end
+    end
+  end
+end
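
To see why `close` cannot simply take `@access_mutex` for the whole duration of every call: `with_inner` only bumps a counter under the mutex and releases it before yielding, so many operations can use the inner handle concurrently, while `synchronize` holds the mutex and spins until the counter drains. A minimal sketch of that pattern in isolation (`Guard` is a hypothetical stand-in, not the gem's API):

```ruby
# Sketch of the counter-draining pattern used by NativeKafka above.
class Guard
  def initialize
    @access_mutex = Mutex.new    # serializes exclusive sections
    @decrement_mutex = Mutex.new # makes the decrement thread-safe
    @in_progress = 0
  end

  def shared
    @access_mutex.synchronize { @in_progress += 1 }
    yield
  ensure
    # A separate mutex here avoids deadlocking against an exclusive
    # section that owns @access_mutex while waiting for us to finish.
    @decrement_mutex.synchronize { @in_progress -= 1 }
  end

  def exclusive
    @access_mutex.synchronize do
      sleep(0.01) until @in_progress.zero? # drain in-flight shared work
      yield
    end
  end
end
```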
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
@@ -6,7 +8,11 @@ module Rdkafka
       layout :pending, :bool,
              :response, :int,
              :partition, :int,
-             :offset, :int64
+             :offset, :int64,
+             :topic_name, :pointer
+
+      # @return [Object, nil] label set during message production or nil by default
+      attr_accessor :label

       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
@@ -15,7 +21,15 @@ module Rdkafka

       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset])
+        DeliveryReport.new(
+          self[:partition],
+          self[:offset],
+          # For some errors we will not get a topic name reference and in cases like this
+          # we should not return it
+          self[:topic_name].null? ? nil : self[:topic_name].read_string,
+          self[:response] != 0 ? RdkafkaError.new(self[:response]) : nil,
+          label
+        )
       end
     end
   end
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Delivery report for a successfully produced message.
@@ -10,16 +12,34 @@
       # @return [Integer]
       attr_reader :offset

+      # The name of the topic this message was produced to or nil in case of reports with errors
+      # where the topic was not reached.
+      #
+      # @return [String, nil]
+      attr_reader :topic_name
+
       # Error in case one happened during produce.
-      # @return [String]
+      # @return [Integer]
       attr_reader :error

+      # @return [Object, nil] label set during message production or nil by default
+      attr_reader :label
+
+      # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`,
+      # where the topic name is under the `#topic` method. That way we have a consistent name that
+      # is present in both places.
+      #
+      # We do not remove the original `#topic_name` because of backwards compatibility.
+      alias topic topic_name
+
       private

-      def initialize(partition, offset, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
        @partition = partition
        @offset = offset
+       @topic_name = topic_name
        @error = error
+       @label = label
      end
    end
  end
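
Together, the two changes above surface the topic name and an arbitrary label on the delivery report. A usage sketch (assumes a reachable broker on localhost:9092 and an existing `example` topic):

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

handle = producer.produce(topic: "example", payload: "hi", label: { request_id: 42 })
report = handle.wait(max_wait_timeout: 10)

report.topic # => "example" (alias of #topic_name)
report.label # => { request_id: 42 }
report.error # => nil on success, an Rdkafka::RdkafkaError otherwise

producer.close
```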
data/lib/rdkafka/producer.rb CHANGED
@@ -1,8 +1,15 @@
-require "objspace"
+# frozen_string_literal: true

 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
+    include Helpers::Time
+
+    # Cache partitions count for 30 seconds
+    PARTITIONS_COUNT_TTL = 30
+
+    private_constant :PARTITIONS_COUNT_TTL
+
     # @private
     # Returns the current delivery callback, by default this is nil.
@@ -16,12 +23,41 @@ module Rdkafka
     attr_reader :delivery_callback_arity

     # @private
-    def initialize(client, partitioner_name)
-      @client = client
+    # @param native_kafka [NativeKafka]
+    # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
+    #   the "consistent_random" default
+    def initialize(native_kafka, partitioner_name)
+      @native_kafka = native_kafka
       @partitioner_name = partitioner_name || "consistent_random"

-      # Makes sure, that the producer gets closed before it gets GCed by Ruby
-      ObjectSpace.define_finalizer(self, client.finalizer)
+      # Makes sure that the native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+
+      @_partitions_count_cache = Hash.new do |cache, topic|
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
+
+        # This approach caches a failure to fetch for only 5 seconds. This makes sure that we
+        # do not cache the failure for too long but also "buys" us a bit of time in case there
+        # are issues in the cluster, so we won't overload it with consecutive requests.
+        cache[topic] = if partition_count.positive?
+          [monotonic_now, partition_count]
+        else
+          [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
+        end
+      end
+    end
+
+    # @return [String] producer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
     end

     # Set a callback that will be called every time a message is successfully produced.
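
The cache bookkeeping above is worth spelling out: a successful lookup is stamped with `monotonic_now`, so it lives for the full 30-second TTL, while a failed lookup (`-1`) is back-dated by `TTL - 5` seconds, so it expires after only 5 seconds. A small sketch of that arithmetic (hypothetical clock values):

```ruby
# Effective cache lifetimes implied by the stamps above.
PARTITIONS_COUNT_TTL = 30

now = 100.0 # pretend monotonic clock reading at insert time

success_stamp = now                            # evicted once the clock passes 130
failure_stamp = now - PARTITIONS_COUNT_TTL + 5 # = 75, evicted once the clock passes 105

# eviction rule from #partition_count: monotonic_now - stamp > TTL
puts(130.0 - success_stamp > PARTITIONS_COUNT_TTL) # false at t=130, true just after
puts(105.1 - failure_stamp > PARTITIONS_COUNT_TTL) # true: a failure lives ~5s
```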
@@ -38,21 +74,92 @@

     # Close this producer and wait for the internal poll queue to empty.
     def close
+      return if closed?
       ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
+
+    # Whether this producer has closed
+    def closed?
+      @native_kafka.closed?
+    end
+
+    # Wait until all outstanding producer requests are completed, with the given timeout in
+    # milliseconds. Call this before closing a producer to ensure delivery of all messages.
+    #
+    # @param timeout_ms [Integer] how long should we wait for flush of all messages
+    # @return [Boolean] true if no more data and all was flushed, false in case there are still
+    #   outgoing messages after the timeout
+    #
+    # @note We raise an exception for other errors because based on the librdkafka docs, there
+    #   should be no other errors.
+    #
+    # @note For `timed_out` we do not raise an error to keep it backwards compatible
+    def flush(timeout_ms=5_000)
+      closed_producer_check(__method__)
+
+      code = nil
+
+      @native_kafka.with_inner do |inner|
+        code = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+      end
+
+      # Early skip not to build the error message
+      return true if code.zero?
+
+      error = Rdkafka::RdkafkaError.new(code)

-      @client.close
+      return false if error.code == :timed_out
+
+      raise(error)
+    end
+
+    # Purges the outgoing queue and releases all resources.
+    #
+    # Useful when closing the producer with outgoing messages to unstable clusters or when for
+    # any other reason waiting cannot go on anymore. This purges both the queue and all the
+    # inflight requests + updates the delivery handles statuses so they can be materialized into
+    # `purge_queue` errors.
+    def purge
+      closed_producer_check(__method__)
+
+      code = nil
+
+      @native_kafka.with_inner do |inner|
+        code = Bindings.rd_kafka_purge(
+          inner,
+          Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+        )
+      end
+
+      code.zero? || raise(Rdkafka::RdkafkaError.new(code))
+
+      # Wait for the purge to affect everything
+      sleep(0.001) until flush(100)
+
+      true
     end

     # Partition count for a given topic.
-    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
     #
     # @param topic [String] The topic name.
+    # @return [Integer] partition count for a given topic or `-1` if it could not be obtained.
     #
-    # @return partition count [Integer,nil]
+    # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
+    #   auto-created after returning nil.
     #
+    # @note We cache the partition count for a given topic for a given time.
+    #   This prevents us, in case someone uses `partition_key`, from querying for the count with
+    #   each message. Instead we query once every 30 seconds at most if we have a valid partition
+    #   count, or every 5 seconds in case we were not able to obtain the number of partitions.
     def partition_count(topic)
       closed_producer_check(__method__)
-      Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
+
+      @_partitions_count_cache.delete_if do |_, cached|
+        monotonic_now - cached.first > PARTITIONS_COUNT_TTL
+      end
+
+      @_partitions_count_cache[topic].last
     end

     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
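
A shutdown sketch using the new `flush` and `purge` (timeout values are illustrative, not prescriptive):

```ruby
# Graceful shutdown: try to deliver everything, give up after a deadline.
def shutdown(producer)
  # flush returns true once the outgoing queue is empty, false on timeout
  delivered = producer.flush(10_000)

  unless delivered
    # Cluster unreachable or too slow: drop queued and inflight messages.
    # Pending delivery handles then materialize as purge_queue errors.
    producer.purge
  end

  producer.close
end
```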
@@ -67,11 +174,12 @@
     # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
-    #
-    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
+    # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
     #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
-    def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
+    #
+    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
+    def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil, label: nil)
       closed_producer_check(__method__)

       # Start by checking and converting the input
@@ -93,7 +201,7 @@
       if partition_key
         partition_count = partition_count(topic)
         # If the topic is not present, set to -1
-        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
+        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count.positive?
       end

       # If partition is nil, use -1 to let librdkafka set the partition randomly or
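
Note the guard change above: the custom partitioner now runs only when a positive partition count is known; for an unknown topic (count `-1`), `partition` stays nil and librdkafka picks the partition itself. Usage is unchanged (sketch; the topic name is assumed):

```ruby
# Messages with the same partition_key land on the same partition,
# resolved via the configured partitioner ("consistent_random" by default).
producer.produce(topic: "events", payload: "v1", partition_key: "user-123")
producer.produce(topic: "events", payload: "v2", partition_key: "user-123")
```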
@@ -113,6 +221,7 @@
       end

       delivery_handle = DeliveryHandle.new
+      delivery_handle.label = label
       delivery_handle[:pending] = true
       delivery_handle[:response] = -1
       delivery_handle[:partition] = -1
@@ -143,10 +252,12 @@
       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END

       # Produce the message
-      response = Rdkafka::Bindings.rd_kafka_producev(
-        @client.native,
-        *args
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_producev(
+          inner,
+          *args
+        )
+      end

       # Raise error if the produce call was not successful
       if response != 0
@@ -157,22 +268,41 @@
       delivery_handle
     end

-    # @private
+    # Calls (if registered) the delivery callback
+    #
+    # @param delivery_report [Producer::DeliveryReport]
+    # @param delivery_handle [Producer::DeliveryHandle]
     def call_delivery_callback(delivery_report, delivery_handle)
       return unless @delivery_callback

-      args = [delivery_report, delivery_handle].take(@delivery_callback_arity)
-      @delivery_callback.call(*args)
+      case @delivery_callback_arity
+      when 0
+        @delivery_callback.call
+      when 1
+        @delivery_callback.call(delivery_report)
+      else
+        @delivery_callback.call(delivery_report, delivery_handle)
+      end
     end

+    # Figures out the arity of a given block/method
+    #
+    # @param callback [#call, Proc]
+    # @return [Integer] arity of the provided block/method
     def arity(callback)
       return callback.arity if callback.respond_to?(:arity)

       callback.method(:call).arity
     end

+    private
+
+    # Ensures no operations can happen on a closed producer
+    #
+    # @param method [Symbol] name of the method that invoked the producer
+    # @raise [Rdkafka::ClosedProducerError]
     def closed_producer_check(method)
-      raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
+      raise Rdkafka::ClosedProducerError.new(method) if closed?
     end
   end
 end
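
The explicit `case` above dispatches on the registered callback's arity, so all three forms below are supported (sketch):

```ruby
# Zero arguments: just a notification that something was delivered
producer.delivery_callback = -> { puts "delivered" }

# One argument: the delivery report
producer.delivery_callback = ->(report) { puts report.offset }

# Two arguments: report plus the original delivery handle (with its label)
producer.delivery_callback = lambda do |report, handle|
  puts "#{handle.label}: #{report.topic}/#{report.partition}@#{report.offset}"
end
```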
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,7 @@
+# frozen_string_literal: true
+
 module Rdkafka
-  VERSION = "0.12.0"
-  LIBRDKAFKA_VERSION = "1.9.0"
-  LIBRDKAFKA_SOURCE_SHA256 = "59b6088b69ca6cf278c3f9de5cd6b7f3fd604212cd1c59870bc531c54147e889"
+  VERSION = "0.15.1"
+  LIBRDKAFKA_VERSION = "2.3.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
data/lib/rdkafka.rb CHANGED
@@ -1,11 +1,29 @@
-require "rdkafka/version"
+# frozen_string_literal: true
+
+require "logger"
+require "objspace"
+require "ffi"
+require "json"

+require "rdkafka/version"
+require "rdkafka/helpers/time"
 require "rdkafka/abstract_handle"
 require "rdkafka/admin"
 require "rdkafka/admin/create_topic_handle"
 require "rdkafka/admin/create_topic_report"
+require "rdkafka/admin/delete_groups_handle"
+require "rdkafka/admin/delete_groups_report"
 require "rdkafka/admin/delete_topic_handle"
 require "rdkafka/admin/delete_topic_report"
+require "rdkafka/admin/create_partitions_handle"
+require "rdkafka/admin/create_partitions_report"
+require "rdkafka/admin/create_acl_handle"
+require "rdkafka/admin/create_acl_report"
+require "rdkafka/admin/delete_acl_handle"
+require "rdkafka/admin/delete_acl_report"
+require "rdkafka/admin/describe_acl_handle"
+require "rdkafka/admin/describe_acl_report"
+require "rdkafka/admin/acl_binding_result"
 require "rdkafka/bindings"
 require "rdkafka/callbacks"
 require "rdkafka/config"
@@ -16,7 +34,11 @@ require "rdkafka/consumer/partition"
 require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
+require "rdkafka/native_kafka"
 require "rdkafka/producer"
-require "rdkafka/producer/client"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
+
+# Main Rdkafka namespace of this gem
+module Rdkafka
+end
data/rdkafka.gemspec CHANGED
@@ -1,12 +1,13 @@
+# frozen_string_literal: true
+
 require File.expand_path('lib/rdkafka/version', __dir__)

 Gem::Specification.new do |gem|
-  gem.authors = ['Thijs Cadier']
-  gem.email = ["thijs@appsignal.com"]
+  gem.authors = ['Thijs Cadier', 'Maciej Mensfeld']
+  gem.email = ["contact@karafka.io"]
   gem.description = "Modern Kafka client library for Ruby based on librdkafka"
-  gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.4+."
+  gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.7+."
   gem.license = 'MIT'
-  gem.homepage = 'https://github.com/thijsc/rdkafka-ruby'

   gem.files = `git ls-files`.split($\)
   gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
@@ -14,8 +15,13 @@ Gem::Specification.new do |gem|
   gem.name = 'rdkafka'
   gem.require_paths = ['lib']
   gem.version = Rdkafka::VERSION
-  gem.required_ruby_version = '>= 2.6'
+  gem.required_ruby_version = '>= 2.7'
   gem.extensions = %w(ext/Rakefile)
+  gem.cert_chain = %w[certs/cert_chain.pem]
+
+  if $PROGRAM_NAME.end_with?('gem')
+    gem.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
+  end

   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -27,4 +33,14 @@ Gem::Specification.new do |gem|
   gem.add_development_dependency 'simplecov'
   gem.add_development_dependency 'guard'
   gem.add_development_dependency 'guard-rspec'
+
+  gem.metadata = {
+    'funding_uri' => 'https://karafka.io/#become-pro',
+    'homepage_uri' => 'https://karafka.io',
+    'changelog_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/CHANGELOG.md',
+    'bug_tracker_uri' => 'https://github.com/karafka/rdkafka-ruby/issues',
+    'source_code_uri' => 'https://github.com/karafka/rdkafka-ruby',
+    'documentation_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/README.md',
+    'rubygems_mfa_required' => 'true'
+  }
 end
data/renovate.json ADDED
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ]
+}
data/spec/rdkafka/abstract_handle_spec.rb CHANGED
@@ -1,4 +1,4 @@
-require "spec_helper"
+# frozen_string_literal: true

 describe Rdkafka::AbstractHandle do
   let(:response) { 0 }
data/spec/rdkafka/admin/create_acl_handle_spec.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::CreateAclHandle do
+  # If create acl was successful there is no error object
+  # and the error code is set to RD_KAFKA_RESP_ERR_NO_ERROR
+  # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L169
+  let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR }
+
+  subject do
+    Rdkafka::Admin::CreateAclHandle.new.tap do |handle|
+      handle[:pending] = pending_handle
+      handle[:response] = response
+      # If create acl was successful there is no error object and the error_string is set to ""
+      # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L178
+      handle[:response_string] = FFI::MemoryPointer.from_string("")
+    end
+  end
+
+  describe "#wait" do
+    let(:pending_handle) { true }
+
+    it "should wait until the timeout and then raise an error" do
+      expect {
+        subject.wait(max_wait_timeout: 0.1)
+      }.to raise_error Rdkafka::Admin::CreateAclHandle::WaitTimeoutError, /create acl/
+    end
+
+    context "when not pending anymore and no error" do
+      let(:pending_handle) { false }
+
+      it "should return a create acl report" do
+        report = subject.wait
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+
+      it "should wait without a timeout" do
+        report = subject.wait(max_wait_timeout: nil)
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+    end
+  end
+
+  describe "#raise_error" do
+    let(:pending_handle) { false }
+
+    it "should raise the appropriate error" do
+      expect {
+        subject.raise_error
+      }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+    end
+  end
+end