rdkafka 0.12.0 → 0.15.0

Files changed (86)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +2 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +58 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +141 -93
  10. data/Gemfile +2 -0
  11. data/{LICENSE → MIT-LICENSE} +2 -1
  12. data/README.md +64 -29
  13. data/Rakefile +2 -0
  14. data/certs/cert_chain.pem +26 -0
  15. data/docker-compose.yml +18 -15
  16. data/ext/README.md +1 -1
  17. data/ext/Rakefile +3 -1
  18. data/lib/rdkafka/abstract_handle.rb +41 -25
  19. data/lib/rdkafka/admin/acl_binding_result.rb +37 -0
  20. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  21. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  22. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  23. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  24. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  25. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  26. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  29. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  30. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  31. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  32. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  33. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  34. data/lib/rdkafka/admin.rb +494 -35
  35. data/lib/rdkafka/bindings.rb +175 -40
  36. data/lib/rdkafka/callbacks.rb +194 -1
  37. data/lib/rdkafka/config.rb +62 -25
  38. data/lib/rdkafka/consumer/headers.rb +24 -9
  39. data/lib/rdkafka/consumer/message.rb +3 -1
  40. data/lib/rdkafka/consumer/partition.rb +2 -0
  41. data/lib/rdkafka/consumer/topic_partition_list.rb +13 -8
  42. data/lib/rdkafka/consumer.rb +219 -102
  43. data/lib/rdkafka/error.rb +15 -0
  44. data/lib/rdkafka/helpers/time.rb +14 -0
  45. data/lib/rdkafka/metadata.rb +25 -2
  46. data/lib/rdkafka/native_kafka.rb +120 -0
  47. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  48. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  49. data/lib/rdkafka/producer.rb +117 -17
  50. data/lib/rdkafka/version.rb +5 -3
  51. data/lib/rdkafka.rb +24 -2
  52. data/rdkafka.gemspec +19 -3
  53. data/renovate.json +6 -0
  54. data/spec/rdkafka/abstract_handle_spec.rb +1 -1
  55. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  56. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  57. data/spec/rdkafka/admin/create_topic_handle_spec.rb +1 -1
  58. data/spec/rdkafka/admin/create_topic_report_spec.rb +1 -1
  59. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  60. data/spec/rdkafka/admin/delete_acl_report_spec.rb +71 -0
  61. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +1 -1
  62. data/spec/rdkafka/admin/delete_topic_report_spec.rb +1 -1
  63. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  64. data/spec/rdkafka/admin/describe_acl_report_spec.rb +72 -0
  65. data/spec/rdkafka/admin_spec.rb +209 -5
  66. data/spec/rdkafka/bindings_spec.rb +2 -1
  67. data/spec/rdkafka/callbacks_spec.rb +1 -1
  68. data/spec/rdkafka/config_spec.rb +24 -3
  69. data/spec/rdkafka/consumer/headers_spec.rb +60 -0
  70. data/spec/rdkafka/consumer/message_spec.rb +1 -1
  71. data/spec/rdkafka/consumer/partition_spec.rb +1 -1
  72. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +20 -1
  73. data/spec/rdkafka/consumer_spec.rb +332 -61
  74. data/spec/rdkafka/error_spec.rb +1 -1
  75. data/spec/rdkafka/metadata_spec.rb +4 -3
  76. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -35
  77. data/spec/rdkafka/producer/delivery_handle_spec.rb +4 -1
  78. data/spec/rdkafka/producer/delivery_report_spec.rb +7 -3
  79. data/spec/rdkafka/producer_spec.rb +208 -20
  80. data/spec/spec_helper.rb +20 -2
  81. data.tar.gz.sig +3 -0
  82. metadata +79 -16
  83. metadata.gz.sig +3 -0
  84. data/.semaphore/semaphore.yml +0 -23
  85. data/bin/console +0 -11
  86. data/lib/rdkafka/producer/client.rb +0 -47
data/lib/rdkafka/native_kafka.rb ADDED
@@ -0,0 +1,120 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # @private
+  # A wrapper around a native kafka that polls and cleanly exits
+  class NativeKafka
+    def initialize(inner, run_polling_thread:, opaque:)
+      @inner = inner
+      @opaque = opaque
+      # Lock around external access
+      @access_mutex = Mutex.new
+      # Lock around internal polling
+      @poll_mutex = Mutex.new
+      # Lock around decrementing the operations in progress counter
+      # We have two mutexes - one for increment (`@access_mutex`) and one for decrement
+      # (`@decrement_mutex`) because they serve different purposes:
+      #
+      # - `@access_mutex` allows us to lock the execution and make sure that any operation within
+      #   the `#synchronize` is the only one running and that there are no other running
+      #   operations.
+      # - `@decrement_mutex` ensures that our decrement operation is thread-safe for any Ruby
+      #   implementation.
+      #
+      # We do not use the same mutex, because it could create a deadlock when an already
+      # incremented operation cannot decrement because `@access_mutex` is now owned by a different
+      # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
+      @decrement_mutex = Mutex.new
+      # Counter for operations in progress using inner
+      @operations_in_progress = 0
+
+      # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
+      Rdkafka::Bindings.rd_kafka_poll(inner, 0)
+
+      if run_polling_thread
+        # Start thread to poll client for delivery callbacks,
+        # not used in consumer.
+        @polling_thread = Thread.new do
+          loop do
+            @poll_mutex.synchronize do
+              Rdkafka::Bindings.rd_kafka_poll(inner, 100)
+            end
+
+            # Exit thread if closing and the poll queue is empty
+            if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+              break
+            end
+          end
+        end
+
+        @polling_thread.abort_on_exception = true
+        @polling_thread[:closing] = false
+      end
+
+      @closing = false
+    end
+
+    def with_inner
+      if @access_mutex.owned?
+        @operations_in_progress += 1
+      else
+        @access_mutex.synchronize { @operations_in_progress += 1 }
+      end
+
+      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+    ensure
+      @decrement_mutex.synchronize { @operations_in_progress -= 1 }
+    end
+
+    def synchronize(&block)
+      @access_mutex.synchronize do
+        # Wait for any commands using the inner to finish
+        # This can take a while on blocking operations like polling but is essential not to
+        # proceed with certain types of operations like resource destruction as it can cause
+        # the process to hang or crash
+        sleep(0.01) until @operations_in_progress.zero?
+
+        with_inner(&block)
+      end
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    def closed?
+      @closing || @inner.nil?
+    end
+
+    def close(object_id=nil)
+      return if closed?
+
+      synchronize do
+        # Indicate to the outside world that we are closing
+        @closing = true
+
+        if @polling_thread
+          # Indicate to polling thread that we're closing
+          @polling_thread[:closing] = true
+
+          # Wait for the polling thread to finish up,
+          # this can be aborted in practice if this
+          # code runs from a finalizer.
+          @polling_thread.join
+        end
+
+        # Destroy the client after locking both mutexes
+        @poll_mutex.lock
+
+        # This check prevents a race condition, where we would enter the close in two threads
+        # and, after the primary one that held the lock finished and unlocked, ours would be
+        # unlocked and would continue to run, trying to destroy inner twice
+        return unless @inner
+
+        Rdkafka::Bindings.rd_kafka_destroy(@inner)
+        @inner = nil
+        @opaque = nil
+      end
+    end
+  end
+end
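The two-mutex counter scheme above is the core of this new class. Below is a minimal, standalone Ruby sketch of the same pattern using a hypothetical `Guarded` class (not part of the gem): shared callers increment an in-progress counter under the access mutex, while an exclusive section takes the access mutex and waits for the counter to drain before proceeding.

# Illustration only; mirrors the NativeKafka#with_inner / #synchronize contract above.
class Guarded
  def initialize(inner)
    @inner = inner
    @access_mutex = Mutex.new    # guards entry/increment and exclusive sections
    @decrement_mutex = Mutex.new # guards decrement on any Ruby runtime
    @in_progress = 0
  end

  # Shared access: many threads may run this concurrently.
  def with_inner
    @access_mutex.synchronize { @in_progress += 1 }
    yield(@inner)
  ensure
    @decrement_mutex.synchronize { @in_progress -= 1 }
  end

  # Exclusive access: waits until all shared users have finished.
  def synchronize(&block)
    @access_mutex.synchronize do
      sleep(0.01) until @in_progress.zero?
      block.call(@inner)
    end
  end
end

guarded = Guarded.new([])
workers = 5.times.map { Thread.new { guarded.with_inner { |arr| arr << 1 } } }
workers.each(&:join)
guarded.synchronize { |arr| arr.clear } # runs only once no shared caller is in flight

Using a separate mutex for the decrement is what lets an in-flight shared call finish even while the exclusive section already holds the access mutex and is waiting on the counter.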
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
@@ -6,7 +8,8 @@ module Rdkafka
       layout :pending, :bool,
              :response, :int,
              :partition, :int,
-             :offset, :int64
+             :offset, :int64,
+             :topic_name, :pointer
 
       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
@@ -15,7 +18,7 @@ module Rdkafka
 
       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset])
+        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
       end
     end
   end
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Delivery report for a successfully produced message.
@@ -10,15 +12,20 @@ module Rdkafka
       # @return [Integer]
       attr_reader :offset
 
-      # Error in case happen during produce.
+      # The name of the topic this message was produced to.
       # @return [String]
+      attr_reader :topic_name
+
+      # Error in case one happened during produce.
+      # @return [Integer]
       attr_reader :error
 
       private
 
-      def initialize(partition, offset, error = nil)
+      def initialize(partition, offset, topic_name = nil, error = nil)
         @partition = partition
         @offset = offset
+        @topic_name = topic_name
         @error = error
       end
     end
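Together, the two changes above thread a `:topic_name` field from the native delivery handle into `DeliveryReport`. A usage sketch (assumes a local broker on localhost:9092 and a writable "events" topic):

require "rdkafka"

config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
producer = config.producer

handle = producer.produce(topic: "events", payload: "hello")
report = handle.wait(max_wait_timeout: 5)

# New in this release: the report knows which topic it was produced to.
puts report.topic_name # => "events"
puts report.partition
puts report.offset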
data/lib/rdkafka/producer.rb CHANGED
@@ -1,8 +1,15 @@
-require "objspace"
+# frozen_string_literal: true
 
 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
+    include Helpers::Time
+
+    # Cache partitions count for 30 seconds
+    PARTITIONS_COUNT_TTL = 30
+
+    private_constant :PARTITIONS_COUNT_TTL
+
     # @private
     # Returns the current delivery callback, by default this is nil.
     #
@@ -16,12 +23,32 @@ module Rdkafka
     attr_reader :delivery_callback_arity
 
     # @private
-    def initialize(client, partitioner_name)
-      @client = client
+    def initialize(native_kafka, partitioner_name)
+      @native_kafka = native_kafka
       @partitioner_name = partitioner_name || "consistent_random"
 
-      # Makes sure, that the producer gets closed before it gets GCed by Ruby
-      ObjectSpace.define_finalizer(self, client.finalizer)
+      # Makes sure that the native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+
+      @_partitions_count_cache = Hash.new do |cache, topic|
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        cache[topic] = [
+          monotonic_now,
+          topic_metadata ? topic_metadata[:partition_count] : nil
+        ]
+      end
+    end
+
+    # @return [String] producer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
     end
 
     # Set a callback that will be called every time a message is successfully produced.
@@ -38,21 +65,91 @@ module Rdkafka
 
     # Close this producer and wait for the internal poll queue to empty.
     def close
+      return if closed?
       ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
 
-      @client.close
+    # Whether this producer has closed
+    def closed?
+      @native_kafka.closed?
+    end
+
+    # Wait until all outstanding producer requests are completed, with the given timeout
+    # in seconds. Call this before closing a producer to ensure delivery of all messages.
+    #
+    # @param timeout_ms [Integer] how long should we wait for flush of all messages
+    # @return [Boolean] true if no more data and all was flushed, false in case there are still
+    #   outgoing messages after the timeout
+    #
+    # @note We raise an exception for other errors because based on the librdkafka docs, there
+    #   should be no other errors.
+    #
+    # @note For `timed_out` we do not raise an error to keep it backwards compatible
    def flush(timeout_ms=5_000)
+      closed_producer_check(__method__)
+
+      code = nil
+
+      @native_kafka.with_inner do |inner|
+        code = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+      end
+
+      # Early skip not to build the error message
+      return true if code.zero?
+
+      error = Rdkafka::RdkafkaError.new(code)
+
+      return false if error.code == :timed_out
+
+      raise(error)
+    end
+
+    # Purges the outgoing queue and releases all resources.
+    #
+    # Useful when closing the producer with outgoing messages to unstable clusters or when for
+    # any other reasons waiting cannot go on anymore. This purges both the queue and all the
+    # inflight requests + updates the delivery handles statuses so they can be materialized into
+    # `purge_queue` errors.
+    def purge
+      closed_producer_check(__method__)
+
+      code = nil
+
+      @native_kafka.with_inner do |inner|
+        code = Bindings.rd_kafka_purge(
+          inner,
+          Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+        )
+      end
+
+      code.zero? || raise(Rdkafka::RdkafkaError.new(code))
+
+      # Wait for the purge to affect everything
+      sleep(0.001) until flush(100)
+
+      true
     end
 
     # Partition count for a given topic.
-    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
     #
     # @param topic [String] The topic name.
+    # @return [Integer] partition count for a given topic
     #
-    # @return partition count [Integer,nil]
+    # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
+    #   auto-created after returning nil.
     #
+    # @note We cache the partition count for a given topic for a given time.
+    #   This prevents us, in case someone uses `partition_key`, from querying for the count with
+    #   each message. Instead we query once every 30 seconds at most.
     def partition_count(topic)
      closed_producer_check(__method__)
-      Rdkafka::Metadata.new(@client.native, topic).topics&.first[:partition_count]
+
+      @_partitions_count_cache.delete_if do |_, cached|
+        monotonic_now - cached.first > PARTITIONS_COUNT_TTL
+      end
+
+      @_partitions_count_cache[topic].last
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -68,9 +165,9 @@ module Rdkafka
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
     #
-    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
-    #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
+    #
+    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
     def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
       closed_producer_check(__method__)
 
@@ -143,10 +240,12 @@ module Rdkafka
       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
 
       # Produce the message
-      response = Rdkafka::Bindings.rd_kafka_producev(
-        @client.native,
-        *args
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_producev(
+          inner,
+          *args
+        )
+      end
 
       # Raise error if the produce call was not successful
       if response != 0
@@ -157,7 +256,6 @@ module Rdkafka
       delivery_handle
     end
 
-    # @private
     def call_delivery_callback(delivery_report, delivery_handle)
       return unless @delivery_callback
 
@@ -171,8 +269,10 @@ module Rdkafka
         callback.method(:call).arity
       end
 
+    private
+
     def closed_producer_check(method)
-      raise Rdkafka::ClosedProducerError.new(method) if @client.closed?
+      raise Rdkafka::ClosedProducerError.new(method) if closed?
     end
   end
 end
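`flush` and `purge` are new public producer APIs in this range, and `partition_count` now reads from a 30-second cache. A shutdown sketch, reusing the `producer` from the previous example:

puts producer.name                      # librdkafka client name, new accessor
puts producer.partition_count("events") # served from the 30s cache after the first lookup

# Try to deliver everything within 5s; on timeout, drop queued and in-flight
# messages so pending delivery handles materialize `purge_queue` errors
# instead of hanging indefinitely.
begin
  producer.purge unless producer.flush(5_000)
ensure
  producer.close
end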
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,7 @@
+# frozen_string_literal: true
+
 module Rdkafka
-  VERSION = "0.12.0"
-  LIBRDKAFKA_VERSION = "1.9.0"
-  LIBRDKAFKA_SOURCE_SHA256 = "59b6088b69ca6cf278c3f9de5cd6b7f3fd604212cd1c59870bc531c54147e889"
+  VERSION = "0.15.0"
+  LIBRDKAFKA_VERSION = "2.3.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
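The pinned librdkafka jumps from 1.9.0 to 2.3.0 along with the gem version; both constants are inspectable at runtime:

require "rdkafka"

puts Rdkafka::VERSION            # => "0.15.0"
puts Rdkafka::LIBRDKAFKA_VERSION # => "2.3.0"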
data/lib/rdkafka.rb CHANGED
@@ -1,11 +1,29 @@
-require "rdkafka/version"
+# frozen_string_literal: true
+
+require "logger"
+require "objspace"
+require "ffi"
+require "json"
 
+require "rdkafka/version"
+require "rdkafka/helpers/time"
 require "rdkafka/abstract_handle"
 require "rdkafka/admin"
 require "rdkafka/admin/create_topic_handle"
 require "rdkafka/admin/create_topic_report"
+require "rdkafka/admin/delete_groups_handle"
+require "rdkafka/admin/delete_groups_report"
 require "rdkafka/admin/delete_topic_handle"
 require "rdkafka/admin/delete_topic_report"
+require "rdkafka/admin/create_partitions_handle"
+require "rdkafka/admin/create_partitions_report"
+require "rdkafka/admin/create_acl_handle"
+require "rdkafka/admin/create_acl_report"
+require "rdkafka/admin/delete_acl_handle"
+require "rdkafka/admin/delete_acl_report"
+require "rdkafka/admin/describe_acl_handle"
+require "rdkafka/admin/describe_acl_report"
+require "rdkafka/admin/acl_binding_result"
 require "rdkafka/bindings"
 require "rdkafka/callbacks"
 require "rdkafka/config"
@@ -16,7 +34,11 @@ require "rdkafka/consumer/partition"
 require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
+require "rdkafka/native_kafka"
 require "rdkafka/producer"
-require "rdkafka/producer/client"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
+
+# Main Rdkafka namespace of this gem
+module Rdkafka
+end
data/rdkafka.gemspec CHANGED
@@ -1,12 +1,13 @@
+# frozen_string_literal: true
+
 require File.expand_path('lib/rdkafka/version', __dir__)
 
 Gem::Specification.new do |gem|
   gem.authors = ['Thijs Cadier']
-  gem.email = ["thijs@appsignal.com"]
+  gem.email = ["contact@karafka.io"]
   gem.description = "Modern Kafka client library for Ruby based on librdkafka"
   gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.4+."
   gem.license = 'MIT'
-  gem.homepage = 'https://github.com/thijsc/rdkafka-ruby'
 
   gem.files = `git ls-files`.split($\)
   gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
@@ -14,8 +15,13 @@ Gem::Specification.new do |gem|
   gem.name = 'rdkafka'
   gem.require_paths = ['lib']
   gem.version = Rdkafka::VERSION
-  gem.required_ruby_version = '>= 2.6'
+  gem.required_ruby_version = '>= 2.7'
   gem.extensions = %w(ext/Rakefile)
+  gem.cert_chain = %w[certs/cert_chain.pem]
+
+  if $PROGRAM_NAME.end_with?('gem')
+    gem.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
+  end
 
   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -27,4 +33,14 @@ Gem::Specification.new do |gem|
   gem.add_development_dependency 'simplecov'
   gem.add_development_dependency 'guard'
   gem.add_development_dependency 'guard-rspec'
+
+  gem.metadata = {
+    'funding_uri' => 'https://karafka.io/#become-pro',
+    'homepage_uri' => 'https://karafka.io',
+    'changelog_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/CHANGELOG.md',
+    'bug_tracker_uri' => 'https://github.com/karafka/rdkafka-ruby/issues',
+    'source_code_uri' => 'https://github.com/karafka/rdkafka-ruby',
+    'documentation_uri' => 'https://github.com/karafka/rdkafka-ruby/blob/main/README.md',
+    'rubygems_mfa_required' => 'true'
+  }
 end
data/renovate.json ADDED
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ]
+}
data/spec/rdkafka/abstract_handle_spec.rb CHANGED
@@ -1,4 +1,4 @@
-require "spec_helper"
+# frozen_string_literal: true
 
 describe Rdkafka::AbstractHandle do
   let(:response) { 0 }
data/spec/rdkafka/admin/create_acl_handle_spec.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::CreateAclHandle do
+  # If create acl was successful there is no error object
+  # and the error code is set to RD_KAFKA_RESP_ERR_NO_ERROR
+  # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L169
+  let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR }
+
+  subject do
+    Rdkafka::Admin::CreateAclHandle.new.tap do |handle|
+      handle[:pending] = pending_handle
+      handle[:response] = response
+      # If create acl was successful there is no error object and the error_string is set to ""
+      # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L178
+      handle[:response_string] = FFI::MemoryPointer.from_string("")
+    end
+  end
+
+  describe "#wait" do
+    let(:pending_handle) { true }
+
+    it "should wait until the timeout and then raise an error" do
+      expect {
+        subject.wait(max_wait_timeout: 0.1)
+      }.to raise_error Rdkafka::Admin::CreateAclHandle::WaitTimeoutError, /create acl/
+    end
+
+    context "when not pending anymore and no error" do
+      let(:pending_handle) { false }
+
+      it "should return a create acl report" do
+        report = subject.wait
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+
+      it "should wait without a timeout" do
+        report = subject.wait(max_wait_timeout: nil)
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+    end
+  end
+
+  describe "#raise_error" do
+    let(:pending_handle) { false }
+
+    it "should raise the appropriate error" do
+      expect {
+        subject.raise_error
+      }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+    end
+  end
+end
data/spec/rdkafka/admin/create_acl_report_spec.rb ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::CreateAclReport do
+  subject { Rdkafka::Admin::CreateAclReport.new(
+    rdkafka_response: Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR,
+    rdkafka_response_string: FFI::MemoryPointer.from_string("")
+  )}
+
+  it "should get RD_KAFKA_RESP_ERR_NO_ERROR" do
+    expect(subject.rdkafka_response).to eq(0)
+  end
+
+  it "should get an empty string" do
+    expect(subject.rdkafka_response_string).to eq("")
+  end
+end
data/spec/rdkafka/admin/create_topic_handle_spec.rb CHANGED
@@ -1,4 +1,4 @@
-require "spec_helper"
+# frozen_string_literal: true
 
 describe Rdkafka::Admin::CreateTopicHandle do
   let(:response) { 0 }
data/spec/rdkafka/admin/create_topic_report_spec.rb CHANGED
@@ -1,4 +1,4 @@
-require "spec_helper"
+# frozen_string_literal: true
 
 describe Rdkafka::Admin::CreateTopicReport do
   subject { Rdkafka::Admin::CreateTopicReport.new(
data/spec/rdkafka/admin/delete_acl_handle_spec.rb ADDED
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::DeleteAclHandle do
+  let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR }
+  let(:resource_name) { "acl-test-topic" }
+  let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC }
+  let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+  let(:principal) { "User:anonymous" }
+  let(:host) { "*" }
+  let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ }
+  let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+  let(:delete_acl_ptr) { FFI::Pointer::NULL }
+
+  subject do
+    error_buffer = FFI::MemoryPointer.from_string(" " * 256)
+    delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new(
+      resource_type,
+      FFI::MemoryPointer.from_string(resource_name),
+      resource_pattern_type,
+      FFI::MemoryPointer.from_string(principal),
+      FFI::MemoryPointer.from_string(host),
+      operation,
+      permission_type,
+      error_buffer,
+      256
+    )
+    if delete_acl_ptr.null?
+      raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
+    end
+    pointer_array = [delete_acl_ptr]
+    delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer)
+    delete_acls_array_ptr.write_array_of_pointer(pointer_array)
+    Rdkafka::Admin::DeleteAclHandle.new.tap do |handle|
+      handle[:pending] = pending_handle
+      handle[:response] = response
+      handle[:response_string] = FFI::MemoryPointer.from_string("")
+      handle[:matching_acls] = delete_acls_array_ptr
+      handle[:matching_acls_count] = 1
+    end
+  end
+
+  after do
+    if delete_acl_ptr != FFI::Pointer::NULL
+      Rdkafka::Bindings.rd_kafka_AclBinding_destroy(delete_acl_ptr)
+    end
+  end
+
+  describe "#wait" do
+    let(:pending_handle) { true }
+
+    it "should wait until the timeout and then raise an error" do
+      expect {
+        subject.wait(max_wait_timeout: 0.1)
+      }.to raise_error Rdkafka::Admin::DeleteAclHandle::WaitTimeoutError, /delete acl/
+    end
+
+    context "when not pending anymore and no error" do
+      let(:pending_handle) { false }
+
+      it "should return a delete acl report" do
+        report = subject.wait
+
+        expect(report.deleted_acls.length).to eq(1)
+      end
+
+      it "should wait without a timeout" do
+        report = subject.wait(max_wait_timeout: nil)
+
+        expect(report.deleted_acls[0].matching_acl_resource_name).to eq(resource_name)
+      end
+    end
+  end
+
+  describe "#raise_error" do
+    let(:pending_handle) { false }
+
+    it "should raise the appropriate error" do
+      expect {
+        subject.raise_error
+      }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+    end
+  end
+end
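These handle and report specs back the new `Admin` ACL API added in this range. A hedged end-to-end sketch: the keyword arguments mirror the values exercised in the specs above, the broker address is an assumption, and the exact admin method signatures should be checked against data/lib/rdkafka/admin.rb in this release.

require "rdkafka"

admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

acl = {
  resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC,
  resource_name: "acl-test-topic",
  resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
  principal: "User:anonymous",
  host: "*",
  operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ,
  permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
}

admin.create_acl(**acl).wait(max_wait_timeout: 15)                     # => CreateAclReport
puts admin.describe_acl(**acl).wait(max_wait_timeout: 15).acls.length  # matching bindings
admin.delete_acl(**acl).wait(max_wait_timeout: 15)                     # => DeleteAclReport
admin.close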