rdkafka 0.21.0 → 0.22.0.beta1
This diff compares the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
- data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
- data/.github/workflows/ci_macos_arm64.yml +306 -0
- data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
- data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
- data/.github/workflows/push_macos_arm64.yml +54 -0
- data/.github/workflows/push_ruby.yml +37 -0
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +17 -0
- data/README.md +2 -1
- data/Rakefile +0 -2
- data/docker-compose.yml +1 -1
- data/ext/Rakefile +1 -1
- data/ext/build_common.sh +361 -0
- data/ext/build_linux_x86_64_gnu.sh +306 -0
- data/ext/build_linux_x86_64_musl.sh +763 -0
- data/ext/build_macos_arm64.sh +550 -0
- data/lib/rdkafka/bindings.rb +30 -3
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/native_kafka.rb +8 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +59 -35
- data/lib/rdkafka/version.rb +1 -1
- data/lib/rdkafka.rb +1 -0
- data/rdkafka.gemspec +27 -8
- data/renovate.json +87 -1
- data/spec/rdkafka/admin_spec.rb +27 -11
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/consumer_spec.rb +74 -15
- data/spec/rdkafka/metadata_spec.rb +2 -2
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +237 -7
- data/spec/spec_helper.rb +30 -7
- metadata +45 -87
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +0 -83
- data/Guardfile +0 -19
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -0
data/lib/rdkafka/native_kafka.rb
CHANGED
@@ -4,7 +4,7 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:, opaque:, auto_start: true)
+    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
       @inner = inner
       @opaque = opaque
       # Lock around external access
@@ -30,6 +30,8 @@ module Rdkafka

       @run_polling_thread = run_polling_thread

+      @timeout_ms = timeout_ms
+
       start if auto_start

       @closing = false
@@ -50,7 +52,7 @@ module Rdkafka
          @polling_thread = Thread.new do
            loop do
              @poll_mutex.synchronize do
-                Rdkafka::Bindings.rd_kafka_poll(@inner,
+                Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
              end

              # Exit thread if closing and the poll queue is empty
@@ -124,9 +126,13 @@ module Rdkafka
       # and would continue to run, trying to destroy inner twice
       return unless @inner

+      yield if block_given?
+
       Rdkafka::Bindings.rd_kafka_destroy(@inner)
       @inner = nil
       @opaque = nil
+      @poll_mutex.unlock
+      @poll_mutex = nil
     end
   end
 end
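The initializer now accepts a `timeout_ms:` keyword (default 100) that the polling thread passes to `rd_kafka_poll`, and `close` can unlock and drop the poll mutex once the handle is destroyed. Below is a minimal, hedged sketch in plain Ruby of why a short, configurable poll timeout helps shutdown; the `sleep` stands in for the native poll call, and none of this is the gem's actual internals:

timeout_ms = 100
closing = false

poller = Thread.new do
  loop do
    # In NativeKafka this is Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
    sleep(timeout_ms / 1000.0)

    # The loop gets a chance to observe shutdown once per poll timeout
    break if closing
  end
end

closing = true
poller.join # returns within a poll interval or two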
data/lib/rdkafka/producer/partitions_count_cache.rb
ADDED
@@ -0,0 +1,216 @@
# frozen_string_literal: true

module Rdkafka
  class Producer
    # Caching mechanism for Kafka topic partition counts to avoid frequent cluster queries
    #
    # This cache is designed to optimize the process of obtaining partition counts for topics.
    # It uses several strategies to minimize Kafka cluster queries:
    #
    # @note Design considerations:
    #
    # 1. Statistics-based updates
    #    When statistics callbacks are enabled (via `statistics.interval.ms`), we leverage
    #    this data to proactively update the partition counts cache. This approach costs
    #    approximately 0.02ms of processing time during each statistics interval (typically
    #    every 5 seconds) but eliminates the need for explicit blocking metadata queries.
    #
    # 2. Edge case handling
    #    If a user configures `statistics.interval.ms` much higher than the default cache TTL
    #    (30 seconds), the cache will still function correctly. When statistics updates don't
    #    occur frequently enough, the cache entries will expire naturally, triggering a
    #    blocking refresh when needed.
    #
    # 3. User configuration awareness
    #    The cache respects user-defined settings. If `topic.metadata.refresh.interval.ms` is
    #    set very high, the responsibility for potentially stale data falls on the user. This
    #    is an explicit design choice to honor user configuration preferences and align with
    #    librdkafka settings.
    #
    # 4. Process-wide efficiency
    #    Since this cache is shared across all Rdkafka producers and consumers within a process,
    #    having multiple clients improves overall efficiency. Each client contributes to keeping
    #    the cache updated, benefiting all other clients.
    #
    # 5. Thread-safety approach
    #    The implementation uses fine-grained locking with per-topic mutexes to minimize
    #    contention in multi-threaded environments while ensuring data consistency.
    #
    # 6. Topic recreation handling
    #    If a topic is deleted and recreated with fewer partitions, the cache will continue to
    #    report the higher count until either the TTL expires or the process is restarted. This
    #    design choice simplifies the implementation while relying on librdkafka's error handling
    #    for edge cases. In production environments, topic recreation with different partition
    #    counts is typically accompanied by application restarts to handle structural changes.
    #    This also aligns with the previous cache implementation.
    class PartitionsCountCache
      include Helpers::Time

      # Default time-to-live for cached partition counts in seconds
      #
      # @note This default was chosen to balance freshness of metadata with performance
      #   optimization. Most Kafka cluster topology changes are planned operations, making 30
      #   seconds a reasonable compromise.
      DEFAULT_TTL = 30

      # Creates a new partition count cache
      #
      # @param ttl [Integer] Time-to-live in seconds for cached values
      def initialize(ttl = DEFAULT_TTL)
        @counts = {}
        @mutex_hash = {}
        # Used only for @mutex_hash access to ensure thread-safety when creating new mutexes
        @mutex_for_hash = Mutex.new
        @ttl = ttl
      end

      # Reads partition count for a topic with automatic refresh when expired
      #
      # This method will return the cached partition count if available and not expired.
      # If the value is expired or not available, it will execute the provided block
      # to fetch the current value from Kafka.
      #
      # @param topic [String] Kafka topic name
      # @yield Block that returns the current partition count when cache needs refreshing
      # @yieldreturn [Integer] Current partition count retrieved from Kafka
      # @return [Integer] Partition count for the topic
      #
      # @note The implementation prioritizes read performance over write consistency
      #   since partition counts typically only increase during normal operation.
      def get(topic)
        current_info = @counts[topic]

        if current_info.nil? || expired?(current_info[0])
          new_count = yield

          if current_info.nil?
            # No existing data, create a new entry with mutex
            set(topic, new_count)

            return new_count
          else
            current_count = current_info[1]

            if new_count > current_count
              # Higher value needs mutex to update both timestamp and count
              set(topic, new_count)

              return new_count
            else
              # Same or lower value, just update timestamp without mutex
              refresh_timestamp(topic)

              return current_count
            end
          end
        end

        current_info[1]
      end

      # Update partition count for a topic when needed
      #
      # This method updates the partition count for a topic in the cache.
      # It uses a mutex to ensure thread-safety during updates.
      #
      # @param topic [String] Kafka topic name
      # @param new_count [Integer] New partition count value
      #
      # @note We prioritize higher partition counts and only accept them when using
      #   a mutex to ensure consistency. This design decision is based on the fact that
      #   partition counts in Kafka only increase during normal operation.
      def set(topic, new_count)
        # First check outside mutex to avoid unnecessary locking
        current_info = @counts[topic]

        # For lower values, we don't update count but might need to refresh timestamp
        if current_info && new_count < current_info[1]
          refresh_timestamp(topic)

          return
        end

        # Only lock the specific topic mutex
        mutex_for(topic).synchronize do
          # Check again inside the lock as another thread might have updated
          current_info = @counts[topic]

          if current_info.nil?
            # Create new entry
            @counts[topic] = [monotonic_now, new_count]
          else
            current_count = current_info[1]

            if new_count > current_count
              # Update to higher count value
              current_info[0] = monotonic_now
              current_info[1] = new_count
            else
              # Same or lower count, update timestamp only
              current_info[0] = monotonic_now
            end
          end
        end
      end

      # @return [Hash] hash with ttls and partitions counts array
      def to_h
        @counts
      end

      private

      # Get or create a mutex for a specific topic
      #
      # This method ensures that each topic has its own mutex,
      # allowing operations on different topics to proceed in parallel.
      #
      # @param topic [String] Kafka topic name
      # @return [Mutex] Mutex for the specified topic
      #
      # @note We use a separate mutex (@mutex_for_hash) to protect the creation
      #   of new topic mutexes. This pattern allows fine-grained locking while
      #   maintaining thread-safety.
      def mutex_for(topic)
        mutex = @mutex_hash[topic]

        return mutex if mutex

        # Use a separate mutex to protect the creation of new topic mutexes
        @mutex_for_hash.synchronize do
          # Check again in case another thread created it
          @mutex_hash[topic] ||= Mutex.new
        end

        @mutex_hash[topic]
      end

      # Update the timestamp without acquiring the mutex
      #
      # This is an optimization that allows refreshing the TTL of existing entries
      # without the overhead of mutex acquisition.
      #
      # @param topic [String] Kafka topic name
      #
      # @note This method is safe for refreshing existing data regardless of count
      #   because it only updates the timestamp, which doesn't affect the correctness
      #   of concurrent operations.
      def refresh_timestamp(topic)
        current_info = @counts[topic]

        return unless current_info

        # Update the timestamp in-place
        current_info[0] = monotonic_now
      end

      # Check if a timestamp has expired based on the TTL
      #
      # @param timestamp [Float] Monotonic timestamp to check
      # @return [Boolean] true if expired, false otherwise
      def expired?(timestamp)
        monotonic_now - timestamp > @ttl
      end
    end
  end
end
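A hedged usage sketch of the cache API above, assuming rdkafka 0.22.0.beta1 is installed. The topic name and TTL are example values, and the block passed to `get` stands in for the blocking metadata query the producer performs on a cache miss:

require "rdkafka"

# 10 second TTL instead of the DEFAULT_TTL of 30
cache = Rdkafka::Producer::PartitionsCountCache.new(10)

# Cache miss: the block runs (in the producer this is an Rdkafka::Metadata query)
cache.get("events") { 6 }                                # => 6

# Within the TTL the cached value is returned and the block is not executed
cache.get("events") { raise "not reached while fresh" }  # => 6

# set only moves counts upward; a lower value merely refreshes the entry timestamp
cache.set("events", 3)
cache.to_h["events"][1]                                  # => 6

# The process-wide cache used by producers (see the producer.rb diff below) can be swapped out
Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new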
data/lib/rdkafka/producer.rb
CHANGED
@@ -6,13 +6,31 @@ module Rdkafka
     include Helpers::Time
     include Helpers::OAuth

-    # Cache partitions count for 30 seconds
-    PARTITIONS_COUNT_TTL = 30
-
     # Empty hash used as a default
     EMPTY_HASH = {}.freeze

-
+    # @private
+    @@partitions_count_cache = PartitionsCountCache.new
+
+    # Global (process wide) partitions cache. We use it to store number of topics partitions,
+    # either from the librdkafka statistics (if enabled) or via direct inline calls every now and
+    # then. Since the partitions count can only grow and should be same for all consumers and
+    # producers, we can use a global cache as long as we ensure that updates only move up.
+    #
+    # @note It is critical to remember, that not all users may have statistics callbacks enabled,
+    #   hence we should not make assumption that this cache is always updated from the stats.
+    #
+    # @return [Rdkafka::Producer::PartitionsCountCache]
+    def self.partitions_count_cache
+      @@partitions_count_cache
+    end
+
+    # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
+    def self.partitions_count_cache=(partitions_count_cache)
+      @@partitions_count_cache = partitions_count_cache
+    end
+
+    private_constant :EMPTY_HASH

     # Raised when there was a critical issue when invoking rd_kafka_topic_new
     # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
@@ -43,25 +61,6 @@ module Rdkafka

       # Makes sure, that native kafka gets closed before it gets GCed by Ruby
       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
-
-      @_partitions_count_cache = Hash.new do |cache, topic|
-        topic_metadata = nil
-
-        @native_kafka.with_inner do |inner|
-          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
-        end
-
-        partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
-
-        # This approach caches the failure to fetch only for 1 second. This will make sure, that
-        # we do not cache the failure for too long but also "buys" us a bit of time in case there
-        # would be issues in the cluster so we won't overaload it with consecutive requests
-        cache[topic] = if partition_count.positive?
-          [monotonic_now, partition_count]
-        else
-          [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
-        end
-      end
     end

     # Sets alternative set of configuration details that can be set per topic
@@ -222,18 +221,31 @@ module Rdkafka
     # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
     #   auto-created after returning nil.
     #
-    # @note We cache the partition count for a given topic for given time.
+    # @note We cache the partition count for a given topic for given time. If statistics are
+    #   enabled for any producer or consumer, it will take precedence over per instance fetching.
+    #
     # This prevents us in case someone uses `partition_key` from querying for the count with
-    #   each message. Instead we query once every 30 seconds at most if we have a valid
-    #   count or every 5 seconds in case we were not able to obtain number of partitions
+    #   each message. Instead we query at most once every 30 seconds at most if we have a valid
+    #   partition count or every 5 seconds in case we were not able to obtain number of partitions.
     def partition_count(topic)
       closed_producer_check(__method__)

-
-
+      self.class.partitions_count_cache.get(topic) do
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        topic_metadata ? topic_metadata[:partition_count] : -1
       end
+    rescue Rdkafka::RdkafkaError => e
+      # If the topic does not exist, it will be created or if not allowed another error will be
+      # raised. We here return -1 so this can happen without early error happening on metadata
+      # discovery.
+      return -1 if e.code == :unknown_topic_or_part

-
+      raise(e)
     end

     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -247,7 +259,7 @@ module Rdkafka
     # @param partition [Integer,nil] Optional partition to produce to
     # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
-    # @param headers [Hash<String,String
+    # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
     # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
     # @param topic_config [Hash] topic config for given message dispatch. Allows to send messages to topics with different configuration
     #
@@ -339,11 +351,23 @@ module Rdkafka
       if headers
         headers.each do |key0, value0|
           key = key0.to_s
-
-
-
-
-
+          if value0.is_a?(Array)
+            # Handle array of values per KIP-82
+            value0.each do |value|
+              value = value.to_s
+              args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+              args << :string << key
+              args << :pointer << value
+              args << :size_t << value.bytesize
+            end
+          else
+            # Handle single value
+            value = value0.to_s
+            args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+            args << :string << key
+            args << :pointer << value
+            args << :size_t << value.bytesize
+          end
         end
       end

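A hedged example of the duplicate-header support (KIP-82) added in the produce diff above: an Array value now emits one header per element under the same key, while plain String values behave as before. The broker address and topic are placeholders:

require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

handle = producer.produce(
  topic: "events",
  payload: "body",
  headers: {
    "trace-id" => "abc123",       # single value, one header as before
    "tag" => ["alpha", "beta"]    # two "tag" headers per KIP-82
  }
)

handle.wait
producer.close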
data/lib/rdkafka/version.rb
CHANGED
data/lib/rdkafka.rb
CHANGED
@@ -42,6 +42,7 @@ require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
 require "rdkafka/native_kafka"
+require "rdkafka/producer/partitions_count_cache"
 require "rdkafka/producer"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
data/rdkafka.gemspec
CHANGED
@@ -9,7 +9,6 @@ Gem::Specification.new do |gem|
   gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.7+."
   gem.license = 'MIT'

-  gem.files = `git ls-files`.split($\)
   gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
   gem.name = 'rdkafka'
@@ -17,30 +16,50 @@ Gem::Specification.new do |gem|
   gem.version = Rdkafka::VERSION
   gem.required_ruby_version = '>= 3.1'
   gem.extensions = %w(ext/Rakefile)
-  gem.cert_chain = %w[certs/cert.pem]

-  if
-  gem.
+  if ENV['RUBY_PLATFORM']
+    gem.platform = ENV['RUBY_PLATFORM']
+    gem.files = `git ls-files`.split($\)
+
+    # Do not include the source code for librdkafka as it should be precompiled already per
+    # platform. Same applies to any possible patches.
+    gem.files = gem.files.reject do |file|
+      file.match?(%r{^dist/librdkafka-.*\.tar\.gz$}) ||
+        file.match?(%r{^dist/patches/})
+    end
+
+    # Add the compiled extensions that exist (not in git)
+    if File.exist?('ext/librdkafka.so')
+      gem.files << 'ext/librdkafka.so'
+    end
+
+    if File.exist?('ext/librdkafka.dylib')
+      gem.files << 'ext/librdkafka.dylib'
+    end
+  else
+    gem.platform = Gem::Platform::RUBY
+    gem.files = `git ls-files`.split($\)
+    gem.extensions = %w(ext/Rakefile)
   end

   gem.add_dependency 'ffi', '~> 1.15'
+  gem.add_dependency 'logger'
   gem.add_dependency 'mini_portile2', '~> 2.6'
   gem.add_dependency 'rake', '> 12'

+  gem.add_development_dependency 'ostruct'
   gem.add_development_dependency 'pry'
   gem.add_development_dependency 'rspec', '~> 3.5'
   gem.add_development_dependency 'rake'
   gem.add_development_dependency 'simplecov'
-  gem.add_development_dependency 'guard'
-  gem.add_development_dependency 'guard-rspec'

   gem.metadata = {
     'funding_uri' => 'https://karafka.io/#become-pro',
     'homepage_uri' => 'https://karafka.io',
-    'changelog_uri' => 'https://
+    'changelog_uri' => 'https://karafka.io/docs/Changelog-Rdkafka',
     'bug_tracker_uri' => 'https://github.com/karafka/rdkafka-ruby/issues',
     'source_code_uri' => 'https://github.com/karafka/rdkafka-ruby',
-    'documentation_uri' => 'https://
+    'documentation_uri' => 'https://karafka.io/docs',
     'rubygems_mfa_required' => 'true'
   }
 end
data/renovate.json
CHANGED
@@ -1,6 +1,92 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
   "extends": [
-    "config:
+    "config:recommended"
+  ],
+  "github-actions": {
+    "enabled": true,
+    "pinDigests": true
+  },
+  "packageRules": [
+    {
+      "matchManagers": [
+        "github-actions"
+      ],
+      "minimumReleaseAge": "7 days"
+    }
+  ],
+  "customManagers": [
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly OPENSSL_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "openssl/openssl",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^OpenSSL_(?<version>.*)$"
+    },
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly CYRUS_SASL_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "cyrusimap/cyrus-sasl",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^cyrus-sasl-(?<version>.*)$"
+    },
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly ZLIB_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "madler/zlib",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^v(?<version>.*)$"
+    },
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly ZSTD_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "facebook/zstd",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^v(?<version>.*)$"
+    },
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly KRB5_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "krb5/krb5",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^krb5-(?<version>.*)$"
+    },
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^ext/build_common\\.sh$/"
+      ],
+      "matchStrings": [
+        "readonly LIBRDKAFKA_VERSION=\"(?<currentValue>.*)\""
+      ],
+      "depNameTemplate": "confluentinc/librdkafka",
+      "datasourceTemplate": "github-releases",
+      "extractVersionTemplate": "^v(?<version>.*)$"
+    }
   ]
 }
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -295,6 +295,8 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'delete.retention.ms'
       end
@@ -325,6 +327,9 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
       expect(resources_results.size).to eq(1)
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)
+
+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'delete.retention.ms'
       end
@@ -356,6 +361,8 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'cleanup.policy'
       end
@@ -387,6 +394,8 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'cleanup.policy'
       end
@@ -622,7 +631,12 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m

     consumer.subscribe(topic_name)
     wait_for_assignment(consumer)
-
+
+    message = nil
+
+    10.times do
+      message ||= consumer.poll(100)
+    end

     expect(message).to_not be_nil

@@ -738,17 +752,19 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
     end
   end

-
-
-
-
-
-
-
-
-
+  unless RUBY_PLATFORM == 'java'
+    context "when operating from a fork" do
+      # @see https://github.com/ffi/ffi/issues/1114
+      it 'expect to be able to create topics and run other admin operations without hanging' do
+        # If the FFI issue is not mitigated, this will hang forever
+        pid = fork do
+          admin
+            .create_topic(topic_name, topic_partition_count, topic_replication_factor)
+            .wait
+        end

-
+        Process.wait(pid)
+      end
     end
   end
 end
data/spec/rdkafka/bindings_spec.rb
CHANGED
@@ -149,15 +149,6 @@ describe Rdkafka::Bindings do
   end

   describe "oauthbearer set token" do
-
-    context "without args" do
-      it "should raise argument error" do
-        expect {
-          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token
-        }.to raise_error(ArgumentError)
-      end
-    end
-
     context "with args" do
       before do
         DEFAULT_TOKEN_EXPIRY_SECONDS = 900
|