rdkafka 0.20.0 → 0.21.1.alpha2
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci.yml +27 -11
- data/.github/workflows/push.yml +38 -0
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +12 -0
- data/README.md +1 -0
- data/dist/{librdkafka-2.6.1.tar.gz → librdkafka-2.8.0.tar.gz} +0 -0
- data/docker-compose.yml +1 -1
- data/lib/rdkafka/bindings.rb +25 -1
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/native_kafka.rb +8 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +53 -36
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +1 -0
- data/rdkafka.gemspec +2 -7
- data/renovate.json +13 -1
- data/spec/rdkafka/admin_spec.rb +12 -10
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +172 -3
- data/spec/spec_helper.rb +9 -0
- metadata +13 -35
- checksums.yaml.gz.sig +0 -0
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -0
data/lib/rdkafka/producer.rb
CHANGED
@@ -6,13 +6,31 @@ module Rdkafka
     include Helpers::Time
     include Helpers::OAuth
 
-    # Cache partitions count for 30 seconds
-    PARTITIONS_COUNT_TTL = 30
-
     # Empty hash used as a default
     EMPTY_HASH = {}.freeze
 
-
+    # @private
+    @@partitions_count_cache = PartitionsCountCache.new
+
+    # Global (process wide) partitions cache. We use it to store number of topics partitions,
+    # either from the librdkafka statistics (if enabled) or via direct inline calls every now and
+    # then. Since the partitions count can only grow and should be same for all consumers and
+    # producers, we can use a global cache as long as we ensure that updates only move up.
+    #
+    # @note It is critical to remember, that not all users may have statistics callbacks enabled,
+    #   hence we should not make assumption that this cache is always updated from the stats.
+    #
+    # @return [Rdkafka::Producer::PartitionsCountCache]
+    def self.partitions_count_cache
+      @@partitions_count_cache
+    end
+
+    # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
+    def self.partitions_count_cache=(partitions_count_cache)
+      @@partitions_count_cache = partitions_count_cache
+    end
+
+    private_constant :EMPTY_HASH
 
     # Raised when there was a critical issue when invoking rd_kafka_topic_new
     # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
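The class-level accessors above expose a single process-wide cache shared by all producers. Illustrative usage sketch (the reset-between-tests scenario is hypothetical, not something the gem requires):

require "rdkafka"

# Read the shared, process-wide partitions count cache
Rdkafka::Producer.partitions_count_cache # => #<Rdkafka::Producer::PartitionsCountCache ...>

# Swap in a fresh instance, e.g. between test examples, so counts cached by
# one example do not leak into the next
Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new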
@@ -43,25 +61,6 @@ module Rdkafka
 
       # Makes sure, that native kafka gets closed before it gets GCed by Ruby
       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
-
-      @_partitions_count_cache = Hash.new do |cache, topic|
-        topic_metadata = nil
-
-        @native_kafka.with_inner do |inner|
-          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
-        end
-
-        partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
-
-        # This approach caches the failure to fetch only for 1 second. This will make sure, that
-        # we do not cache the failure for too long but also "buys" us a bit of time in case there
-        # would be issues in the cluster so we won't overaload it with consecutive requests
-        cache[topic] = if partition_count.positive?
-          [monotonic_now, partition_count]
-        else
-          [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
-        end
-      end
     end
 
     # Sets alternative set of configuration details that can be set per topic
@@ -222,18 +221,24 @@ module Rdkafka
     # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
     #   auto-created after returning nil.
     #
-    # @note We cache the partition count for a given topic for given time.
+    # @note We cache the partition count for a given topic for given time. If statistics are
+    #   enabled for any producer or consumer, it will take precedence over per instance fetching.
+    #
     #   This prevents us in case someone uses `partition_key` from querying for the count with
-    #   each message. Instead we query once every 30 seconds at most if we have a valid
-    #   count or every 5 seconds in case we were not able to obtain number of partitions
+    #   each message. Instead we query at most once every 30 seconds at most if we have a valid
+    #   partition count or every 5 seconds in case we were not able to obtain number of partitions.
     def partition_count(topic)
       closed_producer_check(__method__)
 
-
-
-      end
+      self.class.partitions_count_cache.get(topic) do
+        topic_metadata = nil
 
-
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        topic_metadata ? topic_metadata[:partition_count] : -1
+      end
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
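For orientation, an illustrative stand-in for the cache contract the call site above relies on: get(topic) answers from the cache when possible, otherwise runs the block and stores its result. This is not the PartitionsCountCache implementation shipped in lib/rdkafka/producer/partitions_count_cache.rb, which per the comments above also applies a TTL and only lets counts grow.

# Illustrative stand-in, not the gem's implementation
class NaivePartitionsCountCache
  def initialize
    @counts = {}
  end

  # Return the cached count for the topic if present, otherwise evaluate the
  # block, remember its result and return it
  def get(topic)
    return @counts[topic] if @counts.key?(topic)

    @counts[topic] = yield
  end
end

cache = NaivePartitionsCountCache.new
cache.get("events") { 12 } # => 12, fetched via the block
cache.get("events") { 99 } # => 12, served from the cache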
@@ -247,7 +252,7 @@ module Rdkafka
     # @param partition [Integer,nil] Optional partition to produce to
     # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
-    # @param headers [Hash<String,String
+    # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
     # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
     # @param topic_config [Hash] topic config for given message dispatch. Allows to send messages to topics with different configuration
     #
@@ -339,11 +344,23 @@ module Rdkafka
       if headers
         headers.each do |key0, value0|
           key = key0.to_s
-
-
-
-
-
+          if value0.is_a?(Array)
+            # Handle array of values per KIP-82
+            value0.each do |value|
+              value = value.to_s
+              args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+              args << :string << key
+              args << :pointer << value
+              args << :size_t << value.bytesize
+            end
+          else
+            # Handle single value
+            value = value0.to_s
+            args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+            args << :string << key
+            args << :pointer << value
+            args << :size_t << value.bytesize
+          end
         end
       end
 
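With the branching above, produce accepts either a single string or an array of strings per header key. Illustrative usage sketch (broker address, topic and header names are placeholders):

require "rdkafka"

config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
producer = config.producer

handle = producer.produce(
  topic:   "events",
  payload: "body",
  headers: {
    "request-id" => "42",               # single-value header
    "trace"      => ["hop-1", "hop-2"]  # duplicate header, one entry per value (KIP-82)
  }
)

handle.wait # block until the delivery report arrives
producer.close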
data/lib/rdkafka/version.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.20.0"
-  LIBRDKAFKA_VERSION = "2.6.1"
-  LIBRDKAFKA_SOURCE_SHA256 = "
+  VERSION = "0.21.1.alpha2"
+  LIBRDKAFKA_VERSION = "2.8.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
 end
data/lib/rdkafka.rb
CHANGED
@@ -42,6 +42,7 @@ require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
 require "rdkafka/native_kafka"
+require "rdkafka/producer/partitions_count_cache"
 require "rdkafka/producer"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
data/rdkafka.gemspec
CHANGED
@@ -17,11 +17,6 @@ Gem::Specification.new do |gem|
   gem.version = Rdkafka::VERSION
   gem.required_ruby_version = '>= 3.1'
   gem.extensions = %w(ext/Rakefile)
-  gem.cert_chain = %w[certs/cert.pem]
-
-  if $PROGRAM_NAME.end_with?('gem')
-    gem.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
-  end
 
   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -37,10 +32,10 @@ Gem::Specification.new do |gem|
   gem.metadata = {
     'funding_uri' => 'https://karafka.io/#become-pro',
     'homepage_uri' => 'https://karafka.io',
-    'changelog_uri' => 'https://
+    'changelog_uri' => 'https://karafka.io/docs/Changelog-Rdkafka',
     'bug_tracker_uri' => 'https://github.com/karafka/rdkafka-ruby/issues',
     'source_code_uri' => 'https://github.com/karafka/rdkafka-ruby',
-    'documentation_uri' => 'https://
+    'documentation_uri' => 'https://karafka.io/docs',
     'rubygems_mfa_required' => 'true'
   }
 end
data/renovate.json
CHANGED
@@ -1,6 +1,18 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
   "extends": [
-    "config:
+    "config:recommended"
+  ],
+  "github-actions": {
+    "enabled": true,
+    "pinDigests": true
+  },
+  "packageRules": [
+    {
+      "matchManagers": [
+        "github-actions"
+      ],
+      "minimumReleaseAge": "7 days"
+    }
   ]
 }
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -738,17 +738,19 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
     end
   end
 
-
-
-
-
-
-
-
-
-
+  unless RUBY_PLATFORM == 'java'
+    context "when operating from a fork" do
+      # @see https://github.com/ffi/ffi/issues/1114
+      it 'expect to be able to create topics and run other admin operations without hanging' do
+        # If the FFI issue is not mitigated, this will hang forever
+        pid = fork do
+          admin
+            .create_topic(topic_name, topic_partition_count, topic_replication_factor)
+            .wait
+        end
 
-
+        Process.wait(pid)
+      end
     end
   end
 end
data/spec/rdkafka/bindings_spec.rb
CHANGED
@@ -149,15 +149,6 @@ describe Rdkafka::Bindings do
   end
 
   describe "oauthbearer set token" do
-
-    context "without args" do
-      it "should raise argument error" do
-        expect {
-          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token
-        }.to raise_error(ArgumentError)
-      end
-    end
-
     context "with args" do
       before do
         DEFAULT_TOKEN_EXPIRY_SECONDS = 900
data/spec/rdkafka/config_spec.rb
CHANGED
@@ -33,23 +33,25 @@ describe Rdkafka::Config do
       expect(log.string).to include "FATAL -- : I love testing"
     end
 
-
-
-
-
-
-
-
-
-
+    unless RUBY_PLATFORM == 'java'
+      it "expect to start new logger thread after fork and work" do
+        reader, writer = IO.pipe
+
+        pid = fork do
+          $stdout.reopen(writer)
+          Rdkafka::Config.logger = Logger.new($stdout)
+          reader.close
+          producer = rdkafka_producer_config(debug: 'all').producer
+          producer.close
+          writer.close
+          sleep(1)
+        end
+
         writer.close
-
+        Process.wait(pid)
+        output = reader.read
+        expect(output.split("\n").size).to be >= 20
       end
-
-      writer.close
-      Process.wait(pid)
-      output = reader.read
-      expect(output.split("\n").size).to be >= 20
     end
   end
 
data/spec/rdkafka/consumer/headers_spec.rb
CHANGED
@@ -3,7 +3,7 @@
 describe Rdkafka::Consumer::Headers do
   let(:headers) do
     { # Note String keys!
-      "version" => "2.1.3",
+      "version" => ["2.1.3", "2.1.4"],
       "type" => "String"
     }
   end
@@ -17,27 +17,39 @@ describe Rdkafka::Consumer::Headers do
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end
 
+      # First version header
       expect(Rdkafka::Bindings).to \
         receive(:rd_kafka_header_get_all)
         .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][0].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers["version"][0]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
      end
 
+      # Second version header
       expect(Rdkafka::Bindings).to \
         receive(:rd_kafka_header_get_all)
         .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][1].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers["version"][1]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
      end
 
+      # Single type header
       expect(Rdkafka::Bindings).to \
         receive(:rd_kafka_header_get_all)
-        .with(headers_ptr, 2, anything, anything, anything)
+        .with(headers_ptr, 2, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 2", read_string_to_null: "type"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["type"].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 2", read_string: headers["type"]))
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+      expect(Rdkafka::Bindings).to \
+        receive(:rd_kafka_header_get_all)
+        .with(headers_ptr, 3, anything, anything, anything)
         .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
     end
 
@@ -46,8 +58,12 @@ describe Rdkafka::Consumer::Headers do
     it { is_expected.to eq(headers) }
     it { is_expected.to be_frozen }
 
-    it '
-      expect(subject['version']).to eq("2.1.3")
+    it 'returns array for duplicate headers' do
+      expect(subject['version']).to eq(["2.1.3", "2.1.4"])
+    end
+
+    it 'returns string for single headers' do
+      expect(subject['type']).to eq("String")
     end
 
     it 'does not support symbols mappings' do
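The expectations above pin down the consumer-side shape: a header key that occurs more than once is exposed as an array, while a single occurrence stays a plain string. Illustrative consumption sketch (connection details and topic are placeholders):

require "rdkafka"

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id"          => "example-group"
)

consumer = config.consumer
consumer.subscribe("events")

consumer.each do |message|
  versions = message.headers["version"] # => ["2.1.3", "2.1.4"] when duplicated
  type     = message.headers["type"]    # => "String" for a single occurrence
  puts "#{type}: #{Array(versions).join(', ')}"
end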