karafka-rdkafka 0.19.5 → 0.20.0.rc1
This diff shows the changes between two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +3 -3
- data/.ruby-version +1 -1
- data/CHANGELOG.md +2 -14
- data/README.md +3 -1
- data/Rakefile +2 -0
- data/certs/cert.pem +26 -0
- data/dist/{librdkafka-2.8.0.tar.gz → librdkafka-2.10.0.tar.gz} +0 -0
- data/docker-compose.yml +1 -1
- data/karafka-rdkafka.gemspec +6 -1
- data/lib/rdkafka/bindings.rb +1 -25
- data/lib/rdkafka/error.rb +1 -8
- data/lib/rdkafka/native_kafka.rb +0 -4
- data/lib/rdkafka/producer.rb +28 -40
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +0 -1
- data/spec/rdkafka/admin_spec.rb +1 -1
- data/spec/rdkafka/consumer_spec.rb +3 -3
- data/spec/rdkafka/producer_spec.rb +3 -192
- data/spec/spec_helper.rb +0 -9
- data.tar.gz.sig +0 -0
- metadata +32 -10
- metadata.gz.sig +0 -0
- data/.github/workflows/push.yml +0 -37
- data/lib/rdkafka/producer/partitions_count_cache.rb +0 -216
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +0 -359
- data/spec/rdkafka/producer/partitions_count_spec.rb +0 -359
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c5b81585e7271750f4b6c5110df05e25765fd6f86c991ce7f27f3a4714ec94ae
+  data.tar.gz: 387f5d228af380f6be3e4df9e1ea0d8842f4e65450f0d2dfc60ca68df9613a41
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '0190399ef8742b240a04d4fe55de8263d6b7ff015556ca094227bbf7838ce1d47c7f152e0224bbc9a4ed288651a6a2a4d2ec161b1f8219570f9affdfaa93e1a3'
+  data.tar.gz: 4d2ab1b51e28b07e852de063b3ad163e90a5e7191c70b3b0c7d88591b063fbd3b15f2bee452b6cb8de9a6e1032e633a64f013fd0ff10a0ff5f5f58e3ce1b3c18
checksums.yaml.gz.sig
ADDED
Binary file
data/.github/workflows/ci.yml
CHANGED
@@ -1,4 +1,4 @@
-name:
+name: ci
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -51,7 +51,7 @@ jobs:
           docker compose up -d || (sleep 5 && docker compose up -d)
 
       - name: Set up Ruby
-        uses: ruby/setup-ruby@
+        uses: ruby/setup-ruby@ca041f971d66735f3e5ff1e21cc13e2d51e7e535 # v1.233.0
        with:
           ruby-version: ${{matrix.ruby}}
           bundler-cache: true
@@ -86,7 +86,7 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Set up Ruby
-        uses: ruby/setup-ruby@
+        uses: ruby/setup-ruby@ca041f971d66735f3e5ff1e21cc13e2d51e7e535 # v1.233.0
        with:
           ruby-version: ${{matrix.ruby}}
           bundler-cache: false
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.4.
+3.4.3
data/CHANGELOG.md
CHANGED
@@ -1,20 +1,8 @@
 # Rdkafka Changelog
 
-## 0.
-- [Enhancement]
-
-## 0.19.4 (2025-05-23)
-- [Change] Move to trusted-publishers and remove signing since no longer needed.
-
-## 0.19.3 (2025-05-23)
-- [Enhancement] Include broker message in the error full message if provided.
-
-## 0.19.2 (2025-05-20)
-- [Enhancement] Replace TTL-based partition count cache with a global cache that reuses `librdkafka` statistics data when possible.
+## 0.20.0 (Unreleased)
+- [Enhancement] Bump librdkafka to 2.10.0
 - [Enhancement] Roll out experimental jruby support.
-- [Fix] Fix issue where post-closed producer C topics refs would not be cleaned.
-- [Fix] Fiber causes Segmentation Fault.
-- [Change] Move to trusted-publishers and remove signing since no longer needed.
 
 ## 0.19.1 (2025-04-07)
 - [Enhancement] Support producing and consuming of headers with mulitple values (KIP-82).
data/README.md
CHANGED
@@ -163,7 +163,9 @@ bundle exec rake produce_messages
 
 | rdkafka-ruby | librdkafka | patches |
 |-|-|-|
-| 0.
+| 0.20.0 (Unreleased) | 2.10.0 (2025-04-18) | yes |
+| 0.19.1 (2025-04-07) | 2.8.0 (2025-01-07) | yes |
+| 0.19.0 (2025-01-20) | 2.8.0 (2025-01-07) | yes |
 | 0.18.0 (2024-11-26) | 2.6.1 (2024-11-18) | yes |
 | 0.17.4 (2024-09-02) | 2.5.3 (2024-09-02) | yes |
 | 0.17.0 (2024-08-01) | 2.5.0 (2024-07-10) | yes |
data/Rakefile
CHANGED
data/certs/cert.pem
ADDED
@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEcDCCAtigAwIBAgIBATANBgkqhkiG9w0BAQsFADA/MRAwDgYDVQQDDAdjb250
+YWN0MRcwFQYKCZImiZPyLGQBGRYHa2FyYWZrYTESMBAGCgmSJomT8ixkARkWAmlv
+MB4XDTI0MDgyMzEwMTkyMFoXDTQ5MDgxNzEwMTkyMFowPzEQMA4GA1UEAwwHY29u
+dGFjdDEXMBUGCgmSJomT8ixkARkWB2thcmFma2ExEjAQBgoJkiaJk/IsZAEZFgJp
+bzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjLhLjQqUlNayxkXnO+
+PsmCDs/KFIzhrsYMfLZRZNaWmzV3ujljMOdDjd4snM2X06C41iVdQPWjpe3j8vVe
+ZXEWR/twSbOP6Eeg8WVH2wCOo0x5i7yhVn4UBLH4JpfEMCbemVcWQ9ry9OMg4WpH
+Uu4dRwxFV7hzCz3p0QfNLRI4miAxnGWcnlD98IJRjBAksTuR1Llj0vbOrDGsL9ZT
+JeXP2gdRLd8SqzAFJEWrbeTBCBU7gfSh3oMg5SVDLjaqf7Kz5wC/8bDZydzanOxB
+T6CDXPsCnllmvTNx2ei2T5rGYJOzJeNTmJLLK6hJWUlAvaQSvCwZRvFJ0tVGLEoS
+flqSr6uGyyl1eMUsNmsH4BqPEYcAV6P2PKTv2vUR8AP0raDvZ3xL1TKvfRb8xRpo
+vPopCGlY5XBWEc6QERHfVLTIVsjnls2/Ujj4h8/TSfqqYnaHKefIMLbuD/tquMjD
+iWQsW2qStBV0T+U7FijKxVfrfqZP7GxQmDAc9o1iiyAa3QIDAQABo3cwdTAJBgNV
+HRMEAjAAMAsGA1UdDwQEAwIEsDAdBgNVHQ4EFgQU3O4dTXmvE7YpAkszGzR9DdL9
+sbEwHQYDVR0RBBYwFIESY29udGFjdEBrYXJhZmthLmlvMB0GA1UdEgQWMBSBEmNv
+bnRhY3RAa2FyYWZrYS5pbzANBgkqhkiG9w0BAQsFAAOCAYEAVKTfoLXn7mqdSxIR
+eqxcR6Huudg1jes81s1+X0uiRTR3hxxKZ3Y82cPsee9zYWyBrN8TA4KA0WILTru7
+Ygxvzha0SRPsSiaKLmgOJ+61ebI4+bOORzIJLpD6GxCxu1r7MI4+0r1u1xe0EWi8
+agkVo1k4Vi8cKMLm6Gl9b3wG9zQBw6fcgKwmpjKiNnOLP+OytzUANrIUJjoq6oal
+TC+f/Uc0TLaRqUaW/bejxzDWWHoM3SU6aoLPuerglzp9zZVzihXwx3jPLUVKDFpF
+Rl2lcBDxlpYGueGo0/oNzGJAAy6js8jhtHC9+19PD53vk7wHtFTZ/0ugDQYnwQ+x
+oml2fAAuVWpTBCgOVFe6XCQpMKopzoxQ1PjKztW2KYxgJdIBX87SnL3aWuBQmhRd
+i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
+ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
+-----END CERTIFICATE-----
data/dist/{librdkafka-2.8.0.tar.gz → librdkafka-2.10.0.tar.gz}
RENAMED
Binary file
data/docker-compose.yml
CHANGED
data/karafka-rdkafka.gemspec
CHANGED
@@ -17,6 +17,11 @@ Gem::Specification.new do |gem|
   gem.version = Rdkafka::VERSION
   gem.required_ruby_version = '>= 3.1'
   gem.extensions = %w(ext/Rakefile)
+  gem.cert_chain = %w[certs/cert.pem]
+
+  if $PROGRAM_NAME.end_with?('gem')
+    gem.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
+  end
 
   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -32,7 +37,7 @@ Gem::Specification.new do |gem|
   gem.metadata = {
     'funding_uri' => 'https://karafka.io/#become-pro',
     'homepage_uri' => 'https://karafka.io',
-    'changelog_uri' => 'https://
+    'changelog_uri' => 'https://github.com/karafka/karafka-rdkafka/blob/master/CHANGELOG.md',
     'bug_tracker_uri' => 'https://github.com/karafka/karafka-rdkafka/issues',
     'source_code_uri' => 'https://github.com/karafka/karafka-rdkafka',
     'documentation_uri' => 'https://karafka.io/docs',
data/lib/rdkafka/bindings.rb
CHANGED
@@ -35,8 +35,6 @@ module Rdkafka
     RD_KAFKA_OFFSET_STORED = -1000
     RD_KAFKA_OFFSET_INVALID = -1001
 
-    EMPTY_HASH = {}.freeze
-
     class SizePtr < FFI::Struct
       layout :value, :size_t
     end
@@ -217,31 +215,9 @@ module Rdkafka
     StatsCallback = FFI::Function.new(
       :int, [:pointer, :string, :int, :pointer]
     ) do |_client_ptr, json, _json_len, _opaque|
+      # Pass the stats hash to callback in config
       if Rdkafka::Config.statistics_callback
         stats = JSON.parse(json)
-
-        # If user requested statistics callbacks, we can use the statistics data to get the
-        # partitions count for each topic when this data is published. That way we do not have
-        # to query this information when user is using `partition_key`. This takes around 0.02ms
-        # every statistics interval period (most likely every 5 seconds) and saves us from making
-        # any queries to the cluster for the partition count.
-        #
-        # One edge case is if user would set the `statistics.interval.ms` much higher than the
-        # default current partition count refresh (30 seconds). This is taken care of as the lack
-        # of reporting to the partitions cache will cause cache expire and blocking refresh.
-        #
-        # If user sets `topic.metadata.refresh.interval.ms` too high this is on the user.
-        #
-        # Since this cache is shared, having few consumers and/or producers in one process will
-        # automatically improve the querying times even with low refresh times.
-        (stats['topics'] || EMPTY_HASH).each do |topic_name, details|
-          partitions_count = details['partitions'].keys.reject { |k| k == '-1' }.size
-
-          next unless partitions_count.positive?
-
-          Rdkafka::Producer.partitions_count_cache.set(topic_name, partitions_count)
-        end
-
         Rdkafka::Config.statistics_callback.call(stats)
       end
 
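For context, the callback this hunk trims is registered process-wide on `Rdkafka::Config` and, after the change, simply receives the parsed statistics hash. A minimal usage sketch (broker address and interval are placeholder values, not taken from this diff):

```ruby
require "rdkafka"

# Invoked every `statistics.interval.ms` with the librdkafka stats JSON already parsed into a Hash
Rdkafka::Config.statistics_callback = lambda do |stats|
  puts "librdkafka instance #{stats['name']}: #{stats['msg_cnt']} messages currently queued"
end

config = Rdkafka::Config.new(
  "bootstrap.servers"      => "localhost:9092", # placeholder broker
  "statistics.interval.ms" => 5_000             # emit stats every 5 seconds
)
producer = config.producer
```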
data/lib/rdkafka/error.rb
CHANGED
@@ -126,14 +126,7 @@ module Rdkafka
       else
         ''
       end
-
-      err_str = Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)
-      base = "#{message_prefix_part}#{err_str} (#{code})"
-
-      return base if broker_message.nil?
-      return base if broker_message.empty?
-
-      "#{base}\n#{broker_message}"
+      "#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})"
     end
 
     # Whether this error indicates the partition is EOF.
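With this simplification the message is again just the librdkafka error string plus the symbolic code; the broker-provided detail introduced in 0.19.3 is no longer appended. A hedged illustration of how calling code typically observes it (topic name and settings are made up):

```ruby
require "rdkafka"

producer = Rdkafka::Config.new(
  "bootstrap.servers"  => "localhost:9092", # placeholder broker
  "message.timeout.ms" => 100               # fail fast so the error surfaces quickly
).producer

begin
  producer.produce(topic: "it-nonexistent-topic", payload: "x").wait(max_wait_timeout: 5)
rescue Rdkafka::RdkafkaError => e
  e.code    # => e.g. :msg_timed_out
  e.message # => roughly "<librdkafka error string> (msg_timed_out)", with no broker detail line
end
```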
data/lib/rdkafka/native_kafka.rb
CHANGED
@@ -126,13 +126,9 @@ module Rdkafka
       # and would continue to run, trying to destroy inner twice
       return unless @inner
 
-      yield if block_given?
-
       Rdkafka::Bindings.rd_kafka_destroy(@inner)
       @inner = nil
       @opaque = nil
-      @poll_mutex.unlock
-      @poll_mutex = nil
     end
   end
 end
data/lib/rdkafka/producer.rb
CHANGED
@@ -6,31 +6,13 @@ module Rdkafka
     include Helpers::Time
     include Helpers::OAuth
 
-    #
-
-
-    # Global (process wide) partitions cache. We use it to store number of topics partitions,
-    # either from the librdkafka statistics (if enabled) or via direct inline calls every now and
-    # then. Since the partitions count can only grow and should be same for all consumers and
-    # producers, we can use a global cache as long as we ensure that updates only move up.
-    #
-    # @note It is critical to remember, that not all users may have statistics callbacks enabled,
-    #   hence we should not make assumption that this cache is always updated from the stats.
-    #
-    # @return [Rdkafka::Producer::PartitionsCountCache]
-    def self.partitions_count_cache
-      @@partitions_count_cache
-    end
-
-    # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
-    def self.partitions_count_cache=(partitions_count_cache)
-      @@partitions_count_cache = partitions_count_cache
-    end
+    # Cache partitions count for 30 seconds
+    PARTITIONS_COUNT_TTL = 30
 
     # Empty hash used as a default
     EMPTY_HASH = {}.freeze
 
-    private_constant :EMPTY_HASH
+    private_constant :PARTITIONS_COUNT_TTL, :EMPTY_HASH
 
     # Raised when there was a critical issue when invoking rd_kafka_topic_new
     # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
@@ -61,6 +43,25 @@ module Rdkafka
 
       # Makes sure, that native kafka gets closed before it gets GCed by Ruby
       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+
+      @_partitions_count_cache = Hash.new do |cache, topic|
+        topic_metadata = nil
+
+        @native_kafka.with_inner do |inner|
+          topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+        end
+
+        partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
+
+        # This approach caches the failure to fetch only for 1 second. This will make sure, that
+        # we do not cache the failure for too long but also "buys" us a bit of time in case there
+        # would be issues in the cluster so we won't overaload it with consecutive requests
+        cache[topic] = if partition_count.positive?
+          [monotonic_now, partition_count]
+        else
+          [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
+        end
+      end
     end
 
     # Sets alternative set of configuration details that can be set per topic
@@ -283,31 +284,18 @@ module Rdkafka
     # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
     #   auto-created after returning nil.
     #
-    # @note We cache the partition count for a given topic for given time.
-    #   enabled for any producer or consumer, it will take precedence over per instance fetching.
-    #
+    # @note We cache the partition count for a given topic for given time.
     #   This prevents us in case someone uses `partition_key` from querying for the count with
-    #   each message. Instead we query
-    #
+    #   each message. Instead we query once every 30 seconds at most if we have a valid partition
+    #   count or every 5 seconds in case we were not able to obtain number of partitions
     def partition_count(topic)
       closed_producer_check(__method__)
 
-
-
-
-      @native_kafka.with_inner do |inner|
-        topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
-      end
-
-      topic_metadata ? topic_metadata[:partition_count] : -1
+      @_partitions_count_cache.delete_if do |_, cached|
+        monotonic_now - cached.first > PARTITIONS_COUNT_TTL
       end
-    rescue Rdkafka::RdkafkaError => e
-      # If the topic does not exist, it will be created or if not allowed another error will be
-      # raised. We here return -1 so this can happen without early error happening on metadata
-      # discovery.
-      return -1 if e.code == :unknown_topic_or_part
 
-
+      @_partitions_count_cache[topic].last
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
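Taken together, the restored logic is a per-producer TTL cache: entries older than `PARTITIONS_COUNT_TTL` are evicted on read and recomputed lazily through the `Hash` default block, while failed lookups are backdated so they are retried after only a few seconds. A self-contained sketch of the same idea (hypothetical class, not the gem's API):

```ruby
# Standalone illustration of a TTL-style partitions-count cache; the fetcher
# block stands in for the Rdkafka::Metadata lookup done by the producer.
class TtlPartitionsCache
  TTL = 30            # seconds a successful lookup stays cached
  FAILURE_GRACE = 5   # seconds a failed lookup (-1) stays cached

  def initialize(&fetcher)
    @fetcher = fetcher
    @store = {} # topic => [cached_at_monotonic, partition_count]
  end

  def count(topic)
    now = Process.clock_gettime(Process::CLOCK_MONOTONIC)

    # Evict entries whose TTL elapsed so they get re-fetched below
    @store.delete_if { |_, (cached_at, _)| now - cached_at > TTL }

    @store[topic] ||= begin
      partitions = @fetcher.call(topic)

      if partitions.positive?
        [now, partitions]
      else
        # Backdate the entry so a failure expires after FAILURE_GRACE, not TTL
        [now - TTL + FAILURE_GRACE, partitions]
      end
    end

    @store[topic].last
  end
end

cache = TtlPartitionsCache.new { |topic| topic == "known" ? 12 : -1 }
cache.count("known")   # => 12, served from cache for ~30 seconds
cache.count("missing") # => -1, retried after ~5 seconds
```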
data/lib/rdkafka/version.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.
-  LIBRDKAFKA_VERSION = "2.
-  LIBRDKAFKA_SOURCE_SHA256 = "
+  VERSION = "0.20.0.rc1"
+  LIBRDKAFKA_VERSION = "2.10.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "004b1cc2685d1d6d416b90b426a0a9d27327a214c6b807df6f9ea5887346ba3a"
 end
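Since the source checksum is pinned here, the vendored tarball can be verified locally; a hedged sketch (the relative dist/ path is an assumption based on the file listing above, not part of this diff):

```ruby
require "digest"
require "rdkafka/version"

# Assumed location of the vendored archive inside the gem checkout
tarball = "dist/librdkafka-#{Rdkafka::LIBRDKAFKA_VERSION}.tar.gz"

# Compare the actual digest of the file with the pinned constant
actual = Digest::SHA256.file(tarball).hexdigest
raise "checksum mismatch for #{tarball}" unless actual == Rdkafka::LIBRDKAFKA_SOURCE_SHA256
puts "ok: #{tarball} matches LIBRDKAFKA_SOURCE_SHA256"
```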
data/lib/rdkafka.rb
CHANGED
@@ -42,7 +42,6 @@ require "rdkafka/consumer/topic_partition_list"
 require "rdkafka/error"
 require "rdkafka/metadata"
 require "rdkafka/native_kafka"
-require "rdkafka/producer/partitions_count_cache"
 require "rdkafka/producer"
 require "rdkafka/producer/delivery_handle"
 require "rdkafka/producer/delivery_report"
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -34,7 +34,7 @@ describe Rdkafka::Admin do
   describe '#describe_errors' do
     let(:errors) { admin.class.describe_errors }
 
-    it { expect(errors.size).to eq(
+    it { expect(errors.size).to eq(172) }
     it { expect(errors[-184]).to eq(code: -184, description: 'Local: Queue full', name: '_QUEUE_FULL') }
     it { expect(errors[21]).to eq(code: 21, description: 'Broker: Invalid required acks value', name: 'INVALID_REQUIRED_ACKS') }
   end
data/spec/rdkafka/consumer_spec.rb
CHANGED
@@ -842,9 +842,9 @@ describe Rdkafka::Consumer do
       missing_topic = SecureRandom.uuid
       consumer.subscribe(missing_topic)
 
-
-
-      }.
+      # @note it used to raise "Subscribed topic not available" in previous librdkafka versions
+      # but this behaviour has been changed
+      expect { consumer.poll(1_000) }.not_to raise_error
     end
   end
 
data/spec/rdkafka/producer_spec.rb
CHANGED
@@ -364,48 +364,6 @@ describe Rdkafka::Producer do
     expect(message.key).to eq "key utf8"
   end
 
-  it "should produce a message to a non-existing topic with key and partition key" do
-    new_topic = "it-#{SecureRandom.uuid}"
-
-    handle = producer.produce(
-      # Needs to be a new topic each time
-      topic: new_topic,
-      payload: "payload",
-      key: "key",
-      partition_key: "partition_key",
-      label: "label"
-    )
-
-    # Should be pending at first
-    expect(handle.pending?).to be true
-    expect(handle.label).to eq "label"
-
-    # Check delivery handle and report
-    report = handle.wait(max_wait_timeout: 5)
-    expect(handle.pending?).to be false
-    expect(report).not_to be_nil
-    expect(report.partition).to eq 0
-    expect(report.offset).to be >= 0
-    expect(report.label).to eq "label"
-
-    # Flush and close producer
-    producer.flush
-    producer.close
-
-    # Consume message and verify its content
-    message = wait_for_message(
-      topic: new_topic,
-      delivery_report: report,
-      consumer: consumer
-    )
-    expect(message.partition).to eq 0
-    expect(message.payload).to eq "payload"
-    expect(message.key).to eq "key"
-    # Since api.version.request is on by default we will get
-    # the message creation timestamp if it's not set.
-    expect(message.timestamp).to be_within(10).of(Time.now)
-  end
-
   context "timestamp" do
     it "should raise a type error if not nil, integer or time" do
       expect {
@@ -679,25 +637,6 @@ describe Rdkafka::Producer do
     end
   end
 
-  context "when topic does not exist and allow.auto.create.topics is false" do
-    let(:producer) do
-      rdkafka_producer_config(
-        "bootstrap.servers": "localhost:9092",
-        "message.timeout.ms": 100,
-        "allow.auto.create.topics": false
-      ).producer
-    end
-
-    it "should contain the error in the response when not deliverable" do
-      handler = producer.produce(topic: "it-#{SecureRandom.uuid}", payload: nil, label: 'na')
-      # Wait for the async callbacks and delivery registry to update
-      sleep(2)
-      expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
-      expect(handler.create_result.error.code).to eq(:msg_timed_out)
-      expect(handler.create_result.label).to eq('na')
-    end
-  end
-
   describe '#partition_count' do
     it { expect(producer.partition_count('example_topic')).to eq(1) }
 
@@ -715,11 +654,12 @@ describe Rdkafka::Producer do
 
     context 'when the partition count value was cached but time expired' do
       before do
-        ::
+        allow(::Process).to receive(:clock_gettime).and_return(0, 30.02)
+        producer.partition_count('example_topic')
         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
       end
 
-      it 'expect to query it again' do
+      it 'expect not to query it again' do
         producer.partition_count('example_topic')
         expect(::Rdkafka::Metadata).to have_received(:new)
       end
@@ -1102,133 +1042,4 @@ describe Rdkafka::Producer do
       expect(message.headers['version']).to eq('2.1.3')
     end
   end
-
-  describe 'with active statistics callback' do
-    let(:producer) do
-      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
-    end
-
-    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
-    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-
-    context "when using partition key" do
-      before do
-        Rdkafka::Config.statistics_callback = ->(*) {}
-
-        # This call will make a blocking request to the metadata cache
-        producer.produce(
-          topic: "produce_test_topic",
-          payload: "payload headers",
-          partition_key: "test"
-        ).wait
-
-        pre_statistics_ttl
-
-        # We wait to make sure that statistics are triggered and that there is a refresh
-        sleep(1.5)
-
-        post_statistics_ttl
-      end
-
-      it 'expect to update ttl on the partitions count cache via statistics' do
-        expect(pre_statistics_ttl).to be < post_statistics_ttl
-      end
-    end
-
-    context "when not using partition key" do
-      before do
-        Rdkafka::Config.statistics_callback = ->(*) {}
-
-        # This call will make a blocking request to the metadata cache
-        producer.produce(
-          topic: "produce_test_topic",
-          payload: "payload headers"
-        ).wait
-
-        pre_statistics_ttl
-
-        # We wait to make sure that statistics are triggered and that there is a refresh
-        sleep(1.5)
-
-        # This will anyhow be populated from statistic
-        post_statistics_ttl
-      end
-
-      it 'expect not to update ttl on the partitions count cache via blocking but via use stats' do
-        expect(pre_statistics_ttl).to be_nil
-        expect(post_statistics_ttl).not_to be_nil
-      end
-    end
-  end
-
-  describe 'without active statistics callback' do
-    let(:producer) do
-      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
-    end
-
-    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
-    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-
-    context "when using partition key" do
-      before do
-        # This call will make a blocking request to the metadata cache
-        producer.produce(
-          topic: "produce_test_topic",
-          payload: "payload headers",
-          partition_key: "test"
-        ).wait
-
-        pre_statistics_ttl
-
-        # We wait to make sure that statistics are triggered and that there is a refresh
-        sleep(1.5)
-
-        post_statistics_ttl
-      end
-
-      it 'expect not to update ttl on the partitions count cache via statistics' do
-        expect(pre_statistics_ttl).to eq post_statistics_ttl
-      end
-    end
-
-    context "when not using partition key" do
-      before do
-        # This call will make a blocking request to the metadata cache
-        producer.produce(
-          topic: "produce_test_topic",
-          payload: "payload headers"
-        ).wait
-
-        pre_statistics_ttl
-
-        # We wait to make sure that statistics are triggered and that there is a refresh
-        sleep(1.5)
-
-        # This should not be populated because stats are not in use
-        post_statistics_ttl
-      end
-
-      it 'expect not to update ttl on the partitions count cache via anything' do
-        expect(pre_statistics_ttl).to be_nil
-        expect(post_statistics_ttl).to be_nil
-      end
-    end
-  end
-
-  describe 'with other fiber closing' do
-    context 'when we create many fibers and close producer in some of them' do
-      it 'expect not to crash ruby' do
-        10.times do |i|
-          producer = rdkafka_producer_config.producer
-
-          Fiber.new do
-            GC.start
-            producer.close
-          end.resume
-        end
-      end
-    end
-  end
 end
data/spec/spec_helper.rb
CHANGED
@@ -18,9 +18,6 @@ def rdkafka_base_config
     :"api.version.request" => false,
     :"broker.version.fallback" => "1.0",
     :"bootstrap.servers" => "localhost:9092",
-    # Display statistics and refresh often just to cover those in specs
-    :'statistics.interval.ms' => 1_000,
-    :'topic.metadata.refresh.interval.ms' => 1_000
   }
 end
 
@@ -128,12 +125,6 @@ RSpec.configure do |config|
   config.filter_run focus: true
   config.run_all_when_everything_filtered = true
 
-  config.before(:each) do
-    Rdkafka::Config.statistics_callback = nil
-    # We need to clear it so state does not leak between specs
-    Rdkafka::Producer.partitions_count_cache.to_h.clear
-  end
-
   config.before(:suite) do
     admin = rdkafka_config.admin
     {
data.tar.gz.sig
ADDED
Binary file