karafka-rdkafka 0.19.1 → 0.19.4
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +13 -3
- data/.github/workflows/push.yml +38 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +13 -0
- data/README.md +11 -12
- data/docker-compose.yml +1 -1
- data/karafka-rdkafka.gemspec +1 -6
- data/lib/rdkafka/bindings.rb +25 -1
- data/lib/rdkafka/error.rb +8 -1
- data/lib/rdkafka/native_kafka.rb +4 -0
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +34 -29
- data/lib/rdkafka/version.rb +1 -1
- data/lib/rdkafka.rb +1 -0
- data/spec/rdkafka/admin_spec.rb +12 -10
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +131 -3
- data/spec/spec_helper.rb +9 -0
- metadata +11 -33
- checksums.yaml.gz.sig +0 -0
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -3
- metadata.gz.sig +0 -0
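
The headline change in this range is the new topic partitions count cache (`lib/rdkafka/producer/partitions_count_cache.rb`) plus the specs that cover it. For orientation, the interface exercised by those specs looks roughly like the sketch below; the class and method names come from the spec code further down, while the concrete values are illustrative only.

```ruby
require 'rdkafka'

# TTL in seconds; the spec comments suggest 30 is the default when none is given.
cache = Rdkafka::Producer::PartitionsCountCache.new(30)

# Fetch-or-compute: the block runs only when the topic is missing or its entry expired.
count = cache.get('example_topic') { 5 }

# Direct writes never lower an already cached count, but they refresh its timestamp.
cache.set('example_topic', 10)

# The producer class exposes a shared instance; the updated specs reset it between examples.
Rdkafka::Producer.partitions_count_cache.to_h.clear
```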
@@ -0,0 +1,359 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Rdkafka::Producer::PartitionsCountCache do
+  let(:default_ttl) { 1 } # Reduced from 30 to speed up tests
+  let(:custom_ttl) { 0.5 } # Half the default TTL
+  let(:cache) { described_class.new(default_ttl) }
+  let(:custom_ttl_cache) { described_class.new(custom_ttl) }
+  let(:topic) { "test_topic" }
+  let(:topic2) { "test_topic2" }
+  let(:partition_count) { 5 }
+  let(:higher_partition_count) { 10 }
+  let(:lower_partition_count) { 3 }
+  let(:even_higher_partition_count) { 15 }
+
+  describe "#initialize" do
+    it "creates a cache with default TTL when no TTL is specified" do
+      standard_cache = described_class.new
+      expect(standard_cache).to be_a(described_class)
+    end
+
+    it "creates a cache with custom TTL when specified" do
+      expect(custom_ttl_cache).to be_a(described_class)
+    end
+  end
+
+  describe "#get" do
+    context "when cache is empty" do
+      it "yields to get the value and caches it" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(partition_count)
+
+        # Verify caching by checking if block is called again
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          partition_count + 1 # Different value to ensure we get cached value
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(partition_count)
+      end
+    end
+
+    context "when cache has a value" do
+      before do
+        # Seed the cache with a value
+        cache.get(topic) { partition_count }
+      end
+
+      it "returns cached value without yielding if not expired" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count + 1 # Different value to ensure we get cached one
+        end
+
+        expect(block_called).to be false
+        expect(result).to eq(partition_count)
+      end
+
+      it "yields to get new value when TTL has expired" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        block_called = false
+        new_count = partition_count + 1
+        result = cache.get(topic) do
+          block_called = true
+          new_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(new_count)
+
+        # Verify the new value is cached
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          new_count + 1 # Different value again
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(new_count)
+      end
+
+      it "respects a custom TTL" do
+        # Seed the custom TTL cache with a value
+        custom_ttl_cache.get(topic) { partition_count }
+
+        # Wait for custom TTL to expire but not default TTL
+        sleep(custom_ttl + 0.1)
+
+        # Custom TTL cache should refresh
+        custom_block_called = false
+        custom_result = custom_ttl_cache.get(topic) do
+          custom_block_called = true
+          higher_partition_count
+        end
+
+        expect(custom_block_called).to be true
+        expect(custom_result).to eq(higher_partition_count)
+
+        # Default TTL cache should not refresh yet
+        default_block_called = false
+        default_result = cache.get(topic) do
+          default_block_called = true
+          higher_partition_count
+        end
+
+        expect(default_block_called).to be false
+        expect(default_result).to eq(partition_count)
+      end
+    end
+
+    context "when new value is obtained" do
+      before do
+        # Seed the cache with initial value
+        cache.get(topic) { partition_count }
+      end
+
+      it "updates cache when new value is higher than cached value" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Get higher value
+        result = cache.get(topic) { higher_partition_count }
+        expect(result).to eq(higher_partition_count)
+
+        # Verify it was cached
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "preserves higher cached value when new value is lower" do
+        # First update to higher value
+        sleep(default_ttl + 0.1)
+        cache.get(topic) { higher_partition_count }
+
+        # Then try to update to lower value
+        sleep(default_ttl + 0.1)
+        result = cache.get(topic) { lower_partition_count }
+
+        expect(result).to eq(higher_partition_count)
+
+        # and subsequent gets should return the previously cached higher value
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "handles multiple topics independently" do
+        # Set up both topics with different values
+        cache.get(topic) { partition_count }
+        cache.get(topic2) { higher_partition_count }
+
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Update first topic
+        first_result = cache.get(topic) { even_higher_partition_count }
+        expect(first_result).to eq(even_higher_partition_count)
+
+        # Update second topic independently
+        second_updated = higher_partition_count + 3
+        second_result = cache.get(topic2) { second_updated }
+        expect(second_result).to eq(second_updated)
+
+        # Both topics should have their updated values
+        expect(cache.get(topic) { fail "Should not be called" }).to eq(even_higher_partition_count)
+        expect(cache.get(topic2) { fail "Should not be called" }).to eq(second_updated)
+      end
+    end
+  end
+
+  describe "#set" do
+    context "when cache is empty" do
+      it "adds a new entry to the cache" do
+        cache.set(topic, partition_count)
+
+        # Verify through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "when cache already has a value" do
+      before do
+        cache.set(topic, partition_count)
+      end
+
+      it "updates cache when new value is higher" do
+        cache.set(topic, higher_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(higher_partition_count)
+      end
+
+      it "keeps original value when new value is lower" do
+        cache.set(topic, lower_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+
+      it "updates the timestamp even when keeping original value" do
+        # Set initial value
+        cache.set(topic, partition_count)
+
+        # Wait until close to TTL expiring
+        sleep(default_ttl - 0.2)
+
+        # Set lower value (should update timestamp but not value)
+        cache.set(topic, lower_partition_count)
+
+        # Wait a bit more, but still under the full TTL if timestamp was refreshed
+        sleep(0.3)
+
+        # Should still be valid due to timestamp refresh
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "with concurrent access" do
+      it "correctly handles simultaneous updates to the same topic" do
+        # This test focuses on the final value after concurrent updates
+        threads = []
+
+        # Create 5 threads that all try to update the same topic with increasing values
+        5.times do |i|
+          threads << Thread.new do
+            value = 10 + i # Start at 10 to ensure all are higher than initial value
+            cache.set(topic, value)
+          end
+        end
+
+        # Wait for all threads to complete
+        threads.each(&:join)
+
+        # The highest value (14) should be stored and accessible through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(14)
+      end
+    end
+  end
+
+  describe "TTL behavior" do
+    it "treats entries as expired when they exceed TTL" do
+      # Set initial value
+      cache.get(topic) { partition_count }
+
+      # Wait just under TTL
+      sleep(default_ttl - 0.1)
+
+      # Value should still be cached (block should not be called)
+      result = cache.get(topic) { fail "Should not be called when cache is valid" }
+      expect(result).to eq(partition_count)
+
+      # Now wait to exceed TTL
+      sleep(0.2) # Total sleep is now default_ttl + 0.1
+
+      # Cache should be expired, block should be called
+      block_called = false
+      new_value = partition_count + 3
+      result = cache.get(topic) do
+        block_called = true
+        new_value
+      end
+
+      expect(block_called).to be true
+      expect(result).to eq(new_value)
+    end
+  end
+
+  describe "comprehensive scenarios" do
+    it "handles a full lifecycle of cache operations" do
+      # 1. Initial cache miss, fetch and store
+      result1 = cache.get(topic) { partition_count }
+      expect(result1).to eq(partition_count)
+
+      # 2. Cache hit
+      result2 = cache.get(topic) { fail "Should not be called" }
+      expect(result2).to eq(partition_count)
+
+      # 3. Attempt to set lower value
+      cache.set(topic, lower_partition_count)
+      result3 = cache.get(topic) { fail "Should not be called" }
+      # Should still return the higher original value
+      expect(result3).to eq(partition_count)
+
+      # 4. Set higher value
+      cache.set(topic, higher_partition_count)
+      result4 = cache.get(topic) { fail "Should not be called" }
+      expect(result4).to eq(higher_partition_count)
+
+      # 5. TTL expires, new value provided is lower
+      sleep(default_ttl + 0.1)
+      result5 = cache.get(topic) { lower_partition_count }
+      # This returns the highest value
+      expect(result5).to eq(higher_partition_count)
+
+      # 6. But subsequent get should return the higher cached value
+      result6 = cache.get(topic) { fail "Should not be called" }
+      expect(result6).to eq(higher_partition_count)
+
+      # 7. Set new highest value directly
+      even_higher = higher_partition_count + 5
+      cache.set(topic, even_higher)
+      result7 = cache.get(topic) { fail "Should not be called" }
+      expect(result7).to eq(even_higher)
+    end
+
+    it "handles multiple topics with different TTLs correctly" do
+      # Set up initial values
+      cache.get(topic) { partition_count }
+      custom_ttl_cache.get(topic) { partition_count }
+
+      # Wait past custom TTL but not default TTL
+      sleep(custom_ttl + 0.1)
+
+      # Default cache should NOT refresh (still within default TTL)
+      default_result = cache.get(topic) { fail "Should not be called for default cache" }
+      # Original value should be maintained
+      expect(default_result).to eq(partition_count)
+
+      # Custom TTL cache SHOULD refresh (past custom TTL)
+      custom_cache_value = partition_count + 8
+      custom_block_called = false
+      custom_result = custom_ttl_cache.get(topic) do
+        custom_block_called = true
+        custom_cache_value
+      end
+
+      expect(custom_block_called).to be true
+      expect(custom_result).to eq(custom_cache_value)
+
+      # Now wait past default TTL
+      sleep(default_ttl - custom_ttl + 0.1)
+
+      # Now default cache should also refresh
+      default_block_called = false
+      new_default_value = partition_count + 10
+      new_default_result = cache.get(topic) do
+        default_block_called = true
+        new_default_value
+      end
+
+      expect(default_block_called).to be true
+      expect(new_default_result).to eq(new_default_value)
+    end
+  end
+end
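
Taken together, the spec above pins down three properties: entries expire after the TTL, a cached partition count never decreases, and `set` refreshes an entry's timestamp even when the existing value is kept. A minimal, single-threaded sketch of those semantics (illustrative only; not the gem's actual `lib/rdkafka/producer/partitions_count_cache.rb`, which also has to be thread-safe) could look like this:

```ruby
# Hypothetical class name; shows only the behavior the specs assert.
class TtlMaxCache
  def initialize(ttl = 30)
    @ttl = ttl
    @entries = {} # topic => [fetched_at, partition_count]
  end

  # Returns the cached count, yielding for a fresh one once the TTL elapsed.
  # A fresh value never lowers what is already cached.
  def get(topic)
    fetched_at, cached = @entries[topic]
    return cached if fetched_at && monotonic_now - fetched_at < @ttl

    fresh = yield
    best = cached ? [cached, fresh].max : fresh
    @entries[topic] = [monotonic_now, best]
    best
  end

  # Keeps the higher of the existing and new counts, refreshing the timestamp
  # either way so the entry stays valid for another TTL window.
  def set(topic, count)
    _, cached = @entries[topic]
    @entries[topic] = [monotonic_now, cached ? [cached, count].max : count]
  end

  def to_h
    @entries
  end

  private

  def monotonic_now
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end
end
```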
@@ -654,12 +654,11 @@ describe Rdkafka::Producer do
 
     context 'when the partition count value was cached but time expired' do
       before do
-
-        producer.partition_count('example_topic')
+        ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
       end
 
-      it 'expect
+      it 'expect to query it again' do
         producer.partition_count('example_topic')
         expect(::Rdkafka::Metadata).to have_received(:new)
       end
@@ -1042,4 +1041,133 @@ describe Rdkafka::Producer do
       expect(message.headers['version']).to eq('2.1.3')
     end
   end
+
+  describe 'with active statistics callback' do
+    let(:producer) do
+      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+    end
+
+    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+    context "when using partition key" do
+      before do
+        Rdkafka::Config.statistics_callback = ->(*) {}
+
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers",
+          partition_key: "test"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        post_statistics_ttl
+      end
+
+      it 'expect to update ttl on the partitions count cache via statistics' do
+        expect(pre_statistics_ttl).to be < post_statistics_ttl
+      end
+    end
+
+    context "when not using partition key" do
+      before do
+        Rdkafka::Config.statistics_callback = ->(*) {}
+
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        # This will anyhow be populated from statistic
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via blocking but via use stats' do
+        expect(pre_statistics_ttl).to be_nil
+        expect(post_statistics_ttl).not_to be_nil
+      end
+    end
+  end
+
+  describe 'without active statistics callback' do
+    let(:producer) do
+      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+    end
+
+    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+    context "when using partition key" do
+      before do
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers",
+          partition_key: "test"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via statistics' do
+        expect(pre_statistics_ttl).to eq post_statistics_ttl
+      end
+    end
+
+    context "when not using partition key" do
+      before do
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        # This should not be populated because stats are not in use
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via anything' do
+        expect(pre_statistics_ttl).to be_nil
+        expect(post_statistics_ttl).to be_nil
+      end
+    end
+  end
+
+  describe 'with other fiber closing' do
+    context 'when we create many fibers and close producer in some of them' do
+      it 'expect not to crash ruby' do
+        10.times do |i|
+          producer = rdkafka_producer_config.producer
+
+          Fiber.new do
+            GC.start
+            producer.close
+          end.resume
+        end
+      end
+    end
+  end
 end
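
The new producer specs above revolve around the interaction between the partitions count cache and librdkafka statistics: with `statistics.interval.ms` set and a statistics callback installed, the cached partition counts get their timestamps refreshed from emitted statistics rather than through blocking metadata requests. Outside the spec helpers, wiring that up would look roughly like this (broker address, interval, and the callback body are placeholders):

```ruby
require 'rdkafka'

# Class-level callback, as used in the specs; an empty lambda is enough to enable the flow.
Rdkafka::Config.statistics_callback = ->(stats) { puts stats['name'] }

config = Rdkafka::Config.new(
  'bootstrap.servers': 'localhost:9092',
  'statistics.interval.ms': 1_000 # emit statistics every second
)

producer = config.producer
producer.produce(topic: 'example_topic', payload: 'hello').wait
producer.close
```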
data/spec/spec_helper.rb
CHANGED
@@ -18,6 +18,9 @@ def rdkafka_base_config
     :"api.version.request" => false,
     :"broker.version.fallback" => "1.0",
     :"bootstrap.servers" => "localhost:9092",
+    # Display statistics and refresh often just to cover those in specs
+    :'statistics.interval.ms' => 1_000,
+    :'topic.metadata.refresh.interval.ms' => 1_000
   }
 end
 
@@ -125,6 +128,12 @@ RSpec.configure do |config|
   config.filter_run focus: true
   config.run_all_when_everything_filtered = true
 
+  config.before(:each) do
+    Rdkafka::Config.statistics_callback = nil
+    # We need to clear it so state does not leak between specs
+    Rdkafka::Producer.partitions_count_cache.to_h.clear
+  end
+
   config.before(:suite) do
     admin = rdkafka_config.admin
     {
metadata
CHANGED
@@ -1,41 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: karafka-rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.19.
+  version: 0.19.4
 platform: ruby
 authors:
 - Thijs Cadier
 - Maciej Mensfeld
 bindir: bin
-cert_chain:
-- |
-  -----BEGIN CERTIFICATE-----
-  MIIEcDCCAtigAwIBAgIBATANBgkqhkiG9w0BAQsFADA/MRAwDgYDVQQDDAdjb250
-  YWN0MRcwFQYKCZImiZPyLGQBGRYHa2FyYWZrYTESMBAGCgmSJomT8ixkARkWAmlv
-  MB4XDTI0MDgyMzEwMTkyMFoXDTQ5MDgxNzEwMTkyMFowPzEQMA4GA1UEAwwHY29u
-  dGFjdDEXMBUGCgmSJomT8ixkARkWB2thcmFma2ExEjAQBgoJkiaJk/IsZAEZFgJp
-  bzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjLhLjQqUlNayxkXnO+
-  PsmCDs/KFIzhrsYMfLZRZNaWmzV3ujljMOdDjd4snM2X06C41iVdQPWjpe3j8vVe
-  ZXEWR/twSbOP6Eeg8WVH2wCOo0x5i7yhVn4UBLH4JpfEMCbemVcWQ9ry9OMg4WpH
-  Uu4dRwxFV7hzCz3p0QfNLRI4miAxnGWcnlD98IJRjBAksTuR1Llj0vbOrDGsL9ZT
-  JeXP2gdRLd8SqzAFJEWrbeTBCBU7gfSh3oMg5SVDLjaqf7Kz5wC/8bDZydzanOxB
-  T6CDXPsCnllmvTNx2ei2T5rGYJOzJeNTmJLLK6hJWUlAvaQSvCwZRvFJ0tVGLEoS
-  flqSr6uGyyl1eMUsNmsH4BqPEYcAV6P2PKTv2vUR8AP0raDvZ3xL1TKvfRb8xRpo
-  vPopCGlY5XBWEc6QERHfVLTIVsjnls2/Ujj4h8/TSfqqYnaHKefIMLbuD/tquMjD
-  iWQsW2qStBV0T+U7FijKxVfrfqZP7GxQmDAc9o1iiyAa3QIDAQABo3cwdTAJBgNV
-  HRMEAjAAMAsGA1UdDwQEAwIEsDAdBgNVHQ4EFgQU3O4dTXmvE7YpAkszGzR9DdL9
-  sbEwHQYDVR0RBBYwFIESY29udGFjdEBrYXJhZmthLmlvMB0GA1UdEgQWMBSBEmNv
-  bnRhY3RAa2FyYWZrYS5pbzANBgkqhkiG9w0BAQsFAAOCAYEAVKTfoLXn7mqdSxIR
-  eqxcR6Huudg1jes81s1+X0uiRTR3hxxKZ3Y82cPsee9zYWyBrN8TA4KA0WILTru7
-  Ygxvzha0SRPsSiaKLmgOJ+61ebI4+bOORzIJLpD6GxCxu1r7MI4+0r1u1xe0EWi8
-  agkVo1k4Vi8cKMLm6Gl9b3wG9zQBw6fcgKwmpjKiNnOLP+OytzUANrIUJjoq6oal
-  TC+f/Uc0TLaRqUaW/bejxzDWWHoM3SU6aoLPuerglzp9zZVzihXwx3jPLUVKDFpF
-  Rl2lcBDxlpYGueGo0/oNzGJAAy6js8jhtHC9+19PD53vk7wHtFTZ/0ugDQYnwQ+x
-  oml2fAAuVWpTBCgOVFe6XCQpMKopzoxQ1PjKztW2KYxgJdIBX87SnL3aWuBQmhRd
-  i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
-  ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
-  -----END CERTIFICATE-----
-date: 2025-04-07 00:00:00.000000000 Z
+cert_chain: []
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -174,6 +147,7 @@ files:
 - ".github/CODEOWNERS"
 - ".github/FUNDING.yml"
 - ".github/workflows/ci.yml"
+- ".github/workflows/push.yml"
 - ".github/workflows/verify-action-pins.yml"
 - ".gitignore"
 - ".rspec"
@@ -186,7 +160,6 @@ files:
 - MIT-LICENSE
 - README.md
 - Rakefile
-- certs/cert.pem
 - dist/librdkafka-2.8.0.tar.gz
 - dist/patches/rdkafka_global_init.patch
 - docker-compose.yml
@@ -233,6 +206,7 @@ files:
 - lib/rdkafka/producer.rb
 - lib/rdkafka/producer/delivery_handle.rb
 - lib/rdkafka/producer/delivery_report.rb
+- lib/rdkafka/producer/partitions_count_cache.rb
 - lib/rdkafka/version.rb
 - renovate.json
 - spec/rdkafka/abstract_handle_spec.rb
@@ -260,6 +234,8 @@ files:
 - spec/rdkafka/native_kafka_spec.rb
 - spec/rdkafka/producer/delivery_handle_spec.rb
 - spec/rdkafka/producer/delivery_report_spec.rb
+- spec/rdkafka/producer/partitions_count_cache_spec.rb
+- spec/rdkafka/producer/partitions_count_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
 licenses:
@@ -267,7 +243,7 @@ licenses:
 metadata:
   funding_uri: https://karafka.io/#become-pro
   homepage_uri: https://karafka.io
-  changelog_uri: https://
+  changelog_uri: https://karafka.io/docs/Changelog-Karafka-Rdkafka/
   bug_tracker_uri: https://github.com/karafka/karafka-rdkafka/issues
   source_code_uri: https://github.com/karafka/karafka-rdkafka
   documentation_uri: https://karafka.io/docs
@@ -286,7 +262,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.7
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
   It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
@@ -317,5 +293,7 @@ test_files:
 - spec/rdkafka/native_kafka_spec.rb
 - spec/rdkafka/producer/delivery_handle_spec.rb
 - spec/rdkafka/producer/delivery_report_spec.rb
+- spec/rdkafka/producer/partitions_count_cache_spec.rb
+- spec/rdkafka/producer/partitions_count_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
checksums.yaml.gz.sig
DELETED
Binary file

data/certs/cert.pem
DELETED
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEcDCCAtigAwIBAgIBATANBgkqhkiG9w0BAQsFADA/MRAwDgYDVQQDDAdjb250
-YWN0MRcwFQYKCZImiZPyLGQBGRYHa2FyYWZrYTESMBAGCgmSJomT8ixkARkWAmlv
-MB4XDTI0MDgyMzEwMTkyMFoXDTQ5MDgxNzEwMTkyMFowPzEQMA4GA1UEAwwHY29u
-dGFjdDEXMBUGCgmSJomT8ixkARkWB2thcmFma2ExEjAQBgoJkiaJk/IsZAEZFgJp
-bzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjLhLjQqUlNayxkXnO+
-PsmCDs/KFIzhrsYMfLZRZNaWmzV3ujljMOdDjd4snM2X06C41iVdQPWjpe3j8vVe
-ZXEWR/twSbOP6Eeg8WVH2wCOo0x5i7yhVn4UBLH4JpfEMCbemVcWQ9ry9OMg4WpH
-Uu4dRwxFV7hzCz3p0QfNLRI4miAxnGWcnlD98IJRjBAksTuR1Llj0vbOrDGsL9ZT
-JeXP2gdRLd8SqzAFJEWrbeTBCBU7gfSh3oMg5SVDLjaqf7Kz5wC/8bDZydzanOxB
-T6CDXPsCnllmvTNx2ei2T5rGYJOzJeNTmJLLK6hJWUlAvaQSvCwZRvFJ0tVGLEoS
-flqSr6uGyyl1eMUsNmsH4BqPEYcAV6P2PKTv2vUR8AP0raDvZ3xL1TKvfRb8xRpo
-vPopCGlY5XBWEc6QERHfVLTIVsjnls2/Ujj4h8/TSfqqYnaHKefIMLbuD/tquMjD
-iWQsW2qStBV0T+U7FijKxVfrfqZP7GxQmDAc9o1iiyAa3QIDAQABo3cwdTAJBgNV
-HRMEAjAAMAsGA1UdDwQEAwIEsDAdBgNVHQ4EFgQU3O4dTXmvE7YpAkszGzR9DdL9
-sbEwHQYDVR0RBBYwFIESY29udGFjdEBrYXJhZmthLmlvMB0GA1UdEgQWMBSBEmNv
-bnRhY3RAa2FyYWZrYS5pbzANBgkqhkiG9w0BAQsFAAOCAYEAVKTfoLXn7mqdSxIR
-eqxcR6Huudg1jes81s1+X0uiRTR3hxxKZ3Y82cPsee9zYWyBrN8TA4KA0WILTru7
-Ygxvzha0SRPsSiaKLmgOJ+61ebI4+bOORzIJLpD6GxCxu1r7MI4+0r1u1xe0EWi8
-agkVo1k4Vi8cKMLm6Gl9b3wG9zQBw6fcgKwmpjKiNnOLP+OytzUANrIUJjoq6oal
-TC+f/Uc0TLaRqUaW/bejxzDWWHoM3SU6aoLPuerglzp9zZVzihXwx3jPLUVKDFpF
-Rl2lcBDxlpYGueGo0/oNzGJAAy6js8jhtHC9+19PD53vk7wHtFTZ/0ugDQYnwQ+x
-oml2fAAuVWpTBCgOVFe6XCQpMKopzoxQ1PjKztW2KYxgJdIBX87SnL3aWuBQmhRd
-i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
-ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
------END CERTIFICATE-----

data.tar.gz.sig
DELETED

metadata.gz.sig
DELETED
Binary file