rdkafka 0.21.0 → 0.21.1.alpha2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci.yml +27 -11
- data/.github/workflows/push.yml +38 -0
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +9 -0
- data/README.md +1 -1
- data/docker-compose.yml +1 -1
- data/lib/rdkafka/bindings.rb +25 -1
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/native_kafka.rb +8 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +53 -36
- data/lib/rdkafka/version.rb +1 -1
- data/lib/rdkafka.rb +1 -0
- data/rdkafka.gemspec +2 -7
- data/renovate.json +13 -1
- data/spec/rdkafka/admin_spec.rb +12 -10
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +172 -3
- data/spec/spec_helper.rb +9 -0
- metadata +12 -34
- checksums.yaml.gz.sig +0 -0
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -0
data/spec/rdkafka/producer/partitions_count_spec.rb
ADDED
@@ -0,0 +1,359 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Rdkafka::Producer::PartitionsCountCache do
+  let(:default_ttl) { 1 } # Reduced from 30 to speed up tests
+  let(:custom_ttl) { 0.5 } # Half the default TTL
+  let(:cache) { described_class.new(default_ttl) }
+  let(:custom_ttl_cache) { described_class.new(custom_ttl) }
+  let(:topic) { "test_topic" }
+  let(:topic2) { "test_topic2" }
+  let(:partition_count) { 5 }
+  let(:higher_partition_count) { 10 }
+  let(:lower_partition_count) { 3 }
+  let(:even_higher_partition_count) { 15 }
+
+  describe "#initialize" do
+    it "creates a cache with default TTL when no TTL is specified" do
+      standard_cache = described_class.new
+      expect(standard_cache).to be_a(described_class)
+    end
+
+    it "creates a cache with custom TTL when specified" do
+      expect(custom_ttl_cache).to be_a(described_class)
+    end
+  end
+
+  describe "#get" do
+    context "when cache is empty" do
+      it "yields to get the value and caches it" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(partition_count)
+
+        # Verify caching by checking if block is called again
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          partition_count + 1 # Different value to ensure we get cached value
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(partition_count)
+      end
+    end
+
+    context "when cache has a value" do
+      before do
+        # Seed the cache with a value
+        cache.get(topic) { partition_count }
+      end
+
+      it "returns cached value without yielding if not expired" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count + 1 # Different value to ensure we get cached one
+        end
+
+        expect(block_called).to be false
+        expect(result).to eq(partition_count)
+      end
+
+      it "yields to get new value when TTL has expired" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        block_called = false
+        new_count = partition_count + 1
+        result = cache.get(topic) do
+          block_called = true
+          new_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(new_count)
+
+        # Verify the new value is cached
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          new_count + 1 # Different value again
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(new_count)
+      end
+
+      it "respects a custom TTL" do
+        # Seed the custom TTL cache with a value
+        custom_ttl_cache.get(topic) { partition_count }
+
+        # Wait for custom TTL to expire but not default TTL
+        sleep(custom_ttl + 0.1)
+
+        # Custom TTL cache should refresh
+        custom_block_called = false
+        custom_result = custom_ttl_cache.get(topic) do
+          custom_block_called = true
+          higher_partition_count
+        end
+
+        expect(custom_block_called).to be true
+        expect(custom_result).to eq(higher_partition_count)
+
+        # Default TTL cache should not refresh yet
+        default_block_called = false
+        default_result = cache.get(topic) do
+          default_block_called = true
+          higher_partition_count
+        end
+
+        expect(default_block_called).to be false
+        expect(default_result).to eq(partition_count)
+      end
+    end
+
+    context "when new value is obtained" do
+      before do
+        # Seed the cache with initial value
+        cache.get(topic) { partition_count }
+      end
+
+      it "updates cache when new value is higher than cached value" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Get higher value
+        result = cache.get(topic) { higher_partition_count }
+        expect(result).to eq(higher_partition_count)
+
+        # Verify it was cached
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "preserves higher cached value when new value is lower" do
+        # First update to higher value
+        sleep(default_ttl + 0.1)
+        cache.get(topic) { higher_partition_count }
+
+        # Then try to update to lower value
+        sleep(default_ttl + 0.1)
+        result = cache.get(topic) { lower_partition_count }
+
+        expect(result).to eq(higher_partition_count)
+
+        # and subsequent gets should return the previously cached higher value
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "handles multiple topics independently" do
+        # Set up both topics with different values
+        cache.get(topic) { partition_count }
+        cache.get(topic2) { higher_partition_count }
+
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Update first topic
+        first_result = cache.get(topic) { even_higher_partition_count }
+        expect(first_result).to eq(even_higher_partition_count)
+
+        # Update second topic independently
+        second_updated = higher_partition_count + 3
+        second_result = cache.get(topic2) { second_updated }
+        expect(second_result).to eq(second_updated)
+
+        # Both topics should have their updated values
+        expect(cache.get(topic) { fail "Should not be called" }).to eq(even_higher_partition_count)
+        expect(cache.get(topic2) { fail "Should not be called" }).to eq(second_updated)
+      end
+    end
+  end
+
+  describe "#set" do
+    context "when cache is empty" do
+      it "adds a new entry to the cache" do
+        cache.set(topic, partition_count)
+
+        # Verify through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "when cache already has a value" do
+      before do
+        cache.set(topic, partition_count)
+      end
+
+      it "updates cache when new value is higher" do
+        cache.set(topic, higher_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(higher_partition_count)
+      end
+
+      it "keeps original value when new value is lower" do
+        cache.set(topic, lower_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+
+      it "updates the timestamp even when keeping original value" do
+        # Set initial value
+        cache.set(topic, partition_count)
+
+        # Wait until close to TTL expiring
+        sleep(default_ttl - 0.2)
+
+        # Set lower value (should update timestamp but not value)
+        cache.set(topic, lower_partition_count)
+
+        # Wait a bit more, but still under the full TTL if timestamp was refreshed
+        sleep(0.3)
+
+        # Should still be valid due to timestamp refresh
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "with concurrent access" do
+      it "correctly handles simultaneous updates to the same topic" do
+        # This test focuses on the final value after concurrent updates
+        threads = []
+
+        # Create 5 threads that all try to update the same topic with increasing values
+        5.times do |i|
+          threads << Thread.new do
+            value = 10 + i # Start at 10 to ensure all are higher than initial value
+            cache.set(topic, value)
+          end
+        end
+
+        # Wait for all threads to complete
+        threads.each(&:join)
+
+        # The highest value (14) should be stored and accessible through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(14)
+      end
+    end
+  end
+
+  describe "TTL behavior" do
+    it "treats entries as expired when they exceed TTL" do
+      # Set initial value
+      cache.get(topic) { partition_count }
+
+      # Wait just under TTL
+      sleep(default_ttl - 0.1)
+
+      # Value should still be cached (block should not be called)
+      result = cache.get(topic) { fail "Should not be called when cache is valid" }
+      expect(result).to eq(partition_count)
+
+      # Now wait to exceed TTL
+      sleep(0.2) # Total sleep is now default_ttl + 0.1
+
+      # Cache should be expired, block should be called
+      block_called = false
+      new_value = partition_count + 3
+      result = cache.get(topic) do
+        block_called = true
+        new_value
+      end
+
+      expect(block_called).to be true
+      expect(result).to eq(new_value)
+    end
+  end
+
+  describe "comprehensive scenarios" do
+    it "handles a full lifecycle of cache operations" do
+      # 1. Initial cache miss, fetch and store
+      result1 = cache.get(topic) { partition_count }
+      expect(result1).to eq(partition_count)
+
+      # 2. Cache hit
+      result2 = cache.get(topic) { fail "Should not be called" }
+      expect(result2).to eq(partition_count)
+
+      # 3. Attempt to set lower value
+      cache.set(topic, lower_partition_count)
+      result3 = cache.get(topic) { fail "Should not be called" }
+      # Should still return the higher original value
+      expect(result3).to eq(partition_count)
+
+      # 4. Set higher value
+      cache.set(topic, higher_partition_count)
+      result4 = cache.get(topic) { fail "Should not be called" }
+      expect(result4).to eq(higher_partition_count)
+
+      # 5. TTL expires, new value provided is lower
+      sleep(default_ttl + 0.1)
+      result5 = cache.get(topic) { lower_partition_count }
+      # This returns the highest value
+      expect(result5).to eq(higher_partition_count)
+
+      # 6. But subsequent get should return the higher cached value
+      result6 = cache.get(topic) { fail "Should not be called" }
+      expect(result6).to eq(higher_partition_count)
+
+      # 7. Set new highest value directly
+      even_higher = higher_partition_count + 5
+      cache.set(topic, even_higher)
+      result7 = cache.get(topic) { fail "Should not be called" }
+      expect(result7).to eq(even_higher)
+    end
+
+    it "handles multiple topics with different TTLs correctly" do
+      # Set up initial values
+      cache.get(topic) { partition_count }
+      custom_ttl_cache.get(topic) { partition_count }
+
+      # Wait past custom TTL but not default TTL
+      sleep(custom_ttl + 0.1)
+
+      # Default cache should NOT refresh (still within default TTL)
+      default_result = cache.get(topic) { fail "Should not be called for default cache" }
+      # Original value should be maintained
+      expect(default_result).to eq(partition_count)
+
+      # Custom TTL cache SHOULD refresh (past custom TTL)
+      custom_cache_value = partition_count + 8
+      custom_block_called = false
+      custom_result = custom_ttl_cache.get(topic) do
+        custom_block_called = true
+        custom_cache_value
+      end
+
+      expect(custom_block_called).to be true
+      expect(custom_result).to eq(custom_cache_value)
+
+      # Now wait past default TTL
+      sleep(default_ttl - custom_ttl + 0.1)
+
+      # Now default cache should also refresh
+      default_block_called = false
+      new_default_value = partition_count + 10
+      new_default_result = cache.get(topic) do
+        default_block_called = true
+        new_default_value
+      end
+
+      expect(default_block_called).to be true
+      expect(new_default_result).to eq(new_default_value)
+    end
+  end
+end
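Note: the cache class these specs exercise is implemented in data/lib/rdkafka/producer/partitions_count_cache.rb (+216 lines), which this excerpt does not reproduce. For orientation only, a minimal sketch that would satisfy the behaviors tested above (per-topic timestamped entries, a TTL after which #get yields again, counts that only ever grow, #set refreshing the timestamp even when it keeps a higher old value, and a #to_h view of the backing hash) might look roughly like this; the class name and internals are illustrative, not the shipped code:

# A minimal, illustrative sketch only (an assumption, not the shipped class):
# the real implementation in lib/rdkafka/producer/partitions_count_cache.rb
# is larger and may differ in locking granularity and data layout.
class PartitionsCountCacheSketch
  DEFAULT_TTL = 30 # seconds; the spec above notes its 1s TTL was "Reduced from 30"

  def initialize(ttl = DEFAULT_TTL)
    @ttl = ttl
    @mutex = Mutex.new
    # topic name => [monotonic timestamp of last update, highest known partition count]
    @counts = {}
  end

  # Returns the cached count, or yields for a fresh one when the entry is
  # missing or older than the TTL. A freshly fetched lower value never
  # shrinks an already known higher count.
  def get(topic)
    @mutex.synchronize do
      timestamp, count = @counts[topic]

      return count if count && monotonic_now - timestamp < @ttl

      fresh = yield
      best = count && count > fresh ? count : fresh
      @counts[topic] = [monotonic_now, best]
      best
    end
  end

  # Stores a count, keeping the highest value seen so far but always
  # refreshing the timestamp, so out-of-band updates extend the TTL.
  def set(topic, new_count)
    @mutex.synchronize do
      _, current = @counts[topic]
      best = current && current > new_count ? current : new_count
      @counts[topic] = [monotonic_now, best]
    end
  end

  # Exposes the backing hash, as the producer specs and spec_helper below do
  # with partitions_count_cache.to_h.
  def to_h
    @counts
  end

  private

  def monotonic_now
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end
end

Under these semantics a temporarily under-reported partition count cannot shrink the cached value, which is what the "preserves higher cached value" examples above assert.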
data/spec/rdkafka/producer_spec.rb
CHANGED
@@ -654,12 +654,11 @@ describe Rdkafka::Producer do
 
     context 'when the partition count value was cached but time expired' do
       before do
-
-        producer.partition_count('consume_test_topic')
+        ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
       end
 
-      it 'expect
+      it 'expect to query it again' do
        producer.partition_count('consume_test_topic')
        expect(::Rdkafka::Metadata).to have_received(:new)
      end
@@ -819,4 +818,174 @@ describe Rdkafka::Producer do
       end
     end
   end
+
+  describe "#produce with headers" do
+    it "should produce a message with array headers" do
+      headers = {
+        "version" => ["2.1.3", "2.1.4"],
+        "type" => "String"
+      }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: headers
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers['type']).to eq('String')
+      expect(message.headers['version']).to eq(["2.1.3", "2.1.4"])
+    end
+
+    it "should produce a message with single value headers" do
+      headers = {
+        "version" => "2.1.3",
+        "type" => "String"
+      }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: headers
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers['type']).to eq('String')
+      expect(message.headers['version']).to eq('2.1.3')
+    end
+  end
+
+
+  describe 'with active statistics callback' do
+    let(:producer) do
+      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+    end
+
+    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+    context "when using partition key" do
+      before do
+        Rdkafka::Config.statistics_callback = ->(*) {}
+
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers",
+          partition_key: "test"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        post_statistics_ttl
+      end
+
+      it 'expect to update ttl on the partitions count cache via statistics' do
+        expect(pre_statistics_ttl).to be < post_statistics_ttl
+      end
+    end
+
+    context "when not using partition key" do
+      before do
+        Rdkafka::Config.statistics_callback = ->(*) {}
+
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        # This will anyhow be populated from statistic
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via blocking but via use stats' do
+        expect(pre_statistics_ttl).to be_nil
+        expect(post_statistics_ttl).not_to be_nil
+      end
+    end
+  end
+
+  describe 'without active statistics callback' do
+    let(:producer) do
+      rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+    end
+
+    let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+    let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+    let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+    context "when using partition key" do
+      before do
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers",
+          partition_key: "test"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via statistics' do
+        expect(pre_statistics_ttl).to eq post_statistics_ttl
+      end
+    end
+
+    context "when not using partition key" do
+      before do
+        # This call will make a blocking request to the metadata cache
+        producer.produce(
+          topic: "produce_test_topic",
+          payload: "payload headers"
+        ).wait
+
+        pre_statistics_ttl
+
+        # We wait to make sure that statistics are triggered and that there is a refresh
+        sleep(1.5)
+
+        # This should not be populated because stats are not in use
+        post_statistics_ttl
+      end
+
+      it 'expect not to update ttl on the partitions count cache via anything' do
+        expect(pre_statistics_ttl).to be_nil
+        expect(post_statistics_ttl).to be_nil
+      end
+    end
+  end
+
+  describe 'with other fiber closing' do
+    context 'when we create many fibers and close producer in some of them' do
+      it 'expect not to crash ruby' do
+        10.times do |i|
+          producer = rdkafka_producer_config.producer
+
+          Fiber.new do
+            GC.start
+            producer.close
+          end.resume
+        end
+      end
+    end
+  end
 end
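The producer-side pieces these specs rely on (a class-level partitions_count_cache accessor, partition_count reading through it, and a statistics-driven refresh) live in data/lib/rdkafka/producer.rb and data/lib/rdkafka/bindings.rb, both changed in this release but not included in this excerpt. One way the wiring could look, reusing the PartitionsCountCacheSketch from the earlier sketch and with fetch_partition_count_from_metadata standing in for the real metadata lookup, is shown below; treat it as an assumption rather than the actual implementation:

# An assumed wiring only, not the shipped code.
class ProducerSketch
  class << self
    # Shared across producer instances and replaceable in specs, as the
    # "partition count value was cached but time expired" context does above.
    def partitions_count_cache
      @partitions_count_cache ||= PartitionsCountCacheSketch.new
    end

    attr_writer :partitions_count_cache
  end

  # Reads through the shared cache; only a missing or stale entry triggers
  # a blocking metadata request.
  def partition_count(topic)
    self.class.partitions_count_cache.get(topic) do
      fetch_partition_count_from_metadata(topic) # hypothetical metadata lookup
    end
  end

  # Could be invoked from the librdkafka statistics callback: the emitted
  # payload already carries per-topic partition data, so the cache can be
  # refreshed every statistics.interval.ms without extra broker round trips.
  def update_cache_from_statistics(statistics)
    (statistics['topics'] || {}).each do |topic, details|
      partition_ids = (details['partitions'] || {}).keys.reject { |id| id == '-1' }
      next if partition_ids.empty?

      self.class.partitions_count_cache.set(topic, partition_ids.size)
    end
  end
end

Keeping the cache at class level is what lets the spec above swap in a fresh instance with ::Rdkafka::Producer.partitions_count_cache= to force a new metadata query.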
data/spec/spec_helper.rb
CHANGED
@@ -18,6 +18,9 @@ def rdkafka_base_config
     :"api.version.request" => false,
     :"broker.version.fallback" => "1.0",
     :"bootstrap.servers" => "localhost:9092",
+    # Display statistics and refresh often just to cover those in specs
+    :'statistics.interval.ms' => 1_000,
+    :'topic.metadata.refresh.interval.ms' => 1_000
   }
 end
 
@@ -125,6 +128,12 @@ RSpec.configure do |config|
   config.filter_run focus: true
   config.run_all_when_everything_filtered = true
 
+  config.before(:each) do
+    Rdkafka::Config.statistics_callback = nil
+    # We need to clear it so state does not leak between specs
+    Rdkafka::Producer.partitions_count_cache.to_h.clear
+  end
+
   config.before(:suite) do
     admin = rdkafka_config.admin
     {
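With statistics.interval.ms set to 1_000 above, librdkafka emits a statistics payload roughly once per second, and the new config.before(:each) hook clears the callback and the shared partitions count cache between examples so state does not leak across specs. As a small illustration of the API the specs toggle, a callback registered via the existing Rdkafka::Config.statistics_callback= setter receives each payload as a parsed Hash:

Rdkafka::Config.statistics_callback = lambda do |stats|
  # Illustrative only: 'name' and 'topics' are standard fields of the
  # librdkafka statistics payload delivered as a parsed Hash.
  puts "stats from #{stats['name']}: #{stats.fetch('topics', {}).keys.size} topic(s)"
end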
metadata
CHANGED
@@ -1,41 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.21.0
+  version: 0.21.1.alpha2
 platform: ruby
 authors:
 - Thijs Cadier
 - Maciej Mensfeld
 bindir: bin
-cert_chain:
-- |
-  -----BEGIN CERTIFICATE-----
-  MIIEcDCCAtigAwIBAgIBATANBgkqhkiG9w0BAQsFADA/MRAwDgYDVQQDDAdjb250
-  YWN0MRcwFQYKCZImiZPyLGQBGRYHa2FyYWZrYTESMBAGCgmSJomT8ixkARkWAmlv
-  MB4XDTI0MDgyMzEwMTkyMFoXDTQ5MDgxNzEwMTkyMFowPzEQMA4GA1UEAwwHY29u
-  dGFjdDEXMBUGCgmSJomT8ixkARkWB2thcmFma2ExEjAQBgoJkiaJk/IsZAEZFgJp
-  bzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjLhLjQqUlNayxkXnO+
-  PsmCDs/KFIzhrsYMfLZRZNaWmzV3ujljMOdDjd4snM2X06C41iVdQPWjpe3j8vVe
-  ZXEWR/twSbOP6Eeg8WVH2wCOo0x5i7yhVn4UBLH4JpfEMCbemVcWQ9ry9OMg4WpH
-  Uu4dRwxFV7hzCz3p0QfNLRI4miAxnGWcnlD98IJRjBAksTuR1Llj0vbOrDGsL9ZT
-  JeXP2gdRLd8SqzAFJEWrbeTBCBU7gfSh3oMg5SVDLjaqf7Kz5wC/8bDZydzanOxB
-  T6CDXPsCnllmvTNx2ei2T5rGYJOzJeNTmJLLK6hJWUlAvaQSvCwZRvFJ0tVGLEoS
-  flqSr6uGyyl1eMUsNmsH4BqPEYcAV6P2PKTv2vUR8AP0raDvZ3xL1TKvfRb8xRpo
-  vPopCGlY5XBWEc6QERHfVLTIVsjnls2/Ujj4h8/TSfqqYnaHKefIMLbuD/tquMjD
-  iWQsW2qStBV0T+U7FijKxVfrfqZP7GxQmDAc9o1iiyAa3QIDAQABo3cwdTAJBgNV
-  HRMEAjAAMAsGA1UdDwQEAwIEsDAdBgNVHQ4EFgQU3O4dTXmvE7YpAkszGzR9DdL9
-  sbEwHQYDVR0RBBYwFIESY29udGFjdEBrYXJhZmthLmlvMB0GA1UdEgQWMBSBEmNv
-  bnRhY3RAa2FyYWZrYS5pbzANBgkqhkiG9w0BAQsFAAOCAYEAVKTfoLXn7mqdSxIR
-  eqxcR6Huudg1jes81s1+X0uiRTR3hxxKZ3Y82cPsee9zYWyBrN8TA4KA0WILTru7
-  Ygxvzha0SRPsSiaKLmgOJ+61ebI4+bOORzIJLpD6GxCxu1r7MI4+0r1u1xe0EWi8
-  agkVo1k4Vi8cKMLm6Gl9b3wG9zQBw6fcgKwmpjKiNnOLP+OytzUANrIUJjoq6oal
-  TC+f/Uc0TLaRqUaW/bejxzDWWHoM3SU6aoLPuerglzp9zZVzihXwx3jPLUVKDFpF
-  Rl2lcBDxlpYGueGo0/oNzGJAAy6js8jhtHC9+19PD53vk7wHtFTZ/0ugDQYnwQ+x
-  oml2fAAuVWpTBCgOVFe6XCQpMKopzoxQ1PjKztW2KYxgJdIBX87SnL3aWuBQmhRd
-  i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
-  ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
-  -----END CERTIFICATE-----
-date: 2025-02-13 00:00:00.000000000 Z
+cert_chain: []
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -171,8 +144,11 @@ extensions:
 - ext/Rakefile
 extra_rdoc_files: []
 files:
+- ".github/CODEOWNERS"
 - ".github/FUNDING.yml"
 - ".github/workflows/ci.yml"
+- ".github/workflows/push.yml"
+- ".github/workflows/verify-action-pins.yml"
 - ".gitignore"
 - ".rspec"
 - ".ruby-gemset"
@@ -184,7 +160,6 @@ files:
 - MIT-LICENSE
 - README.md
 - Rakefile
-- certs/cert.pem
 - dist/librdkafka-2.8.0.tar.gz
 - dist/patches/rdkafka_global_init.patch
 - docker-compose.yml
@@ -230,6 +205,7 @@ files:
 - lib/rdkafka/producer.rb
 - lib/rdkafka/producer/delivery_handle.rb
 - lib/rdkafka/producer/delivery_report.rb
+- lib/rdkafka/producer/partitions_count_cache.rb
 - lib/rdkafka/version.rb
 - rdkafka.gemspec
 - renovate.json
@@ -258,6 +234,7 @@ files:
 - spec/rdkafka/native_kafka_spec.rb
 - spec/rdkafka/producer/delivery_handle_spec.rb
 - spec/rdkafka/producer/delivery_report_spec.rb
+- spec/rdkafka/producer/partitions_count_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
 licenses:
@@ -265,10 +242,10 @@ licenses:
 metadata:
   funding_uri: https://karafka.io/#become-pro
   homepage_uri: https://karafka.io
-  changelog_uri: https://
+  changelog_uri: https://karafka.io/docs/Changelog-Rdkafka
   bug_tracker_uri: https://github.com/karafka/rdkafka-ruby/issues
   source_code_uri: https://github.com/karafka/rdkafka-ruby
-  documentation_uri: https://
+  documentation_uri: https://karafka.io/docs
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths:
@@ -284,7 +261,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.7
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
   It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
@@ -315,5 +292,6 @@ test_files:
 - spec/rdkafka/native_kafka_spec.rb
 - spec/rdkafka/producer/delivery_handle_spec.rb
 - spec/rdkafka/producer/delivery_report_spec.rb
+- spec/rdkafka/producer/partitions_count_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
checksums.yaml.gz.sig
DELETED
Binary file