rdkafka 0.21.0 → 0.22.0.beta1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
- data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
- data/.github/workflows/ci_macos_arm64.yml +306 -0
- data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
- data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
- data/.github/workflows/push_macos_arm64.yml +54 -0
- data/.github/workflows/push_ruby.yml +37 -0
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +17 -0
- data/README.md +2 -1
- data/Rakefile +0 -2
- data/docker-compose.yml +1 -1
- data/ext/Rakefile +1 -1
- data/ext/build_common.sh +361 -0
- data/ext/build_linux_x86_64_gnu.sh +306 -0
- data/ext/build_linux_x86_64_musl.sh +763 -0
- data/ext/build_macos_arm64.sh +550 -0
- data/lib/rdkafka/bindings.rb +30 -3
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/native_kafka.rb +8 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +59 -35
- data/lib/rdkafka/version.rb +1 -1
- data/lib/rdkafka.rb +1 -0
- data/rdkafka.gemspec +27 -8
- data/renovate.json +87 -1
- data/spec/rdkafka/admin_spec.rb +27 -11
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/consumer_spec.rb +74 -15
- data/spec/rdkafka/metadata_spec.rb +2 -2
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +237 -7
- data/spec/spec_helper.rb +30 -7
- metadata +45 -87
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +0 -83
- data/Guardfile +0 -19
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -0
data/spec/rdkafka/config_spec.rb
CHANGED

@@ -33,23 +33,25 @@ describe Rdkafka::Config do
     expect(log.string).to include "FATAL -- : I love testing"
   end

-  it "expect to start new logger thread after fork and work" do
-    reader, writer = IO.pipe
-
-    pid = fork do
-      $stdout.reopen(writer)
-      Rdkafka::Config.logger = Logger.new($stdout)
-      reader.close
-      producer = rdkafka_producer_config(debug: 'all').producer
-      producer.close
+  unless RUBY_PLATFORM == 'java'
+    it "expect to start new logger thread after fork and work" do
+      reader, writer = IO.pipe
+
+      pid = fork do
+        $stdout.reopen(writer)
+        Rdkafka::Config.logger = Logger.new($stdout)
+        reader.close
+        producer = rdkafka_producer_config(debug: 'all').producer
+        producer.close
+        writer.close
+        sleep(1)
+      end
+
       writer.close
-
+      Process.wait(pid)
+      output = reader.read
+      expect(output.split("\n").size).to be >= 20
     end
-
-    writer.close
-    Process.wait(pid)
-    output = reader.read
-    expect(output.split("\n").size).to be >= 20
   end
 end

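The wrapped spec above exercises a pattern worth calling out: after `fork`, the logger thread is not inherited, so the child reassigns `Rdkafka::Config.logger` before building a client. A minimal sketch of the same idea outside the test suite, assuming a local broker and the `debug: 'all'` setting the spec uses:

    require "rdkafka"
    require "logger"

    pid = fork do
      # The child does not inherit the logger thread, so install a fresh logger
      # before creating any rdkafka clients (broker address is an assumption).
      Rdkafka::Config.logger = Logger.new($stdout)

      config = Rdkafka::Config.new(
        "bootstrap.servers" => "localhost:9092",
        "debug"             => "all"
      )
      producer = config.producer
      producer.close
    end

    Process.wait(pid)
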
data/spec/rdkafka/consumer/headers_spec.rb
CHANGED

@@ -3,7 +3,7 @@
 describe Rdkafka::Consumer::Headers do
   let(:headers) do
     { # Note String keys!
-      "version" => "2.1.3",
+      "version" => ["2.1.3", "2.1.4"],
       "type" => "String"
     }
   end

@@ -17,27 +17,39 @@ describe Rdkafka::Consumer::Headers do
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end

+    # First version header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
       .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][0].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers["version"][0]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end

+    # Second version header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
       .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][1].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers["version"][1]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end

+    # Single type header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
-      .with(headers_ptr, 2, anything, anything, anything)
+      .with(headers_ptr, 2, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 2", read_string_to_null: "type"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["type"].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 2", read_string: headers["type"]))
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+    expect(Rdkafka::Bindings).to \
+      receive(:rd_kafka_header_get_all)
+      .with(headers_ptr, 3, anything, anything, anything)
       .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
   end

@@ -46,8 +58,12 @@ describe Rdkafka::Consumer::Headers do
   it { is_expected.to eq(headers) }
   it { is_expected.to be_frozen }

-  it '
-    expect(subject['version']).to eq("2.1.3")
+  it 'returns array for duplicate headers' do
+    expect(subject['version']).to eq(["2.1.3", "2.1.4"])
+  end
+
+  it 'returns string for single headers' do
+    expect(subject['type']).to eq("String")
   end

   it 'does not support symbols mappings' do
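The header changes above adjust `Rdkafka::Consumer::Headers` so that a header key appearing more than once keeps every value: a repeated key maps to an array, while a key that appears once still maps to a plain string. A small illustrative sketch of what consuming code sees after this change (the consumer setup and topic are assumed):

    consumer.each do |message|
      headers = message.headers

      # A key that occurred more than once now comes back as an Array...
      versions = headers["version"]   # e.g. ["2.1.3", "2.1.4"]

      # ...while a single-occurrence key is still a plain String.
      type = headers["type"]          # e.g. "String"

      puts "versions=#{versions.inspect} type=#{type.inspect}"
    end
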
data/spec/rdkafka/consumer_spec.rb
CHANGED

@@ -170,8 +170,16 @@ describe Rdkafka::Consumer do
   end

   describe "#seek" do
+    let(:topic) { "it-#{SecureRandom.uuid}" }
+
+    before do
+      admin = rdkafka_producer_config.admin
+      admin.create_topic(topic, 1, 1).wait
+      admin.close
+    end
+
     it "should raise an error when seeking fails" do
-      fake_msg = OpenStruct.new(topic:
+      fake_msg = OpenStruct.new(topic: topic, partition: 0, offset: 0)

       expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
       expect {

@@ -181,9 +189,12 @@ describe Rdkafka::Consumer do

     context "subscription" do
       let(:timeout) { 1000 }
+      # Some specs here test the manual offset commit hence we want to ensure, that we have some
+      # offsets in-memory that we can manually commit
+      let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }

       before do
-        consumer.subscribe(
+        consumer.subscribe(topic)

         # 1. partitions are assigned
         wait_for_assignment(consumer)

@@ -196,7 +207,7 @@ describe Rdkafka::Consumer do

       def send_one_message(val)
         producer.produce(
-          topic:
+          topic: topic,
           payload: "payload #{val}",
           key: "key 1",
           partition: 0

@@ -211,7 +222,7 @@ describe Rdkafka::Consumer do

         # 4. pause the subscription
         tpl = Rdkafka::Consumer::TopicPartitionList.new
-        tpl.add_topic(
+        tpl.add_topic(topic, 1)
         consumer.pause(tpl)

         # 5. seek to previous message

@@ -219,7 +230,7 @@ describe Rdkafka::Consumer do

         # 6. resume the subscription
         tpl = Rdkafka::Consumer::TopicPartitionList.new
-        tpl.add_topic(
+        tpl.add_topic(topic, 1)
         consumer.resume(tpl)

         # 7. ensure same message is read again

@@ -227,7 +238,7 @@ describe Rdkafka::Consumer do

         # This is needed because `enable.auto.offset.store` is true but when running in CI that
         # is overloaded, offset store lags
-        sleep(
+        sleep(1)

         consumer.commit
         expect(message1.offset).to eq message2.offset

@@ -259,10 +270,17 @@ describe Rdkafka::Consumer do
   end

   describe "#seek_by" do
-    let(:
+    let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }
+    let(:topic) { "it-#{SecureRandom.uuid}" }
     let(:partition) { 0 }
     let(:offset) { 0 }

+    before do
+      admin = rdkafka_producer_config.admin
+      admin.create_topic(topic, 1, 1).wait
+      admin.close
+    end
+
     it "should raise an error when seeking fails" do
       expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
       expect {

@@ -588,12 +606,18 @@ describe Rdkafka::Consumer do

   describe "#store_offset" do
     let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': false).consumer }
+    let(:metadata) { SecureRandom.uuid }
+    let(:group_id) { SecureRandom.uuid }
+    let(:base_config) do
+      {
+        'group.id': group_id,
+        'enable.auto.offset.store': false,
+        'enable.auto.commit': false
+      }
+    end

     before do
-
-      config[:'enable.auto.offset.store'] = false
-      config[:'enable.auto.commit'] = false
-      @new_consumer = rdkafka_consumer_config(config).consumer
+      @new_consumer = rdkafka_consumer_config(base_config).consumer
       @new_consumer.subscribe("consume_test_topic")
       wait_for_assignment(@new_consumer)
     end

@@ -795,12 +819,14 @@ describe Rdkafka::Consumer do
     end

     it "should return a message if there is one" do
+      topic = "it-#{SecureRandom.uuid}"
+
       producer.produce(
-        topic:
+        topic: topic,
         payload: "payload 1",
         key: "key 1"
       ).wait
-      consumer.subscribe(
+      consumer.subscribe(topic)
       message = consumer.each {|m| break m}

       expect(message).to be_a Rdkafka::Consumer::Message

@@ -1000,7 +1026,7 @@ describe Rdkafka::Consumer do
     after { Rdkafka::Config.statistics_callback = nil }

     let(:consumer) do
-      config = rdkafka_consumer_config('statistics.interval.ms':
+      config = rdkafka_consumer_config('statistics.interval.ms': 500)
       config.consumer_poll_set = false
       config.consumer
     end

@@ -1097,7 +1123,8 @@ describe Rdkafka::Consumer do
       :assign => [ nil ],
       :assignment => nil,
       :committed => [],
-      :query_watermark_offsets => [ nil, nil ]
+      :query_watermark_offsets => [ nil, nil ],
+      :assignment_lost? => []
     }.each do |method, args|
       it "raises an exception if #{method} is called" do
         expect {

@@ -1212,4 +1239,36 @@ describe Rdkafka::Consumer do
       end
     end
   end
+
+  describe "when reaching eof on a topic and eof reporting enabled" do
+    let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }
+
+    it "should return proper details" do
+      (0..2).each do |i|
+        producer.produce(
+          topic: "consume_test_topic",
+          key: "key lag #{i}",
+          partition: i
+        ).wait
+      end
+
+      # Consume to the end
+      consumer.subscribe("consume_test_topic")
+      eof_count = 0
+      eof_error = nil
+
+      loop do
+        begin
+          consumer.poll(100)
+        rescue Rdkafka::RdkafkaError => error
+          if error.is_partition_eof?
+            eof_error = error
+          end
+          break if eof_error
+        end
+      end
+
+      expect(eof_error.code).to eq(:partition_eof)
+    end
+  end
 end

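The new end-of-partition spec relies on `enable.partition.eof`, which makes librdkafka report reaching the end of a partition as an `Rdkafka::RdkafkaError` with code `:partition_eof`. A trimmed-down sketch of the same pattern in application code; the bootstrap address and group id are illustrative assumptions:

    config = Rdkafka::Config.new(
      "bootstrap.servers"    => "localhost:9092", # assumption
      "group.id"             => "eof-example",    # assumption
      "enable.partition.eof" => true
    )
    consumer = config.consumer
    consumer.subscribe("consume_test_topic")

    loop do
      begin
        message = consumer.poll(100)
        next unless message
        puts "#{message.topic}/#{message.partition}@#{message.offset}"
      rescue Rdkafka::RdkafkaError => e
        # Raised once a partition has been read to its end.
        break if e.is_partition_eof?
        raise
      end
    end

    consumer.close
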
data/spec/rdkafka/metadata_spec.rb
CHANGED

@@ -30,7 +30,7 @@ describe Rdkafka::Metadata do
   it "#brokers returns our single broker" do
     expect(subject.brokers.length).to eq(1)
     expect(subject.brokers[0][:broker_id]).to eq(1)
-    expect(subject.brokers[0][:broker_name])
+    expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
     expect(subject.brokers[0][:broker_port]).to eq(9092)
   end

@@ -53,7 +53,7 @@ describe Rdkafka::Metadata do
   it "#brokers returns our single broker" do
     expect(subject.brokers.length).to eq(1)
     expect(subject.brokers[0][:broker_id]).to eq(1)
-    expect(subject.brokers[0][:broker_name])
+    expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
     expect(subject.brokers[0][:broker_port]).to eq(9092)
   end

data/spec/rdkafka/producer/partitions_count_cache_spec.rb
ADDED

@@ -0,0 +1,359 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Rdkafka::Producer::PartitionsCountCache do
+  let(:default_ttl) { 1 } # Reduced from 30 to speed up tests
+  let(:custom_ttl) { 0.5 } # Half the default TTL
+  let(:cache) { described_class.new(default_ttl) }
+  let(:custom_ttl_cache) { described_class.new(custom_ttl) }
+  let(:topic) { "test_topic" }
+  let(:topic2) { "test_topic2" }
+  let(:partition_count) { 5 }
+  let(:higher_partition_count) { 10 }
+  let(:lower_partition_count) { 3 }
+  let(:even_higher_partition_count) { 15 }
+
+  describe "#initialize" do
+    it "creates a cache with default TTL when no TTL is specified" do
+      standard_cache = described_class.new
+      expect(standard_cache).to be_a(described_class)
+    end
+
+    it "creates a cache with custom TTL when specified" do
+      expect(custom_ttl_cache).to be_a(described_class)
+    end
+  end
+
+  describe "#get" do
+    context "when cache is empty" do
+      it "yields to get the value and caches it" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(partition_count)
+
+        # Verify caching by checking if block is called again
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          partition_count + 1 # Different value to ensure we get cached value
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(partition_count)
+      end
+    end
+
+    context "when cache has a value" do
+      before do
+        # Seed the cache with a value
+        cache.get(topic) { partition_count }
+      end
+
+      it "returns cached value without yielding if not expired" do
+        block_called = false
+        result = cache.get(topic) do
+          block_called = true
+          partition_count + 1 # Different value to ensure we get cached one
+        end
+
+        expect(block_called).to be false
+        expect(result).to eq(partition_count)
+      end
+
+      it "yields to get new value when TTL has expired" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        block_called = false
+        new_count = partition_count + 1
+        result = cache.get(topic) do
+          block_called = true
+          new_count
+        end
+
+        expect(block_called).to be true
+        expect(result).to eq(new_count)
+
+        # Verify the new value is cached
+        second_block_called = false
+        second_result = cache.get(topic) do
+          second_block_called = true
+          new_count + 1 # Different value again
+        end
+
+        expect(second_block_called).to be false
+        expect(second_result).to eq(new_count)
+      end
+
+      it "respects a custom TTL" do
+        # Seed the custom TTL cache with a value
+        custom_ttl_cache.get(topic) { partition_count }
+
+        # Wait for custom TTL to expire but not default TTL
+        sleep(custom_ttl + 0.1)
+
+        # Custom TTL cache should refresh
+        custom_block_called = false
+        custom_result = custom_ttl_cache.get(topic) do
+          custom_block_called = true
+          higher_partition_count
+        end
+
+        expect(custom_block_called).to be true
+        expect(custom_result).to eq(higher_partition_count)
+
+        # Default TTL cache should not refresh yet
+        default_block_called = false
+        default_result = cache.get(topic) do
+          default_block_called = true
+          higher_partition_count
+        end
+
+        expect(default_block_called).to be false
+        expect(default_result).to eq(partition_count)
+      end
+    end
+
+    context "when new value is obtained" do
+      before do
+        # Seed the cache with initial value
+        cache.get(topic) { partition_count }
+      end
+
+      it "updates cache when new value is higher than cached value" do
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Get higher value
+        result = cache.get(topic) { higher_partition_count }
+        expect(result).to eq(higher_partition_count)
+
+        # Verify it was cached
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "preserves higher cached value when new value is lower" do
+        # First update to higher value
+        sleep(default_ttl + 0.1)
+        cache.get(topic) { higher_partition_count }
+
+        # Then try to update to lower value
+        sleep(default_ttl + 0.1)
+        result = cache.get(topic) { lower_partition_count }
+
+        expect(result).to eq(higher_partition_count)
+
+        # and subsequent gets should return the previously cached higher value
+        second_result = cache.get(topic) { fail "Should not be called" }
+        expect(second_result).to eq(higher_partition_count)
+      end
+
+      it "handles multiple topics independently" do
+        # Set up both topics with different values
+        cache.get(topic) { partition_count }
+        cache.get(topic2) { higher_partition_count }
+
+        # Wait for TTL to expire
+        sleep(default_ttl + 0.1)
+
+        # Update first topic
+        first_result = cache.get(topic) { even_higher_partition_count }
+        expect(first_result).to eq(even_higher_partition_count)
+
+        # Update second topic independently
+        second_updated = higher_partition_count + 3
+        second_result = cache.get(topic2) { second_updated }
+        expect(second_result).to eq(second_updated)
+
+        # Both topics should have their updated values
+        expect(cache.get(topic) { fail "Should not be called" }).to eq(even_higher_partition_count)
+        expect(cache.get(topic2) { fail "Should not be called" }).to eq(second_updated)
+      end
+    end
+  end
+
+  describe "#set" do
+    context "when cache is empty" do
+      it "adds a new entry to the cache" do
+        cache.set(topic, partition_count)
+
+        # Verify through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "when cache already has a value" do
+      before do
+        cache.set(topic, partition_count)
+      end
+
+      it "updates cache when new value is higher" do
+        cache.set(topic, higher_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(higher_partition_count)
+      end
+
+      it "keeps original value when new value is lower" do
+        cache.set(topic, lower_partition_count)
+
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+
+      it "updates the timestamp even when keeping original value" do
+        # Set initial value
+        cache.set(topic, partition_count)
+
+        # Wait until close to TTL expiring
+        sleep(default_ttl - 0.2)
+
+        # Set lower value (should update timestamp but not value)
+        cache.set(topic, lower_partition_count)
+
+        # Wait a bit more, but still under the full TTL if timestamp was refreshed
+        sleep(0.3)
+
+        # Should still be valid due to timestamp refresh
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(partition_count)
+      end
+    end
+
+    context "with concurrent access" do
+      it "correctly handles simultaneous updates to the same topic" do
+        # This test focuses on the final value after concurrent updates
+        threads = []
+
+        # Create 5 threads that all try to update the same topic with increasing values
+        5.times do |i|
+          threads << Thread.new do
+            value = 10 + i # Start at 10 to ensure all are higher than initial value
+            cache.set(topic, value)
+          end
+        end
+
+        # Wait for all threads to complete
+        threads.each(&:join)
+
+        # The highest value (14) should be stored and accessible through get
+        result = cache.get(topic) { fail "Should not be called" }
+        expect(result).to eq(14)
+      end
+    end
+  end
+
+  describe "TTL behavior" do
+    it "treats entries as expired when they exceed TTL" do
+      # Set initial value
+      cache.get(topic) { partition_count }
+
+      # Wait just under TTL
+      sleep(default_ttl - 0.2)
+
+      # Value should still be cached (block should not be called)
+      result = cache.get(topic) { fail "Should not be called when cache is valid" }
+      expect(result).to eq(partition_count)
+
+      # Now wait to exceed TTL
+      sleep(0.2) # Total sleep is now default_ttl + 0.1
+
+      # Cache should be expired, block should be called
+      block_called = false
+      new_value = partition_count + 3
+      result = cache.get(topic) do
+        block_called = true
+        new_value
+      end
+
+      expect(block_called).to be true
+      expect(result).to eq(new_value)
+    end
+  end
+
+  describe "comprehensive scenarios" do
+    it "handles a full lifecycle of cache operations" do
+      # 1. Initial cache miss, fetch and store
+      result1 = cache.get(topic) { partition_count }
+      expect(result1).to eq(partition_count)
+
+      # 2. Cache hit
+      result2 = cache.get(topic) { fail "Should not be called" }
+      expect(result2).to eq(partition_count)
+
+      # 3. Attempt to set lower value
+      cache.set(topic, lower_partition_count)
+      result3 = cache.get(topic) { fail "Should not be called" }
+      # Should still return the higher original value
+      expect(result3).to eq(partition_count)
+
+      # 4. Set higher value
+      cache.set(topic, higher_partition_count)
+      result4 = cache.get(topic) { fail "Should not be called" }
+      expect(result4).to eq(higher_partition_count)
+
+      # 5. TTL expires, new value provided is lower
+      sleep(default_ttl + 0.1)
+      result5 = cache.get(topic) { lower_partition_count }
+      # This returns the highest value
+      expect(result5).to eq(higher_partition_count)
+
+      # 6. But subsequent get should return the higher cached value
+      result6 = cache.get(topic) { fail "Should not be called" }
+      expect(result6).to eq(higher_partition_count)
+
+      # 7. Set new highest value directly
+      even_higher = higher_partition_count + 5
+      cache.set(topic, even_higher)
+      result7 = cache.get(topic) { fail "Should not be called" }
+      expect(result7).to eq(even_higher)
+    end
+
+    it "handles multiple topics with different TTLs correctly" do
+      # Set up initial values
+      cache.get(topic) { partition_count }
+      custom_ttl_cache.get(topic) { partition_count }
+
+      # Wait past custom TTL but not default TTL
+      sleep(custom_ttl + 0.1)
+
+      # Default cache should NOT refresh (still within default TTL)
+      default_result = cache.get(topic) { fail "Should not be called for default cache" }
+      # Original value should be maintained
+      expect(default_result).to eq(partition_count)
+
+      # Custom TTL cache SHOULD refresh (past custom TTL)
+      custom_cache_value = partition_count + 8
+      custom_block_called = false
+      custom_result = custom_ttl_cache.get(topic) do
+        custom_block_called = true
+        custom_cache_value
+      end
+
+      expect(custom_block_called).to be true
+      expect(custom_result).to eq(custom_cache_value)
+
+      # Now wait past default TTL
+      sleep(default_ttl - custom_ttl + 0.1)
+
+      # Now default cache should also refresh
+      default_block_called = false
+      new_default_value = partition_count + 10
+      new_default_result = cache.get(topic) do
+        default_block_called = true
+        new_default_value
+      end
+
+      expect(default_block_called).to be true
+      expect(new_default_result).to eq(new_default_value)
+    end
+  end
+end
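Read as a whole, this spec pins down the contract of the new `Rdkafka::Producer::PartitionsCountCache`: `get` yields on a miss or after the TTL expires and caches the result per topic, `set` only ever raises the stored count (a lower value merely refreshes the entry's timestamp), and concurrent writers resolve to the highest value. A speculative usage sketch based only on the behaviour exercised above; `fetch_partition_count` is a hypothetical stand-in for whatever lookup the producer actually performs:

    cache = Rdkafka::Producer::PartitionsCountCache.new(30) # TTL in seconds

    # Miss (or expired entry): the block runs and its result is cached for the topic.
    count = cache.get("events") { fetch_partition_count("events") }

    # Within the TTL the cached value is returned and the block is not invoked.
    count = cache.get("events") { raise "not reached" }

    # Partition counts only grow: a higher value replaces the cached one,
    # a lower value is ignored apart from refreshing the entry's timestamp.
    cache.set("events", count + 5)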