karafka-rdkafka 0.20.0.rc4-x86_64-linux-musl → 0.20.0-x86_64-linux-musl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci_linux_x86_64_musl.yml +2 -2
- data/.github/workflows/push_linux_x86_64_musl.yml +2 -2
- data/.github/workflows/push_ruby.yml +1 -1
- data/.ruby-version +1 -1
- data/CHANGELOG.md +6 -1
- data/README.md +1 -1
- data/ext/librdkafka.so +0 -0
- data/karafka-rdkafka.gemspec +26 -1
- data/lib/rdkafka/bindings.rb +6 -7
- data/lib/rdkafka/producer.rb +11 -6
- data/lib/rdkafka/version.rb +1 -1
- data/spec/rdkafka/admin_spec.rb +202 -1
- data/spec/rdkafka/bindings_spec.rb +0 -24
- data/spec/rdkafka/consumer_spec.rb +0 -3
- data/spec/rdkafka/producer_spec.rb +295 -1
- data/spec/spec_helper.rb +20 -6
- metadata +12 -13
- data/spec/rdkafka/producer/partitions_count_spec.rb +0 -359
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d68434b9c41f257de1f8570bbfa645f8c49f97c59f83f0b63faba8c2ecb2eb1b
+  data.tar.gz: 66d47fb1e0b186d5c39d4e29950cde5f9931028e3656c7e15f8d75d719b4caa1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ccd11e549995c0fe8c5a82eb63871de11689d81f6a51770169fb0fc41bd42a17946d22432a9e0197ec8c84b6c9d5019cc104f8bbe7a0089f4d767005fad5903e
+  data.tar.gz: 0ddae6bc6d8a0f379c6bd3d3c7366d787d653594a5743803534bd5ddd252bc4d28b04beec8d28be1ccd31f8e2d8caee456b996812b37ef835b363f5376aa7ccb
data/.github/workflows/ci_linux_x86_64_musl.yml
CHANGED
@@ -24,7 +24,7 @@ jobs:
     timeout-minutes: 30
     runs-on: ubuntu-latest
     container:
-      image: alpine:3.22@sha256:
+      image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
     steps:
       - name: Install dependencies
         run: |
@@ -123,7 +123,7 @@ jobs:
     timeout-minutes: 45
     runs-on: ubuntu-latest
     container:
-      image: alpine:3.22@sha256:
+      image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
     steps:
       - name: Install dependencies
         run: |
data/.github/workflows/push_linux_x86_64_musl.yml
CHANGED
@@ -13,8 +13,9 @@ jobs:
     if: github.repository_owner == 'karafka'
     timeout-minutes: 30
     runs-on: ubuntu-latest
+    environment: deployment
     container:
-      image: alpine:3.22@sha256:
+      image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
     steps:
       - name: Install dependencies
         run: |
@@ -43,7 +44,6 @@ jobs:
         name: librdkafka-precompiled-musl
         path: ext/
         retention-days: 1
-
   push:
     if: github.repository_owner == 'karafka'
     timeout-minutes: 30
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.4.
+3.4.5
data/CHANGELOG.md
CHANGED
@@ -1,10 +1,15 @@
 # Rdkafka Changelog

-## 0.20.0 (
+## 0.20.0 (2025-07-17)
 - **[Feature]** Add precompiled `x86_64-linux-gnu` setup.
 - **[Feature]** Add precompiled `x86_64-linux-musl` setup.
 - **[Feature]** Add precompiled `macos_arm64` setup.
 - [Enhancement] Run all specs on each of the platforms with and without precompilation.
+- [Enhancement] Support transactional id in the ACL API.
+- [Fix] Fix a case where using empty key on the `musl` architecture would cause a segfault.
+- [Fix] Fix for null pointer reference bypass on empty string being too wide causing segfault.
+
+**Note**: Precompiled extensions are a new feature in this release. While they significantly improve installation speed and reduce build dependencies, they should be thoroughly tested in your staging environment before deploying to production. If you encounter any issues with precompiled extensions, you can fall back to building from sources. For more information, see the [Native Extensions documentation](https://karafka.io/docs/Development-Native-Extensions/).

 ## 0.19.5 (2025-05-30)
 - [Enhancement] Allow for producing to non-existing topics with `key` and `partition_key` present.
data/README.md
CHANGED
@@ -163,7 +163,7 @@ bundle exec rake produce_messages

 | rdkafka-ruby | librdkafka | patches |
 |-|-|-|
-| 0.20.x (
+| 0.20.x (2025-07-17) | 2.8.0 (2025-01-07) | yes |
 | 0.19.x (2025-01-20) | 2.8.0 (2025-01-07) | yes |
 | 0.18.0 (2024-11-26) | 2.6.1 (2024-11-18) | yes |
 | 0.17.4 (2024-09-02) | 2.5.3 (2024-09-02) | yes |
data/ext/librdkafka.so
CHANGED
Binary file
data/karafka-rdkafka.gemspec
CHANGED
@@ -40,12 +40,37 @@ Gem::Specification.new do |gem|
     gem.extensions = %w(ext/Rakefile)
   end

+  if ENV['RUBY_PLATFORM']
+    gem.platform = ENV['RUBY_PLATFORM']
+    gem.files = `git ls-files`.split($\)
+
+    # Do not include the source code for librdkafka as it should be precompiled already per
+    # platform. Same applies to any possible patches.
+    gem.files = gem.files.reject do |file|
+      file.match?(%r{^dist/librdkafka-.*\.tar\.gz$}) ||
+        file.match?(%r{^dist/patches/})
+    end
+
+    # Add the compiled extensions that exist (not in git)
+    if File.exist?('ext/librdkafka.so')
+      gem.files << 'ext/librdkafka.so'
+    end
+
+    if File.exist?('ext/librdkafka.dylib')
+      gem.files << 'ext/librdkafka.dylib'
+    end
+  else
+    gem.platform = Gem::Platform::RUBY
+    gem.files = `git ls-files`.split($\)
+    gem.extensions = %w(ext/Rakefile)
+  end
+
   gem.add_dependency 'ffi', '~> 1.15'
   gem.add_dependency 'logger'
   gem.add_dependency 'mini_portile2', '~> 2.6'
-  gem.add_dependency 'ostruct'
   gem.add_dependency 'rake', '> 12'

+  gem.add_development_dependency 'ostruct'
   gem.add_development_dependency 'pry'
   gem.add_development_dependency 'rspec', '~> 3.5'
   gem.add_development_dependency 'rake'
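The gemspec above branches entirely on the RUBY_PLATFORM environment variable: when set, it produces a platform gem that ships the precompiled library and drops the librdkafka source tarball and patches; when unset, it falls back to the classic source gem built through ext/Rakefile. A minimal sketch of how the two branches resolve, assuming the variable is exported by the packaging pipeline (the value and the checks below are illustrative, not taken from the build scripts):

# Hypothetical packaging-time check; RUBY_PLATFORM would be set by CI, not by end users.
ENV['RUBY_PLATFORM'] = 'x86_64-linux-musl'

spec = Gem::Specification.load('karafka-rdkafka.gemspec')
spec.platform.to_s                        # => "x86_64-linux-musl"
spec.files.grep(%r{^dist/})               # => [] (librdkafka tarball and patches excluded)
spec.files.include?('ext/librdkafka.so')  # => true once the precompiled .so exists

Without RUBY_PLATFORM, the else branch keeps the extension and compiles librdkafka from sources at install time.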
data/lib/rdkafka/bindings.rb
CHANGED
@@ -403,18 +403,16 @@ module Rdkafka
       hsh[name] = method_name
     end

-    def self.partitioner(str, partition_count,
+    def self.partitioner(topic_ptr, str, partition_count, partitioner = "consistent_random")
       # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
       return -1 unless partition_count&.nonzero?
-      # musl architecture crashes with empty string
-      return 0 if str.empty?

-      str_ptr = FFI::MemoryPointer.from_string(str)
-      method_name = PARTITIONERS.fetch(
-        raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{
+      str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
+      method_name = PARTITIONERS.fetch(partitioner) do
+        raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner}")
       end

-      public_send(method_name,
+      public_send(method_name, topic_ptr, str_ptr, str.size, partition_count, nil, nil)
     end

     # Create Topics
@@ -532,6 +530,7 @@ module Rdkafka
     RD_KAFKA_RESOURCE_TOPIC = 2
     RD_KAFKA_RESOURCE_GROUP = 3
     RD_KAFKA_RESOURCE_BROKER = 4
+    RD_KAFKA_RESOURCE_TRANSACTIONAL_ID = 5

     # rd_kafka_ResourcePatternType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7320
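Two things change in the partitioner binding above: the native topic handle is threaded through as a new first argument, and an empty partition key is forwarded to librdkafka as a NULL pointer instead of being short-circuited to partition 0 (the old guard that worked around the musl segfault noted in the CHANGELOG). A hedged sketch of calling the updated binding; topic_ptr is assumed to come from the producer's cached topic reference, and the partition count is illustrative:

# Illustrative only - exercises the new four-argument signature.
partition = Rdkafka::Bindings.partitioner(
  topic_ptr,  # native topic handle (new first argument), assumed already resolved
  "user-42",  # partition key; "" is now passed as a NULL pointer
  12,         # current partition count of the topic
  "murmur2"   # any key registered in Rdkafka::Bindings::PARTITIONERS
)
partition # => Integer in 0...12, or -1 (RD_KAFKA_PARTITION_UA) when the count is nil/zero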
data/lib/rdkafka/producer.rb
CHANGED
@@ -51,13 +51,13 @@ module Rdkafka

     # @private
     # @param native_kafka [NativeKafka]
-    # @param
+    # @param partitioner [String, nil] name of the partitioner we want to use or nil to use
     #   the "consistent_random" default
-    def initialize(native_kafka,
+    def initialize(native_kafka, partitioner)
       @topics_refs_map = {}
       @topics_configs = {}
       @native_kafka = native_kafka
-      @
+      @partitioner = partitioner || "consistent_random"

       # Makes sure, that native kafka gets closed before it gets GCed by Ruby
       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
@@ -337,7 +337,8 @@
       timestamp: nil,
       headers: nil,
       label: nil,
-      topic_config: EMPTY_HASH
+      topic_config: EMPTY_HASH,
+      partitioner: @partitioner
     )
       closed_producer_check(__method__)

@@ -369,10 +370,14 @@

       # Check if there are no overrides for the partitioner and use the default one only when
       # no per-topic is present.
-
+      selected_partitioner = @topics_configs.dig(topic, topic_config_hash, :partitioner) || partitioner

       # If the topic is not present, set to -1
-      partition = Rdkafka::Bindings.partitioner(
+      partition = Rdkafka::Bindings.partitioner(
+        topic_ref,
+        partition_key,
+        partition_count,
+        selected_partitioner) if partition_count.positive?
     end

     # If partition is nil, use -1 to let librdafka set the partition randomly or
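With these changes, #produce accepts a per-message partitioner: argument that defaults to the producer-wide one, and a :partitioner entry in a topic's topic_config takes precedence over both. A usage sketch, assuming a reachable local broker:

# Hedged example of partitioner selection through the public producer API.
producer = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092" # assumption: local test broker
).producer

# Uses the producer default ("consistent_random" unless configured otherwise)
producer.produce(topic: "events", payload: "a", partition_key: "user-42")

# Overrides the partitioner for this message only
handle = producer.produce(
  topic: "events",
  payload: "b",
  partition_key: "user-42",
  partitioner: "murmur2"
)
handle.wait(max_wait_timeout: 5)
producer.close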
data/lib/rdkafka/version.rb
CHANGED
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -513,7 +513,7 @@ describe Rdkafka::Admin do
     end
   end

-  describe "#ACL tests" do
+  describe "#ACL tests for topic resource" do
     let(:non_existing_resource_name) {"non-existing-topic"}
     before do
       #create topic for testing acl
@@ -615,6 +615,207 @@ describe Rdkafka::Admin do
     end
   end

+  describe "#ACL tests for transactional_id" do
+    let(:transactional_id_resource_name) {"test-transactional-id"}
+    let(:non_existing_transactional_id) {"non-existing-transactional-id"}
+    let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID }
+    let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+    let(:transactional_id_principal) { "User:test-user" }
+    let(:transactional_id_host) { "*" }
+    let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE }
+    let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+
+    after do
+      # Clean up any ACLs that might have been created during tests
+      begin
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_handle.wait(max_wait_timeout: 15.0)
+      rescue
+        # Ignore cleanup errors
+      end
+    end
+
+    describe "#create_acl" do
+      it "creates acl for a transactional_id" do
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: transactional_id_resource_name,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+      end
+
+      it "creates acl for a non-existing transactional_id" do
+        # ACL creation for transactional_ids that don't exist will still get created successfully
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Clean up the ACL that was created for the non-existing transactional_id
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.size).to eq(1)
+      end
+    end
+
+    describe "#describe_acl" do
+      it "describes acl of a transactional_id that does not exist" do
+        describe_acl_handle = admin.describe_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(describe_acl_handle[:response]).to eq(0)
+        expect(describe_acl_report.acls.size).to eq(0)
+      end
+
+      it "creates acls and describes the newly created transactional_id acls" do
+        # Create first ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_1",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Create second ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_2",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Since we create and immediately check, this is slow on loaded CIs, hence we wait
+        sleep(2)
+
+        # Describe ACLs - filter by transactional_id resource type
+        describe_acl_handle = admin.describe_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(describe_acl_handle[:response]).to eq(0)
+        expect(describe_acl_report.acls.length).to eq(2)
+      end
+    end
+
+    describe "#delete_acl" do
+      it "deletes acl of a transactional_id that does not exist" do
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.size).to eq(0)
+      end
+
+      it "creates transactional_id acls and deletes the newly created acls" do
+        # Create first ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_1",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Create second ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_2",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.length).to eq(2)
+      end
+    end
+  end
+
   describe('Group tests') do
     describe "#delete_group" do
       describe("with an existing group") do
data/spec/rdkafka/bindings_spec.rb
CHANGED
@@ -77,30 +77,6 @@ describe Rdkafka::Bindings do
     end
   end

-  describe "partitioner" do
-    let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
-    let(:partition_count) { rand(50) + 1 }
-
-    it "should return the same partition for a similar string and the same partition count" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      expect(result_1).to eq(result_2)
-    end
-
-    it "should match the old partitioner" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      result_2 = (Zlib.crc32(partition_key) % partition_count)
-      expect(result_1).to eq(result_2)
-    end
-
-    it "should return the partition calculated by the specified partitioner" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
-      ptr = FFI::MemoryPointer.from_string(partition_key)
-      result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
-      expect(result_1).to eq(result_2)
-    end
-  end
-
   describe "stats callback" do
     context "without a stats callback" do
       it "should do nothing" do
data/spec/rdkafka/consumer_spec.rb
CHANGED
@@ -1291,9 +1291,6 @@ describe Rdkafka::Consumer do
         end

         expect(eof_error.code).to eq(:partition_eof)
-        expect(eof_error.details[:topic]).to eq('consume_test_topic')
-        expect(eof_error.details[:partition]).to be_a(Integer)
-        expect(eof_error.details[:offset]).to be_a(Integer)
       end
     end
   end
data/spec/rdkafka/producer_spec.rb
CHANGED
@@ -340,7 +340,7 @@ describe Rdkafka::Producer do
       )
     end

-    expect(messages[0].partition).to
+    expect(messages[0].partition).to be >= 0
     expect(messages[0].key).to eq 'a'
   end

@@ -1231,4 +1231,298 @@ describe Rdkafka::Producer do
       end
     end
   end
+
+  let(:producer) { rdkafka_producer_config.producer }
+  let(:all_partitioners) { %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random) }
+
+  describe "partitioner behavior through producer API" do
+    context "testing all partitioners with same key" do
+      it "should not return partition 0 for all partitioners" do
+        test_key = "test-key-123"
+        results = {}
+
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: test_key,
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          results[partitioner] = report.partition
+        end
+
+        # Should not all be the same partition (especially not all 0)
+        unique_partitions = results.values.uniq
+        expect(unique_partitions.size).to be > 1
+      end
+    end
+
+    context "empty string partition key" do
+      it "should produce message with empty partition key without crashing and go to partition 0 for all partitioners" do
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            key: "test-key",
+            partition_key: "",
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+        end
+      end
+    end
+
+    context "nil partition key" do
+      it "should handle nil partition key gracefully" do
+        handle = producer.produce(
+          topic: "partitioner_test_topic",
+          payload: "test payload",
+          key: "test-key",
+          partition_key: nil
+        )
+
+        report = handle.wait(max_wait_timeout: 5)
+        expect(report.partition).to be >= 0
+        expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+      end
+    end
+
+    context "various key types and lengths with different partitioners" do
+      it "should handle very short keys with all partitioners" do
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: "a",
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+
+      it "should handle very long keys with all partitioners" do
+        long_key = "a" * 1000
+
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: long_key,
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+
+      it "should handle unicode keys with all partitioners" do
+        unicode_key = "测试键值🚀"
+
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: unicode_key,
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+    end
+
+    context "consistency testing for deterministic partitioners" do
+      %w(consistent murmur2 fnv1a).each do |partitioner|
+        it "should consistently route same partition key to same partition with #{partitioner}" do
+          partition_key = "consistent-test-key"
+
+          # Produce multiple messages with same partition key
+          reports = 5.times.map do
+            handle = producer.produce(
+              topic: "partitioner_test_topic",
+              payload: "test payload #{Time.now.to_f}",
+              partition_key: partition_key,
+              partitioner: partitioner
+            )
+            handle.wait(max_wait_timeout: 5)
+          end
+
+          # All should go to same partition
+          partitions = reports.map(&:partition).uniq
+          expect(partitions.size).to eq(1)
+        end
+      end
+    end
+
+    context "randomness testing for random partitioners" do
+      %w(random consistent_random murmur2_random fnv1a_random).each do |partitioner|
+        it "should potentially distribute across partitions with #{partitioner}" do
+          # Note: random partitioners might still return same value by chance
+          partition_key = "random-test-key"
+
+          reports = 10.times.map do
+            handle = producer.produce(
+              topic: "partitioner_test_topic",
+              payload: "test payload #{Time.now.to_f}",
+              partition_key: partition_key,
+              partitioner: partitioner
+            )
+            handle.wait(max_wait_timeout: 5)
+          end
+
+          partitions = reports.map(&:partition)
+
+          # Just ensure they're valid partitions
+          partitions.each do |partition|
+            expect(partition).to be >= 0
+            expect(partition).to be < producer.partition_count("partitioner_test_topic")
+          end
+        end
+      end
+    end
+
+    context "comparing different partitioners with same key" do
+      it "should route different partition keys to potentially different partitions" do
+        keys = ["key1", "key2", "key3", "key4", "key5"]
+
+        all_partitioners.each do |partitioner|
+          reports = keys.map do |key|
+            handle = producer.produce(
+              topic: "partitioner_test_topic",
+              payload: "test payload",
+              partition_key: key,
+              partitioner: partitioner
+            )
+            handle.wait(max_wait_timeout: 5)
+          end
+
+          partitions = reports.map(&:partition).uniq
+
+          # Should distribute across multiple partitions for most partitioners
+          # (though some might hash all keys to same partition by chance)
+          expect(partitions.all? { |p| p >= 0 && p < producer.partition_count("partitioner_test_topic") }).to be true
+        end
+      end
+    end
+
+    context "partition key vs regular key behavior" do
+      it "should use partition key for partitioning when both key and partition_key are provided" do
+        # Use keys that would hash to different partitions
+        regular_key = "regular-key-123"
+        partition_key = "partition-key-456"
+
+        # Message with both keys
+        handle1 = producer.produce(
+          topic: "partitioner_test_topic",
+          payload: "test payload 1",
+          key: regular_key,
+          partition_key: partition_key
+        )
+
+        # Message with only partition key (should go to same partition)
+        handle2 = producer.produce(
+          topic: "partitioner_test_topic",
+          payload: "test payload 2",
+          partition_key: partition_key
+        )
+
+        # Message with only regular key (should go to different partition)
+        handle3 = producer.produce(
+          topic: "partitioner_test_topic",
+          payload: "test payload 3",
+          key: regular_key
+        )
+
+        report1 = handle1.wait(max_wait_timeout: 5)
+        report2 = handle2.wait(max_wait_timeout: 5)
+        report3 = handle3.wait(max_wait_timeout: 5)
+
+        # Messages 1 and 2 should go to same partition (both use partition_key)
+        expect(report1.partition).to eq(report2.partition)
+
+        # Message 3 should potentially go to different partition (uses regular key)
+        expect(report3.partition).not_to eq(report1.partition)
+      end
+    end
+
+    context "edge case combinations with different partitioners" do
+      it "should handle nil partition key with all partitioners" do
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            key: "test-key",
+            partition_key: nil,
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+
+      it "should handle whitespace-only partition key with all partitioners" do
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: " ",
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+
+      it "should handle newline characters in partition key with all partitioners" do
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "test payload",
+            partition_key: "key\nwith\nnewlines",
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          expect(report.partition).to be >= 0
+          expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+        end
+      end
+    end
+
+    context "debugging partitioner issues" do
+      it "should show if all partitioners return 0 (indicating a problem)" do
+        test_key = "debug-test-key"
+        zero_count = 0
+
+        all_partitioners.each do |partitioner|
+          handle = producer.produce(
+            topic: "partitioner_test_topic",
+            payload: "debug payload",
+            partition_key: test_key,
+            partitioner: partitioner
+          )
+
+          report = handle.wait(max_wait_timeout: 5)
+          zero_count += 1 if report.partition == 0
+        end
+
+        expect(zero_count).to be < all_partitioners.size
+      end
+    end
+  end
 end
data/spec/spec_helper.rb
CHANGED
@@ -78,18 +78,32 @@ end

 def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, consumer: nil)
   new_consumer = consumer.nil?
-  consumer ||= rdkafka_consumer_config.consumer
+  consumer ||= rdkafka_consumer_config('allow.auto.create.topics': true).consumer
   consumer.subscribe(topic)
   timeout = Time.now.to_i + timeout_in_seconds
+  retry_count = 0
+  max_retries = 10
+
   loop do
     if timeout <= Time.now.to_i
       raise "Timeout of #{timeout_in_seconds} seconds reached in wait_for_message"
     end
-
-
-
-
-
+
+    begin
+      message = consumer.poll(100)
+      if message &&
+          message.partition == delivery_report.partition &&
+          message.offset == delivery_report.offset
+        return message
+      end
+    rescue Rdkafka::RdkafkaError => e
+      if e.code == :unknown_topic_or_part && retry_count < max_retries
+        retry_count += 1
+        sleep(0.1) # Small delay before retry
+        next
+      else
+        raise
+      end
+    end
   end
 end
 ensure
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka-rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.20.0.rc4
+  version: 0.20.0
 platform: x86_64-linux-musl
 authors:
 - Thijs Cadier
@@ -53,33 +53,33 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '2.6'
 - !ruby/object:Gem::Dependency
-  name: ostruct
+  name: rake
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ">="
+    - - ">"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '12'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ">="
+    - - ">"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '12'
 - !ruby/object:Gem::Dependency
-  name: rake
+  name: ostruct
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ">"
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '12'
-  type: :runtime
+        version: '0'
+  type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ">"
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '12'
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: pry
   requirement: !ruby/object:Gem::Requirement
@@ -241,7 +241,6 @@ files:
 - spec/rdkafka/producer/delivery_handle_spec.rb
 - spec/rdkafka/producer/delivery_report_spec.rb
 - spec/rdkafka/producer/partitions_count_cache_spec.rb
-- spec/rdkafka/producer/partitions_count_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
 licenses:
data/spec/rdkafka/producer/partitions_count_spec.rb
DELETED
@@ -1,359 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe Rdkafka::Producer::PartitionsCountCache do
-  let(:default_ttl) { 1 } # Reduced from 30 to speed up tests
-  let(:custom_ttl) { 0.5 } # Half the default TTL
-  let(:cache) { described_class.new(default_ttl) }
-  let(:custom_ttl_cache) { described_class.new(custom_ttl) }
-  let(:topic) { "test_topic" }
-  let(:topic2) { "test_topic2" }
-  let(:partition_count) { 5 }
-  let(:higher_partition_count) { 10 }
-  let(:lower_partition_count) { 3 }
-  let(:even_higher_partition_count) { 15 }
-
-  describe "#initialize" do
-    it "creates a cache with default TTL when no TTL is specified" do
-      standard_cache = described_class.new
-      expect(standard_cache).to be_a(described_class)
-    end
-
-    it "creates a cache with custom TTL when specified" do
-      expect(custom_ttl_cache).to be_a(described_class)
-    end
-  end
-
-  describe "#get" do
-    context "when cache is empty" do
-      it "yields to get the value and caches it" do
-        block_called = false
-        result = cache.get(topic) do
-          block_called = true
-          partition_count
-        end
-
-        expect(block_called).to be true
-        expect(result).to eq(partition_count)
-
-        # Verify caching by checking if block is called again
-        second_block_called = false
-        second_result = cache.get(topic) do
-          second_block_called = true
-          partition_count + 1 # Different value to ensure we get cached value
-        end
-
-        expect(second_block_called).to be false
-        expect(second_result).to eq(partition_count)
-      end
-    end
-
-    context "when cache has a value" do
-      before do
-        # Seed the cache with a value
-        cache.get(topic) { partition_count }
-      end
-
-      it "returns cached value without yielding if not expired" do
-        block_called = false
-        result = cache.get(topic) do
-          block_called = true
-          partition_count + 1 # Different value to ensure we get cached one
-        end
-
-        expect(block_called).to be false
-        expect(result).to eq(partition_count)
-      end
-
-      it "yields to get new value when TTL has expired" do
-        # Wait for TTL to expire
-        sleep(default_ttl + 0.1)
-
-        block_called = false
-        new_count = partition_count + 1
-        result = cache.get(topic) do
-          block_called = true
-          new_count
-        end
-
-        expect(block_called).to be true
-        expect(result).to eq(new_count)
-
-        # Verify the new value is cached
-        second_block_called = false
-        second_result = cache.get(topic) do
-          second_block_called = true
-          new_count + 1 # Different value again
-        end
-
-        expect(second_block_called).to be false
-        expect(second_result).to eq(new_count)
-      end
-
-      it "respects a custom TTL" do
-        # Seed the custom TTL cache with a value
-        custom_ttl_cache.get(topic) { partition_count }
-
-        # Wait for custom TTL to expire but not default TTL
-        sleep(custom_ttl + 0.1)
-
-        # Custom TTL cache should refresh
-        custom_block_called = false
-        custom_result = custom_ttl_cache.get(topic) do
-          custom_block_called = true
-          higher_partition_count
-        end
-
-        expect(custom_block_called).to be true
-        expect(custom_result).to eq(higher_partition_count)
-
-        # Default TTL cache should not refresh yet
-        default_block_called = false
-        default_result = cache.get(topic) do
-          default_block_called = true
-          higher_partition_count
-        end
-
-        expect(default_block_called).to be false
-        expect(default_result).to eq(partition_count)
-      end
-    end
-
-    context "when new value is obtained" do
-      before do
-        # Seed the cache with initial value
-        cache.get(topic) { partition_count }
-      end
-
-      it "updates cache when new value is higher than cached value" do
-        # Wait for TTL to expire
-        sleep(default_ttl + 0.1)
-
-        # Get higher value
-        result = cache.get(topic) { higher_partition_count }
-        expect(result).to eq(higher_partition_count)
-
-        # Verify it was cached
-        second_result = cache.get(topic) { fail "Should not be called" }
-        expect(second_result).to eq(higher_partition_count)
-      end
-
-      it "preserves higher cached value when new value is lower" do
-        # First update to higher value
-        sleep(default_ttl + 0.1)
-        cache.get(topic) { higher_partition_count }
-
-        # Then try to update to lower value
-        sleep(default_ttl + 0.1)
-        result = cache.get(topic) { lower_partition_count }
-
-        expect(result).to eq(higher_partition_count)
-
-        # and subsequent gets should return the previously cached higher value
-        second_result = cache.get(topic) { fail "Should not be called" }
-        expect(second_result).to eq(higher_partition_count)
-      end
-
-      it "handles multiple topics independently" do
-        # Set up both topics with different values
-        cache.get(topic) { partition_count }
-        cache.get(topic2) { higher_partition_count }
-
-        # Wait for TTL to expire
-        sleep(default_ttl + 0.1)
-
-        # Update first topic
-        first_result = cache.get(topic) { even_higher_partition_count }
-        expect(first_result).to eq(even_higher_partition_count)
-
-        # Update second topic independently
-        second_updated = higher_partition_count + 3
-        second_result = cache.get(topic2) { second_updated }
-        expect(second_result).to eq(second_updated)
-
-        # Both topics should have their updated values
-        expect(cache.get(topic) { fail "Should not be called" }).to eq(even_higher_partition_count)
-        expect(cache.get(topic2) { fail "Should not be called" }).to eq(second_updated)
-      end
-    end
-  end
-
-  describe "#set" do
-    context "when cache is empty" do
-      it "adds a new entry to the cache" do
-        cache.set(topic, partition_count)
-
-        # Verify through get
-        result = cache.get(topic) { fail "Should not be called" }
-        expect(result).to eq(partition_count)
-      end
-    end
-
-    context "when cache already has a value" do
-      before do
-        cache.set(topic, partition_count)
-      end
-
-      it "updates cache when new value is higher" do
-        cache.set(topic, higher_partition_count)
-
-        result = cache.get(topic) { fail "Should not be called" }
-        expect(result).to eq(higher_partition_count)
-      end
-
-      it "keeps original value when new value is lower" do
-        cache.set(topic, lower_partition_count)
-
-        result = cache.get(topic) { fail "Should not be called" }
-        expect(result).to eq(partition_count)
-      end
-
-      it "updates the timestamp even when keeping original value" do
-        # Set initial value
-        cache.set(topic, partition_count)
-
-        # Wait until close to TTL expiring
-        sleep(default_ttl - 0.2)
-
-        # Set lower value (should update timestamp but not value)
-        cache.set(topic, lower_partition_count)
-
-        # Wait a bit more, but still under the full TTL if timestamp was refreshed
-        sleep(0.3)
-
-        # Should still be valid due to timestamp refresh
-        result = cache.get(topic) { fail "Should not be called" }
-        expect(result).to eq(partition_count)
-      end
-    end
-
-    context "with concurrent access" do
-      it "correctly handles simultaneous updates to the same topic" do
-        # This test focuses on the final value after concurrent updates
-        threads = []
-
-        # Create 5 threads that all try to update the same topic with increasing values
-        5.times do |i|
-          threads << Thread.new do
-            value = 10 + i # Start at 10 to ensure all are higher than initial value
-            cache.set(topic, value)
-          end
-        end
-
-        # Wait for all threads to complete
-        threads.each(&:join)
-
-        # The highest value (14) should be stored and accessible through get
-        result = cache.get(topic) { fail "Should not be called" }
-        expect(result).to eq(14)
-      end
-    end
-  end
-
-  describe "TTL behavior" do
-    it "treats entries as expired when they exceed TTL" do
-      # Set initial value
-      cache.get(topic) { partition_count }
-
-      # Wait just under TTL
-      sleep(default_ttl - 0.2)
-
-      # Value should still be cached (block should not be called)
-      result = cache.get(topic) { fail "Should not be called when cache is valid" }
-      expect(result).to eq(partition_count)
-
-      # Now wait to exceed TTL
-      sleep(0.2) # Total sleep is now default_ttl + 0.1
-
-      # Cache should be expired, block should be called
-      block_called = false
-      new_value = partition_count + 3
-      result = cache.get(topic) do
-        block_called = true
-        new_value
-      end
-
-      expect(block_called).to be true
-      expect(result).to eq(new_value)
-    end
-  end
-
-  describe "comprehensive scenarios" do
-    it "handles a full lifecycle of cache operations" do
-      # 1. Initial cache miss, fetch and store
-      result1 = cache.get(topic) { partition_count }
-      expect(result1).to eq(partition_count)
-
-      # 2. Cache hit
-      result2 = cache.get(topic) { fail "Should not be called" }
-      expect(result2).to eq(partition_count)
-
-      # 3. Attempt to set lower value
-      cache.set(topic, lower_partition_count)
-      result3 = cache.get(topic) { fail "Should not be called" }
-      # Should still return the higher original value
-      expect(result3).to eq(partition_count)
-
-      # 4. Set higher value
-      cache.set(topic, higher_partition_count)
-      result4 = cache.get(topic) { fail "Should not be called" }
-      expect(result4).to eq(higher_partition_count)
-
-      # 5. TTL expires, new value provided is lower
-      sleep(default_ttl + 0.1)
-      result5 = cache.get(topic) { lower_partition_count }
-      # This returns the highest value
-      expect(result5).to eq(higher_partition_count)
-
-      # 6. But subsequent get should return the higher cached value
-      result6 = cache.get(topic) { fail "Should not be called" }
-      expect(result6).to eq(higher_partition_count)
-
-      # 7. Set new highest value directly
-      even_higher = higher_partition_count + 5
-      cache.set(topic, even_higher)
-      result7 = cache.get(topic) { fail "Should not be called" }
-      expect(result7).to eq(even_higher)
-    end
-
-    it "handles multiple topics with different TTLs correctly" do
-      # Set up initial values
-      cache.get(topic) { partition_count }
-      custom_ttl_cache.get(topic) { partition_count }
-
-      # Wait past custom TTL but not default TTL
-      sleep(custom_ttl + 0.1)
-
-      # Default cache should NOT refresh (still within default TTL)
-      default_result = cache.get(topic) { fail "Should not be called for default cache" }
-      # Original value should be maintained
-      expect(default_result).to eq(partition_count)
-
-      # Custom TTL cache SHOULD refresh (past custom TTL)
-      custom_cache_value = partition_count + 8
-      custom_block_called = false
-      custom_result = custom_ttl_cache.get(topic) do
-        custom_block_called = true
-        custom_cache_value
-      end
-
-      expect(custom_block_called).to be true
-      expect(custom_result).to eq(custom_cache_value)
-
-      # Now wait past default TTL
-      sleep(default_ttl - custom_ttl + 0.1)
-
-      # Now default cache should also refresh
-      default_block_called = false
-      new_default_value = partition_count + 10
-      new_default_result = cache.get(topic) do
-        default_block_called = true
-        new_default_value
-      end
-
-      expect(default_block_called).to be true
-      expect(new_default_result).to eq(new_default_value)
-    end
-  end
-end