rdkafka 0.22.0.beta1-arm64-darwin → 0.22.1-arm64-darwin
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci_linux_x86_64_musl.yml +2 -2
- data/.github/workflows/push_linux_x86_64_musl.yml +1 -1
- data/.github/workflows/push_ruby.yml +1 -1
- data/.ruby-version +1 -1
- data/CHANGELOG.md +7 -2
- data/README.md +1 -1
- data/ext/librdkafka.dylib +0 -0
- data/lib/rdkafka/bindings.rb +6 -7
- data/lib/rdkafka/producer.rb +11 -6
- data/lib/rdkafka/version.rb +1 -1
- data/rdkafka.gemspec +0 -2
- data/spec/rdkafka/admin_spec.rb +202 -1
- data/spec/rdkafka/bindings_spec.rb +0 -24
- data/spec/rdkafka/producer_spec.rb +295 -2
- metadata +2 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 8ed57e21a97641bf3fac375a479d47a28c8b93d325c25a1b579492ee2b7bb64a
|
4
|
+
data.tar.gz: 2d052d38671bca2d322efc6bf62512a72b0055a964c178bb65fff0bf3848bfc2
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: c7d10f59d98989b1448dd5feb1d70ed5cd3743a78076d1b8c56f14bfec23f53bc04a183acca1f2cffd36fd3a0dcc6eaa4697de1787a282f8a08cef26a8314a17
|
7
|
+
data.tar.gz: d921cd65eb367d4b65c1155611dcfd03d73f8cae4d5d9626c3eb3338cc9266d918e592fc3c4fecbffb5851295f1bdb5ef47b59886b584bc295c66f4f57f0b9f3
|
@@ -24,7 +24,7 @@ jobs:
|
|
24
24
|
timeout-minutes: 30
|
25
25
|
runs-on: ubuntu-latest
|
26
26
|
container:
|
27
|
-
image: alpine:3.22@sha256:
|
27
|
+
image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
|
28
28
|
steps:
|
29
29
|
- name: Install dependencies
|
30
30
|
run: |
|
@@ -123,7 +123,7 @@ jobs:
|
|
123
123
|
timeout-minutes: 45
|
124
124
|
runs-on: ubuntu-latest
|
125
125
|
container:
|
126
|
-
image: alpine:3.22@sha256:
|
126
|
+
image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
|
127
127
|
steps:
|
128
128
|
- name: Install dependencies
|
129
129
|
run: |
|
@@ -15,7 +15,7 @@ jobs:
|
|
15
15
|
runs-on: ubuntu-latest
|
16
16
|
environment: deployment
|
17
17
|
container:
|
18
|
-
image: alpine:3.22@sha256:
|
18
|
+
image: alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
|
19
19
|
steps:
|
20
20
|
- name: Install dependencies
|
21
21
|
run: |
|
data/.ruby-version
CHANGED
@@ -1 +1 @@
|
|
1
|
-
3.4.
|
1
|
+
3.4.5
|
data/CHANGELOG.md
CHANGED
@@ -1,21 +1,26 @@
|
|
1
1
|
# Rdkafka Changelog
|
2
2
|
|
3
|
-
## 0.22.
|
3
|
+
## 0.22.1 (2025-07-17)
|
4
|
+
- [Fix] Fix `Rakefile` being available in the precompiled versions causing build failures.
|
5
|
+
|
6
|
+
## 0.22.0 (2025-07-17)
|
4
7
|
- **[Feature]** Add precompiled `x86_64-linux-gnu` setup.
|
5
8
|
- **[Feature]** Add precompiled `x86_64-linux-musl` setup.
|
6
9
|
- **[Feature]** Add precompiled `macos_arm64` setup.
|
7
10
|
- [Fix] Fix a case where using empty key on the `musl` architecture would cause a segfault.
|
11
|
+
- [Fix] Fix for null pointer reference bypass on empty string being too wide causing segfault.
|
8
12
|
- [Enhancement] Allow for producing to non-existing topics with `key` and `partition_key` present.
|
9
13
|
- [Enhancement] Replace TTL-based partition count cache with a global cache that reuses `librdkafka` statistics data when possible.
|
10
14
|
- [Enhancement] Support producing and consuming of headers with multiple values (KIP-82).
|
11
15
|
- [Enhancement] Allow native Kafka customization poll time.
|
12
16
|
- [Enhancement] Roll out experimental jruby support.
|
13
17
|
- [Enhancement] Run all specs on each of the platforms with and without precompilation.
|
18
|
+
- [Enhancement] Support transactional id in the ACL API.
|
14
19
|
- [Fix] Fix issue where post-closed producer C topics refs would not be cleaned.
|
15
20
|
- [Fix] Fiber causes Segmentation Fault.
|
16
21
|
- [Change] Move to trusted-publishers and remove signing since no longer needed.
|
17
22
|
|
18
|
-
**Note**: Precompiled extensions are a new feature in this release. While they significantly improve installation speed and reduce build dependencies, they should be thoroughly tested in your staging environment before deploying to production. If you encounter any issues with precompiled extensions, you can fall back to building from sources.
|
23
|
+
**Note**: Precompiled extensions are a new feature in this release. While they significantly improve installation speed and reduce build dependencies, they should be thoroughly tested in your staging environment before deploying to production. If you encounter any issues with precompiled extensions, you can fall back to building from sources. For more information, see the [Native Extensions documentation](https://karafka.io/docs/Development-Native-Extensions/).
|
19
24
|
|
20
25
|
## 0.21.0 (2025-02-13)
|
21
26
|
- [Enhancement] Bump librdkafka to `2.8.0`
|
data/README.md
CHANGED
@@ -163,7 +163,7 @@ bundle exec rake produce_messages
|
|
163
163
|
|
164
164
|
| rdkafka-ruby | librdkafka | patches |
|
165
165
|
|-|-|-|
|
166
|
-
| 0.22.x (2025-
|
166
|
+
| 0.22.x (2025-07-17) | 2.8.0 (2025-01-07) | yes |
|
167
167
|
| 0.21.x (2025-02-13) | 2.8.0 (2025-01-07) | yes |
|
168
168
|
| 0.20.0 (2025-01-07) | 2.6.1 (2024-11-18) | yes |
|
169
169
|
| 0.19.0 (2024-10-01) | 2.5.3 (2024-09-02) | yes |
|
data/ext/librdkafka.dylib
CHANGED
Binary file
|
data/lib/rdkafka/bindings.rb
CHANGED
@@ -384,18 +384,16 @@ module Rdkafka
|
|
384
384
|
hsh[name] = method_name
|
385
385
|
end
|
386
386
|
|
387
|
-
def self.partitioner(str, partition_count,
|
387
|
+
def self.partitioner(topic_ptr, str, partition_count, partitioner = "consistent_random")
|
388
388
|
# Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
|
389
389
|
return -1 unless partition_count&.nonzero?
|
390
|
-
# musl architecture crashes with empty string
|
391
|
-
return 0 if str.empty?
|
392
390
|
|
393
|
-
str_ptr = FFI::MemoryPointer.from_string(str)
|
394
|
-
method_name = PARTITIONERS.fetch(
|
395
|
-
raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{
|
391
|
+
str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
|
392
|
+
method_name = PARTITIONERS.fetch(partitioner) do
|
393
|
+
raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner}")
|
396
394
|
end
|
397
395
|
|
398
|
-
public_send(method_name,
|
396
|
+
public_send(method_name, topic_ptr, str_ptr, str.size, partition_count, nil, nil)
|
399
397
|
end
|
400
398
|
|
401
399
|
# Create Topics
|
@@ -513,6 +511,7 @@ module Rdkafka
|
|
513
511
|
RD_KAFKA_RESOURCE_TOPIC = 2
|
514
512
|
RD_KAFKA_RESOURCE_GROUP = 3
|
515
513
|
RD_KAFKA_RESOURCE_BROKER = 4
|
514
|
+
RD_KAFKA_RESOURCE_TRANSACTIONAL_ID = 5
|
516
515
|
|
517
516
|
# rd_kafka_ResourcePatternType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7320
|
518
517
|
|
data/lib/rdkafka/producer.rb
CHANGED
@@ -51,13 +51,13 @@ module Rdkafka
|
|
51
51
|
|
52
52
|
# @private
|
53
53
|
# @param native_kafka [NativeKafka]
|
54
|
-
# @param
|
54
|
+
# @param partitioner [String, nil] name of the partitioner we want to use or nil to use
|
55
55
|
# the "consistent_random" default
|
56
|
-
def initialize(native_kafka,
|
56
|
+
def initialize(native_kafka, partitioner)
|
57
57
|
@topics_refs_map = {}
|
58
58
|
@topics_configs = {}
|
59
59
|
@native_kafka = native_kafka
|
60
|
-
@
|
60
|
+
@partitioner = partitioner || "consistent_random"
|
61
61
|
|
62
62
|
# Makes sure, that native kafka gets closed before it gets GCed by Ruby
|
63
63
|
ObjectSpace.define_finalizer(self, native_kafka.finalizer)
|
@@ -275,7 +275,8 @@ module Rdkafka
|
|
275
275
|
timestamp: nil,
|
276
276
|
headers: nil,
|
277
277
|
label: nil,
|
278
|
-
topic_config: EMPTY_HASH
|
278
|
+
topic_config: EMPTY_HASH,
|
279
|
+
partitioner: @partitioner
|
279
280
|
)
|
280
281
|
closed_producer_check(__method__)
|
281
282
|
|
@@ -307,10 +308,14 @@ module Rdkafka
|
|
307
308
|
|
308
309
|
# Check if there are no overrides for the partitioner and use the default one only when
|
309
310
|
# no per-topic is present.
|
310
|
-
|
311
|
+
selected_partitioner = @topics_configs.dig(topic, topic_config_hash, :partitioner) || partitioner
|
311
312
|
|
312
313
|
# If the topic is not present, set to -1
|
313
|
-
partition = Rdkafka::Bindings.partitioner(
|
314
|
+
partition = Rdkafka::Bindings.partitioner(
|
315
|
+
topic_ref,
|
316
|
+
partition_key,
|
317
|
+
partition_count,
|
318
|
+
selected_partitioner) if partition_count.positive?
|
314
319
|
end
|
315
320
|
|
316
321
|
# If partition is nil, use -1 to let librdafka set the partition randomly or
|
data/lib/rdkafka/version.rb
CHANGED
data/rdkafka.gemspec
CHANGED
@@ -9,13 +9,11 @@ Gem::Specification.new do |gem|
|
|
9
9
|
gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.7+."
|
10
10
|
gem.license = 'MIT'
|
11
11
|
|
12
|
-
gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
|
13
12
|
gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
|
14
13
|
gem.name = 'rdkafka'
|
15
14
|
gem.require_paths = ['lib']
|
16
15
|
gem.version = Rdkafka::VERSION
|
17
16
|
gem.required_ruby_version = '>= 3.1'
|
18
|
-
gem.extensions = %w(ext/Rakefile)
|
19
17
|
|
20
18
|
if ENV['RUBY_PLATFORM']
|
21
19
|
gem.platform = ENV['RUBY_PLATFORM']
|
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -513,7 +513,7 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
|
|
513
513
|
end
|
514
514
|
end
|
515
515
|
|
516
|
-
describe "#ACL tests" do
|
516
|
+
describe "#ACL tests for topic resource" do
|
517
517
|
let(:non_existing_resource_name) {"non-existing-topic"}
|
518
518
|
before do
|
519
519
|
#create topic for testing acl
|
@@ -615,6 +615,207 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
|
|
615
615
|
end
|
616
616
|
end
|
617
617
|
|
618
|
+
describe "#ACL tests for transactional_id" do
|
619
|
+
let(:transactional_id_resource_name) {"test-transactional-id"}
|
620
|
+
let(:non_existing_transactional_id) {"non-existing-transactional-id"}
|
621
|
+
let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID }
|
622
|
+
let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
|
623
|
+
let(:transactional_id_principal) { "User:test-user" }
|
624
|
+
let(:transactional_id_host) { "*" }
|
625
|
+
let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE }
|
626
|
+
let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
|
627
|
+
|
628
|
+
after do
|
629
|
+
# Clean up any ACLs that might have been created during tests
|
630
|
+
begin
|
631
|
+
delete_acl_handle = admin.delete_acl(
|
632
|
+
resource_type: transactional_id_resource_type,
|
633
|
+
resource_name: nil,
|
634
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
635
|
+
principal: transactional_id_principal,
|
636
|
+
host: transactional_id_host,
|
637
|
+
operation: transactional_id_operation,
|
638
|
+
permission_type: transactional_id_permission_type
|
639
|
+
)
|
640
|
+
delete_acl_handle.wait(max_wait_timeout: 15.0)
|
641
|
+
rescue
|
642
|
+
# Ignore cleanup errors
|
643
|
+
end
|
644
|
+
end
|
645
|
+
|
646
|
+
describe "#create_acl" do
|
647
|
+
it "creates acl for a transactional_id" do
|
648
|
+
create_acl_handle = admin.create_acl(
|
649
|
+
resource_type: transactional_id_resource_type,
|
650
|
+
resource_name: transactional_id_resource_name,
|
651
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
652
|
+
principal: transactional_id_principal,
|
653
|
+
host: transactional_id_host,
|
654
|
+
operation: transactional_id_operation,
|
655
|
+
permission_type: transactional_id_permission_type
|
656
|
+
)
|
657
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
658
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
659
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
660
|
+
end
|
661
|
+
|
662
|
+
it "creates acl for a non-existing transactional_id" do
|
663
|
+
# ACL creation for transactional_ids that don't exist will still get created successfully
|
664
|
+
create_acl_handle = admin.create_acl(
|
665
|
+
resource_type: transactional_id_resource_type,
|
666
|
+
resource_name: non_existing_transactional_id,
|
667
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
668
|
+
principal: transactional_id_principal,
|
669
|
+
host: transactional_id_host,
|
670
|
+
operation: transactional_id_operation,
|
671
|
+
permission_type: transactional_id_permission_type
|
672
|
+
)
|
673
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
674
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
675
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
676
|
+
|
677
|
+
# Clean up the ACL that was created for the non-existing transactional_id
|
678
|
+
delete_acl_handle = admin.delete_acl(
|
679
|
+
resource_type: transactional_id_resource_type,
|
680
|
+
resource_name: non_existing_transactional_id,
|
681
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
682
|
+
principal: transactional_id_principal,
|
683
|
+
host: transactional_id_host,
|
684
|
+
operation: transactional_id_operation,
|
685
|
+
permission_type: transactional_id_permission_type
|
686
|
+
)
|
687
|
+
delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
|
688
|
+
expect(delete_acl_handle[:response]).to eq(0)
|
689
|
+
expect(delete_acl_report.deleted_acls.size).to eq(1)
|
690
|
+
end
|
691
|
+
end
|
692
|
+
|
693
|
+
describe "#describe_acl" do
|
694
|
+
it "describes acl of a transactional_id that does not exist" do
|
695
|
+
describe_acl_handle = admin.describe_acl(
|
696
|
+
resource_type: transactional_id_resource_type,
|
697
|
+
resource_name: non_existing_transactional_id,
|
698
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
699
|
+
principal: transactional_id_principal,
|
700
|
+
host: transactional_id_host,
|
701
|
+
operation: transactional_id_operation,
|
702
|
+
permission_type: transactional_id_permission_type
|
703
|
+
)
|
704
|
+
describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
|
705
|
+
expect(describe_acl_handle[:response]).to eq(0)
|
706
|
+
expect(describe_acl_report.acls.size).to eq(0)
|
707
|
+
end
|
708
|
+
|
709
|
+
it "creates acls and describes the newly created transactional_id acls" do
|
710
|
+
# Create first ACL
|
711
|
+
create_acl_handle = admin.create_acl(
|
712
|
+
resource_type: transactional_id_resource_type,
|
713
|
+
resource_name: "test_transactional_id_1",
|
714
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
715
|
+
principal: transactional_id_principal,
|
716
|
+
host: transactional_id_host,
|
717
|
+
operation: transactional_id_operation,
|
718
|
+
permission_type: transactional_id_permission_type
|
719
|
+
)
|
720
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
721
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
722
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
723
|
+
|
724
|
+
# Create second ACL
|
725
|
+
create_acl_handle = admin.create_acl(
|
726
|
+
resource_type: transactional_id_resource_type,
|
727
|
+
resource_name: "test_transactional_id_2",
|
728
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
729
|
+
principal: transactional_id_principal,
|
730
|
+
host: transactional_id_host,
|
731
|
+
operation: transactional_id_operation,
|
732
|
+
permission_type: transactional_id_permission_type
|
733
|
+
)
|
734
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
735
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
736
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
737
|
+
|
738
|
+
# Since we create and immediately check, this is slow on loaded CIs, hence we wait
|
739
|
+
sleep(2)
|
740
|
+
|
741
|
+
# Describe ACLs - filter by transactional_id resource type
|
742
|
+
describe_acl_handle = admin.describe_acl(
|
743
|
+
resource_type: transactional_id_resource_type,
|
744
|
+
resource_name: nil,
|
745
|
+
resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
|
746
|
+
principal: transactional_id_principal,
|
747
|
+
host: transactional_id_host,
|
748
|
+
operation: transactional_id_operation,
|
749
|
+
permission_type: transactional_id_permission_type
|
750
|
+
)
|
751
|
+
describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
|
752
|
+
expect(describe_acl_handle[:response]).to eq(0)
|
753
|
+
expect(describe_acl_report.acls.length).to eq(2)
|
754
|
+
end
|
755
|
+
end
|
756
|
+
|
757
|
+
describe "#delete_acl" do
|
758
|
+
it "deletes acl of a transactional_id that does not exist" do
|
759
|
+
delete_acl_handle = admin.delete_acl(
|
760
|
+
resource_type: transactional_id_resource_type,
|
761
|
+
resource_name: non_existing_transactional_id,
|
762
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
763
|
+
principal: transactional_id_principal,
|
764
|
+
host: transactional_id_host,
|
765
|
+
operation: transactional_id_operation,
|
766
|
+
permission_type: transactional_id_permission_type
|
767
|
+
)
|
768
|
+
delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
|
769
|
+
expect(delete_acl_handle[:response]).to eq(0)
|
770
|
+
expect(delete_acl_report.deleted_acls.size).to eq(0)
|
771
|
+
end
|
772
|
+
|
773
|
+
it "creates transactional_id acls and deletes the newly created acls" do
|
774
|
+
# Create first ACL
|
775
|
+
create_acl_handle = admin.create_acl(
|
776
|
+
resource_type: transactional_id_resource_type,
|
777
|
+
resource_name: "test_transactional_id_1",
|
778
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
779
|
+
principal: transactional_id_principal,
|
780
|
+
host: transactional_id_host,
|
781
|
+
operation: transactional_id_operation,
|
782
|
+
permission_type: transactional_id_permission_type
|
783
|
+
)
|
784
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
785
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
786
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
787
|
+
|
788
|
+
# Create second ACL
|
789
|
+
create_acl_handle = admin.create_acl(
|
790
|
+
resource_type: transactional_id_resource_type,
|
791
|
+
resource_name: "test_transactional_id_2",
|
792
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
793
|
+
principal: transactional_id_principal,
|
794
|
+
host: transactional_id_host,
|
795
|
+
operation: transactional_id_operation,
|
796
|
+
permission_type: transactional_id_permission_type
|
797
|
+
)
|
798
|
+
create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
|
799
|
+
expect(create_acl_report.rdkafka_response).to eq(0)
|
800
|
+
expect(create_acl_report.rdkafka_response_string).to eq("")
|
801
|
+
|
802
|
+
# Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters
|
803
|
+
delete_acl_handle = admin.delete_acl(
|
804
|
+
resource_type: transactional_id_resource_type,
|
805
|
+
resource_name: nil,
|
806
|
+
resource_pattern_type: transactional_id_resource_pattern_type,
|
807
|
+
principal: transactional_id_principal,
|
808
|
+
host: transactional_id_host,
|
809
|
+
operation: transactional_id_operation,
|
810
|
+
permission_type: transactional_id_permission_type
|
811
|
+
)
|
812
|
+
delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
|
813
|
+
expect(delete_acl_handle[:response]).to eq(0)
|
814
|
+
expect(delete_acl_report.deleted_acls.length).to eq(2)
|
815
|
+
end
|
816
|
+
end
|
817
|
+
end
|
818
|
+
|
618
819
|
describe('Group tests') do
|
619
820
|
describe "#delete_group" do
|
620
821
|
describe("with an existing group") do
|
@@ -77,30 +77,6 @@ describe Rdkafka::Bindings do
|
|
77
77
|
end
|
78
78
|
end
|
79
79
|
|
80
|
-
describe "partitioner" do
|
81
|
-
let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
|
82
|
-
let(:partition_count) { rand(50) + 1 }
|
83
|
-
|
84
|
-
it "should return the same partition for a similar string and the same partition count" do
|
85
|
-
result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
|
86
|
-
result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
|
87
|
-
expect(result_1).to eq(result_2)
|
88
|
-
end
|
89
|
-
|
90
|
-
it "should match the old partitioner" do
|
91
|
-
result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
|
92
|
-
result_2 = (Zlib.crc32(partition_key) % partition_count)
|
93
|
-
expect(result_1).to eq(result_2)
|
94
|
-
end
|
95
|
-
|
96
|
-
it "should return the partition calculated by the specified partitioner" do
|
97
|
-
result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
|
98
|
-
ptr = FFI::MemoryPointer.from_string(partition_key)
|
99
|
-
result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
|
100
|
-
expect(result_1).to eq(result_2)
|
101
|
-
end
|
102
|
-
end
|
103
|
-
|
104
80
|
describe "stats callback" do
|
105
81
|
context "without a stats callback" do
|
106
82
|
it "should do nothing" do
|
@@ -340,7 +340,7 @@ describe Rdkafka::Producer do
|
|
340
340
|
)
|
341
341
|
end
|
342
342
|
|
343
|
-
expect(messages[0].partition).to
|
343
|
+
expect(messages[0].partition).to be >= 0
|
344
344
|
expect(messages[0].key).to eq 'a'
|
345
345
|
end
|
346
346
|
|
@@ -920,7 +920,6 @@ describe Rdkafka::Producer do
|
|
920
920
|
end
|
921
921
|
end
|
922
922
|
|
923
|
-
|
924
923
|
describe 'with active statistics callback' do
|
925
924
|
let(:producer) do
|
926
925
|
rdkafka_producer_config('statistics.interval.ms': 1_000).producer
|
@@ -1049,4 +1048,298 @@ describe Rdkafka::Producer do
|
|
1049
1048
|
end
|
1050
1049
|
end
|
1051
1050
|
end
|
1051
|
+
|
1052
|
+
let(:producer) { rdkafka_producer_config.producer }
|
1053
|
+
let(:all_partitioners) { %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random) }
|
1054
|
+
|
1055
|
+
describe "partitioner behavior through producer API" do
|
1056
|
+
context "testing all partitioners with same key" do
|
1057
|
+
it "should not return partition 0 for all partitioners" do
|
1058
|
+
test_key = "test-key-123"
|
1059
|
+
results = {}
|
1060
|
+
|
1061
|
+
all_partitioners.each do |partitioner|
|
1062
|
+
handle = producer.produce(
|
1063
|
+
topic: "partitioner_test_topic",
|
1064
|
+
payload: "test payload",
|
1065
|
+
partition_key: test_key,
|
1066
|
+
partitioner: partitioner
|
1067
|
+
)
|
1068
|
+
|
1069
|
+
report = handle.wait(max_wait_timeout: 5)
|
1070
|
+
results[partitioner] = report.partition
|
1071
|
+
end
|
1072
|
+
|
1073
|
+
# Should not all be the same partition (especially not all 0)
|
1074
|
+
unique_partitions = results.values.uniq
|
1075
|
+
expect(unique_partitions.size).to be > 1
|
1076
|
+
end
|
1077
|
+
end
|
1078
|
+
|
1079
|
+
context "empty string partition key" do
|
1080
|
+
it "should produce message with empty partition key without crashing and go to partition 0 for all partitioners" do
|
1081
|
+
all_partitioners.each do |partitioner|
|
1082
|
+
handle = producer.produce(
|
1083
|
+
topic: "partitioner_test_topic",
|
1084
|
+
payload: "test payload",
|
1085
|
+
key: "test-key",
|
1086
|
+
partition_key: "",
|
1087
|
+
partitioner: partitioner
|
1088
|
+
)
|
1089
|
+
|
1090
|
+
report = handle.wait(max_wait_timeout: 5)
|
1091
|
+
expect(report.partition).to be >= 0
|
1092
|
+
end
|
1093
|
+
end
|
1094
|
+
end
|
1095
|
+
|
1096
|
+
context "nil partition key" do
|
1097
|
+
it "should handle nil partition key gracefully" do
|
1098
|
+
handle = producer.produce(
|
1099
|
+
topic: "partitioner_test_topic",
|
1100
|
+
payload: "test payload",
|
1101
|
+
key: "test-key",
|
1102
|
+
partition_key: nil
|
1103
|
+
)
|
1104
|
+
|
1105
|
+
report = handle.wait(max_wait_timeout: 5)
|
1106
|
+
expect(report.partition).to be >= 0
|
1107
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1108
|
+
end
|
1109
|
+
end
|
1110
|
+
|
1111
|
+
context "various key types and lengths with different partitioners" do
|
1112
|
+
it "should handle very short keys with all partitioners" do
|
1113
|
+
all_partitioners.each do |partitioner|
|
1114
|
+
handle = producer.produce(
|
1115
|
+
topic: "partitioner_test_topic",
|
1116
|
+
payload: "test payload",
|
1117
|
+
partition_key: "a",
|
1118
|
+
partitioner: partitioner
|
1119
|
+
)
|
1120
|
+
|
1121
|
+
report = handle.wait(max_wait_timeout: 5)
|
1122
|
+
expect(report.partition).to be >= 0
|
1123
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1124
|
+
end
|
1125
|
+
end
|
1126
|
+
|
1127
|
+
it "should handle very long keys with all partitioners" do
|
1128
|
+
long_key = "a" * 1000
|
1129
|
+
|
1130
|
+
all_partitioners.each do |partitioner|
|
1131
|
+
handle = producer.produce(
|
1132
|
+
topic: "partitioner_test_topic",
|
1133
|
+
payload: "test payload",
|
1134
|
+
partition_key: long_key,
|
1135
|
+
partitioner: partitioner
|
1136
|
+
)
|
1137
|
+
|
1138
|
+
report = handle.wait(max_wait_timeout: 5)
|
1139
|
+
expect(report.partition).to be >= 0
|
1140
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1141
|
+
end
|
1142
|
+
end
|
1143
|
+
|
1144
|
+
it "should handle unicode keys with all partitioners" do
|
1145
|
+
unicode_key = "测试键值🚀"
|
1146
|
+
|
1147
|
+
all_partitioners.each do |partitioner|
|
1148
|
+
handle = producer.produce(
|
1149
|
+
topic: "partitioner_test_topic",
|
1150
|
+
payload: "test payload",
|
1151
|
+
partition_key: unicode_key,
|
1152
|
+
partitioner: partitioner
|
1153
|
+
)
|
1154
|
+
|
1155
|
+
report = handle.wait(max_wait_timeout: 5)
|
1156
|
+
expect(report.partition).to be >= 0
|
1157
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1158
|
+
end
|
1159
|
+
end
|
1160
|
+
end
|
1161
|
+
|
1162
|
+
context "consistency testing for deterministic partitioners" do
|
1163
|
+
%w(consistent murmur2 fnv1a).each do |partitioner|
|
1164
|
+
it "should consistently route same partition key to same partition with #{partitioner}" do
|
1165
|
+
partition_key = "consistent-test-key"
|
1166
|
+
|
1167
|
+
# Produce multiple messages with same partition key
|
1168
|
+
reports = 5.times.map do
|
1169
|
+
handle = producer.produce(
|
1170
|
+
topic: "partitioner_test_topic",
|
1171
|
+
payload: "test payload #{Time.now.to_f}",
|
1172
|
+
partition_key: partition_key,
|
1173
|
+
partitioner: partitioner
|
1174
|
+
)
|
1175
|
+
handle.wait(max_wait_timeout: 5)
|
1176
|
+
end
|
1177
|
+
|
1178
|
+
# All should go to same partition
|
1179
|
+
partitions = reports.map(&:partition).uniq
|
1180
|
+
expect(partitions.size).to eq(1)
|
1181
|
+
end
|
1182
|
+
end
|
1183
|
+
end
|
1184
|
+
|
1185
|
+
context "randomness testing for random partitioners" do
|
1186
|
+
%w(random consistent_random murmur2_random fnv1a_random).each do |partitioner|
|
1187
|
+
it "should potentially distribute across partitions with #{partitioner}" do
|
1188
|
+
# Note: random partitioners might still return same value by chance
|
1189
|
+
partition_key = "random-test-key"
|
1190
|
+
|
1191
|
+
reports = 10.times.map do
|
1192
|
+
handle = producer.produce(
|
1193
|
+
topic: "partitioner_test_topic",
|
1194
|
+
payload: "test payload #{Time.now.to_f}",
|
1195
|
+
partition_key: partition_key,
|
1196
|
+
partitioner: partitioner
|
1197
|
+
)
|
1198
|
+
handle.wait(max_wait_timeout: 5)
|
1199
|
+
end
|
1200
|
+
|
1201
|
+
partitions = reports.map(&:partition)
|
1202
|
+
|
1203
|
+
# Just ensure they're valid partitions
|
1204
|
+
partitions.each do |partition|
|
1205
|
+
expect(partition).to be >= 0
|
1206
|
+
expect(partition).to be < producer.partition_count("partitioner_test_topic")
|
1207
|
+
end
|
1208
|
+
end
|
1209
|
+
end
|
1210
|
+
end
|
1211
|
+
|
1212
|
+
context "comparing different partitioners with same key" do
|
1213
|
+
it "should route different partition keys to potentially different partitions" do
|
1214
|
+
keys = ["key1", "key2", "key3", "key4", "key5"]
|
1215
|
+
|
1216
|
+
all_partitioners.each do |partitioner|
|
1217
|
+
reports = keys.map do |key|
|
1218
|
+
handle = producer.produce(
|
1219
|
+
topic: "partitioner_test_topic",
|
1220
|
+
payload: "test payload",
|
1221
|
+
partition_key: key,
|
1222
|
+
partitioner: partitioner
|
1223
|
+
)
|
1224
|
+
handle.wait(max_wait_timeout: 5)
|
1225
|
+
end
|
1226
|
+
|
1227
|
+
partitions = reports.map(&:partition).uniq
|
1228
|
+
|
1229
|
+
# Should distribute across multiple partitions for most partitioners
|
1230
|
+
# (though some might hash all keys to same partition by chance)
|
1231
|
+
expect(partitions.all? { |p| p >= 0 && p < producer.partition_count("partitioner_test_topic") }).to be true
|
1232
|
+
end
|
1233
|
+
end
|
1234
|
+
end
|
1235
|
+
|
1236
|
+
context "partition key vs regular key behavior" do
|
1237
|
+
it "should use partition key for partitioning when both key and partition_key are provided" do
|
1238
|
+
# Use keys that would hash to different partitions
|
1239
|
+
regular_key = "regular-key-123"
|
1240
|
+
partition_key = "partition-key-456"
|
1241
|
+
|
1242
|
+
# Message with both keys
|
1243
|
+
handle1 = producer.produce(
|
1244
|
+
topic: "partitioner_test_topic",
|
1245
|
+
payload: "test payload 1",
|
1246
|
+
key: regular_key,
|
1247
|
+
partition_key: partition_key
|
1248
|
+
)
|
1249
|
+
|
1250
|
+
# Message with only partition key (should go to same partition)
|
1251
|
+
handle2 = producer.produce(
|
1252
|
+
topic: "partitioner_test_topic",
|
1253
|
+
payload: "test payload 2",
|
1254
|
+
partition_key: partition_key
|
1255
|
+
)
|
1256
|
+
|
1257
|
+
# Message with only regular key (should go to different partition)
|
1258
|
+
handle3 = producer.produce(
|
1259
|
+
topic: "partitioner_test_topic",
|
1260
|
+
payload: "test payload 3",
|
1261
|
+
key: regular_key
|
1262
|
+
)
|
1263
|
+
|
1264
|
+
report1 = handle1.wait(max_wait_timeout: 5)
|
1265
|
+
report2 = handle2.wait(max_wait_timeout: 5)
|
1266
|
+
report3 = handle3.wait(max_wait_timeout: 5)
|
1267
|
+
|
1268
|
+
# Messages 1 and 2 should go to same partition (both use partition_key)
|
1269
|
+
expect(report1.partition).to eq(report2.partition)
|
1270
|
+
|
1271
|
+
# Message 3 should potentially go to different partition (uses regular key)
|
1272
|
+
expect(report3.partition).not_to eq(report1.partition)
|
1273
|
+
end
|
1274
|
+
end
|
1275
|
+
|
1276
|
+
context "edge case combinations with different partitioners" do
|
1277
|
+
it "should handle nil partition key with all partitioners" do
|
1278
|
+
all_partitioners.each do |partitioner|
|
1279
|
+
handle = producer.produce(
|
1280
|
+
topic: "partitioner_test_topic",
|
1281
|
+
payload: "test payload",
|
1282
|
+
key: "test-key",
|
1283
|
+
partition_key: nil,
|
1284
|
+
partitioner: partitioner
|
1285
|
+
)
|
1286
|
+
|
1287
|
+
report = handle.wait(max_wait_timeout: 5)
|
1288
|
+
expect(report.partition).to be >= 0
|
1289
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1290
|
+
end
|
1291
|
+
end
|
1292
|
+
|
1293
|
+
it "should handle whitespace-only partition key with all partitioners" do
|
1294
|
+
all_partitioners.each do |partitioner|
|
1295
|
+
handle = producer.produce(
|
1296
|
+
topic: "partitioner_test_topic",
|
1297
|
+
payload: "test payload",
|
1298
|
+
partition_key: " ",
|
1299
|
+
partitioner: partitioner
|
1300
|
+
)
|
1301
|
+
|
1302
|
+
report = handle.wait(max_wait_timeout: 5)
|
1303
|
+
expect(report.partition).to be >= 0
|
1304
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1305
|
+
end
|
1306
|
+
end
|
1307
|
+
|
1308
|
+
it "should handle newline characters in partition key with all partitioners" do
|
1309
|
+
all_partitioners.each do |partitioner|
|
1310
|
+
handle = producer.produce(
|
1311
|
+
topic: "partitioner_test_topic",
|
1312
|
+
payload: "test payload",
|
1313
|
+
partition_key: "key\nwith\nnewlines",
|
1314
|
+
partitioner: partitioner
|
1315
|
+
)
|
1316
|
+
|
1317
|
+
report = handle.wait(max_wait_timeout: 5)
|
1318
|
+
expect(report.partition).to be >= 0
|
1319
|
+
expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
|
1320
|
+
end
|
1321
|
+
end
|
1322
|
+
end
|
1323
|
+
|
1324
|
+
context "debugging partitioner issues" do
|
1325
|
+
it "should show if all partitioners return 0 (indicating a problem)" do
|
1326
|
+
test_key = "debug-test-key"
|
1327
|
+
zero_count = 0
|
1328
|
+
|
1329
|
+
all_partitioners.each do |partitioner|
|
1330
|
+
handle = producer.produce(
|
1331
|
+
topic: "partitioner_test_topic",
|
1332
|
+
payload: "debug payload",
|
1333
|
+
partition_key: test_key,
|
1334
|
+
partitioner: partitioner
|
1335
|
+
)
|
1336
|
+
|
1337
|
+
report = handle.wait(max_wait_timeout: 5)
|
1338
|
+
zero_count += 1 if report.partition == 0
|
1339
|
+
end
|
1340
|
+
|
1341
|
+
expect(zero_count).to be < all_partitioners.size
|
1342
|
+
end
|
1343
|
+
end
|
1344
|
+
end
|
1052
1345
|
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: rdkafka
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.22.
|
4
|
+
version: 0.22.1
|
5
5
|
platform: arm64-darwin
|
6
6
|
authors:
|
7
7
|
- Thijs Cadier
|
@@ -140,8 +140,7 @@ description: Modern Kafka client library for Ruby based on librdkafka
|
|
140
140
|
email:
|
141
141
|
- contact@karafka.io
|
142
142
|
executables: []
|
143
|
-
extensions:
|
144
|
-
- ext/Rakefile
|
143
|
+
extensions: []
|
145
144
|
extra_rdoc_files: []
|
146
145
|
files:
|
147
146
|
- ".github/CODEOWNERS"
|