karafka-rdkafka 0.20.0.rc2 → 0.20.0
This diff shows the content of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
- data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
- data/.github/workflows/ci_macos_arm64.yml +306 -0
- data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
- data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
- data/.github/workflows/push_macos_arm64.yml +54 -0
- data/.github/workflows/push_ruby.yml +37 -0
- data/.gitignore +1 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +25 -4
- data/README.md +2 -3
- data/Rakefile +0 -2
- data/dist/{librdkafka-2.10.0.tar.gz → librdkafka-2.8.0.tar.gz} +0 -0
- data/docker-compose.yml +1 -1
- data/ext/Rakefile +1 -1
- data/ext/build_common.sh +361 -0
- data/ext/build_linux_x86_64_gnu.sh +306 -0
- data/ext/build_linux_x86_64_musl.sh +763 -0
- data/ext/build_macos_arm64.sh +550 -0
- data/karafka-rdkafka.gemspec +51 -10
- data/lib/rdkafka/bindings.rb +32 -6
- data/lib/rdkafka/config.rb +4 -1
- data/lib/rdkafka/error.rb +8 -1
- data/lib/rdkafka/native_kafka.rb +4 -0
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +51 -34
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +1 -0
- data/renovate.json +74 -0
- data/spec/rdkafka/admin_spec.rb +217 -3
- data/spec/rdkafka/bindings_spec.rb +0 -25
- data/spec/rdkafka/config_spec.rb +1 -1
- data/spec/rdkafka/consumer_spec.rb +35 -17
- data/spec/rdkafka/metadata_spec.rb +2 -2
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +493 -8
- data/spec/spec_helper.rb +32 -7
- metadata +37 -95
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +0 -99
- data/Guardfile +0 -19
- data/certs/cert.pem +0 -26
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -3
data/spec/rdkafka/admin_spec.rb
CHANGED
@@ -34,7 +34,7 @@ describe Rdkafka::Admin do
   describe '#describe_errors' do
     let(:errors) { admin.class.describe_errors }

-    it { expect(errors.size).to eq(
+    it { expect(errors.size).to eq(170) }
     it { expect(errors[-184]).to eq(code: -184, description: 'Local: Queue full', name: '_QUEUE_FULL') }
     it { expect(errors[21]).to eq(code: 21, description: 'Broker: Invalid required acks value', name: 'INVALID_REQUIRED_ACKS') }
   end
@@ -295,6 +295,8 @@ describe Rdkafka::Admin do
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'delete.retention.ms'
       end
@@ -325,6 +327,9 @@ describe Rdkafka::Admin do
       expect(resources_results.size).to eq(1)
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)
+
+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'delete.retention.ms'
       end
@@ -356,6 +361,8 @@ describe Rdkafka::Admin do
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'cleanup.policy'
       end
@@ -387,6 +394,8 @@ describe Rdkafka::Admin do
       expect(resources_results.first.type).to eq(2)
       expect(resources_results.first.name).to eq(topic_name)

+      sleep(1)
+
       ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
         config.name == 'cleanup.policy'
       end
@@ -504,7 +513,7 @@ describe Rdkafka::Admin do
     end
   end

-  describe "#ACL tests" do
+  describe "#ACL tests for topic resource" do
     let(:non_existing_resource_name) {"non-existing-topic"}
     before do
       #create topic for testing acl
@@ -606,6 +615,207 @@ describe Rdkafka::Admin do
     end
   end

+  describe "#ACL tests for transactional_id" do
+    let(:transactional_id_resource_name) {"test-transactional-id"}
+    let(:non_existing_transactional_id) {"non-existing-transactional-id"}
+    let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID }
+    let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+    let(:transactional_id_principal) { "User:test-user" }
+    let(:transactional_id_host) { "*" }
+    let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE }
+    let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+
+    after do
+      # Clean up any ACLs that might have been created during tests
+      begin
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_handle.wait(max_wait_timeout: 15.0)
+      rescue
+        # Ignore cleanup errors
+      end
+    end
+
+    describe "#create_acl" do
+      it "creates acl for a transactional_id" do
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: transactional_id_resource_name,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+      end
+
+      it "creates acl for a non-existing transactional_id" do
+        # ACL creation for transactional_ids that don't exist will still get created successfully
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Clean up the ACL that was created for the non-existing transactional_id
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.size).to eq(1)
+      end
+    end
+
+    describe "#describe_acl" do
+      it "describes acl of a transactional_id that does not exist" do
+        describe_acl_handle = admin.describe_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(describe_acl_handle[:response]).to eq(0)
+        expect(describe_acl_report.acls.size).to eq(0)
+      end
+
+      it "creates acls and describes the newly created transactional_id acls" do
+        # Create first ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_1",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Create second ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_2",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Since we create and immediately check, this is slow on loaded CIs, hence we wait
+        sleep(2)
+
+        # Describe ACLs - filter by transactional_id resource type
+        describe_acl_handle = admin.describe_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(describe_acl_handle[:response]).to eq(0)
+        expect(describe_acl_report.acls.length).to eq(2)
+      end
+    end
+
+    describe "#delete_acl" do
+      it "deletes acl of a transactional_id that does not exist" do
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: non_existing_transactional_id,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.size).to eq(0)
+      end
+
+      it "creates transactional_id acls and deletes the newly created acls" do
+        # Create first ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_1",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Create second ACL
+        create_acl_handle = admin.create_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: "test_transactional_id_2",
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(create_acl_report.rdkafka_response).to eq(0)
+        expect(create_acl_report.rdkafka_response_string).to eq("")
+
+        # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters
+        delete_acl_handle = admin.delete_acl(
+          resource_type: transactional_id_resource_type,
+          resource_name: nil,
+          resource_pattern_type: transactional_id_resource_pattern_type,
+          principal: transactional_id_principal,
+          host: transactional_id_host,
+          operation: transactional_id_operation,
+          permission_type: transactional_id_permission_type
+        )
+        delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+        expect(delete_acl_handle[:response]).to eq(0)
+        expect(delete_acl_report.deleted_acls.length).to eq(2)
+      end
+    end
+  end
+
   describe('Group tests') do
     describe "#delete_group" do
       describe("with an existing group") do
@@ -622,7 +832,11 @@ describe Rdkafka::Admin do

           consumer.subscribe(topic_name)
           wait_for_assignment(consumer)
-          message =
+          message = nil
+
+          10.times do
+            message ||= consumer.poll(100)
+          end

           expect(message).to_not be_nil

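The bulk of the admin_spec changes above are new specs for transactional_id ACLs, and they all exercise the same handle-and-report flow through the admin API. A minimal sketch of that flow, using only calls as they appear in the specs above; the resource name and principal are illustrative, and `rdkafka_producer_config` is this suite's spec helper:

  # Sketch, assuming a local broker reachable via this suite's spec helpers.
  admin = rdkafka_producer_config.admin

  create_acl_handle = admin.create_acl(
    resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID,
    resource_name: "example-transactional-id",   # hypothetical transactional.id
    resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
    principal: "User:example-user",              # hypothetical principal
    host: "*",
    operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE,
    permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
  )
  report = create_acl_handle.wait(max_wait_timeout: 15.0)
  report.rdkafka_response # => 0 on success

  admin.close

As the delete_acl specs note, passing resource_name: nil deletes every ACL matching the remaining filters, which is how the after hook cleans up.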
data/spec/rdkafka/bindings_spec.rb
CHANGED
@@ -77,30 +77,6 @@ describe Rdkafka::Bindings do
     end
   end

-  describe "partitioner" do
-    let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
-    let(:partition_count) { rand(50) + 1 }
-
-    it "should return the same partition for a similar string and the same partition count" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      expect(result_1).to eq(result_2)
-    end
-
-    it "should match the old partitioner" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-      result_2 = (Zlib.crc32(partition_key) % partition_count)
-      expect(result_1).to eq(result_2)
-    end
-
-    it "should return the partition calculated by the specified partitioner" do
-      result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
-      ptr = FFI::MemoryPointer.from_string(partition_key)
-      result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
-      expect(result_1).to eq(result_2)
-    end
-  end
-
   describe "stats callback" do
     context "without a stats callback" do
       it "should do nothing" do
@@ -194,7 +170,6 @@ describe Rdkafka::Bindings do
   end

   describe "oauthbearer callback" do
-
     context "without an oauthbearer callback" do
       it "should do nothing" do
         expect {
data/spec/rdkafka/config_spec.rb
CHANGED
@@ -159,7 +159,7 @@ describe Rdkafka::Config do

   it "should use default configuration" do
     config = Rdkafka::Config.new
-    expect(config[:"api.version.request"]).to eq
+    expect(config[:"api.version.request"]).to eq true
   end

   it "should create a consumer with valid config" do
data/spec/rdkafka/consumer_spec.rb
CHANGED
@@ -170,8 +170,16 @@ describe Rdkafka::Consumer do
   end

   describe "#seek" do
+    let(:topic) { "it-#{SecureRandom.uuid}" }
+
+    before do
+      admin = rdkafka_producer_config.admin
+      admin.create_topic(topic, 1, 1).wait
+      admin.close
+    end
+
     it "should raise an error when seeking fails" do
-      fake_msg = OpenStruct.new(topic:
+      fake_msg = OpenStruct.new(topic: topic, partition: 0, offset: 0)

       expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
       expect {
@@ -181,9 +189,12 @@ describe Rdkafka::Consumer do

   context "subscription" do
     let(:timeout) { 1000 }
+    # Some specs here test the manual offset commit hence we want to ensure, that we have some
+    # offsets in-memory that we can manually commit
+    let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }

     before do
-      consumer.subscribe(
+      consumer.subscribe(topic)

       # 1. partitions are assigned
       wait_for_assignment(consumer)
@@ -196,7 +207,7 @@ describe Rdkafka::Consumer do

     def send_one_message(val)
       producer.produce(
-        topic:
+        topic: topic,
         payload: "payload #{val}",
         key: "key 1",
         partition: 0
@@ -211,7 +222,7 @@ describe Rdkafka::Consumer do

       # 4. pause the subscription
       tpl = Rdkafka::Consumer::TopicPartitionList.new
-      tpl.add_topic(
+      tpl.add_topic(topic, 1)
       consumer.pause(tpl)

       # 5. seek to previous message
@@ -219,7 +230,7 @@ describe Rdkafka::Consumer do

       # 6. resume the subscription
       tpl = Rdkafka::Consumer::TopicPartitionList.new
-      tpl.add_topic(
+      tpl.add_topic(topic, 1)
       consumer.resume(tpl)

       # 7. ensure same message is read again
@@ -227,7 +238,7 @@ describe Rdkafka::Consumer do

       # This is needed because `enable.auto.offset.store` is true but when running in CI that
       # is overloaded, offset store lags
-      sleep(
+      sleep(1)

       consumer.commit
       expect(message1.offset).to eq message2.offset
@@ -259,10 +270,17 @@ describe Rdkafka::Consumer do
   end

   describe "#seek_by" do
-    let(:
+    let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }
+    let(:topic) { "it-#{SecureRandom.uuid}" }
     let(:partition) { 0 }
     let(:offset) { 0 }

+    before do
+      admin = rdkafka_producer_config.admin
+      admin.create_topic(topic, 1, 1).wait
+      admin.close
+    end
+
     it "should raise an error when seeking fails" do
       expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
       expect {
@@ -283,6 +301,7 @@ describe Rdkafka::Consumer do
       # 2. eat unrelated messages
       while(consumer.poll(timeout)) do; end
     end
+
     after { consumer.unsubscribe }

     def send_one_message(val)
@@ -813,12 +832,14 @@ describe Rdkafka::Consumer do
     end

     it "should return a message if there is one" do
+      topic = "it-#{SecureRandom.uuid}"
+
       producer.produce(
-        topic:
+        topic: topic,
         payload: "payload 1",
         key: "key 1"
       ).wait
-      consumer.subscribe(
+      consumer.subscribe(topic)
       message = consumer.each {|m| break m}

       expect(message).to be_a Rdkafka::Consumer::Message
@@ -838,13 +859,13 @@ describe Rdkafka::Consumer do
       }.to raise_error Rdkafka::RdkafkaError
     end

-    it "expect
+    it "expect to raise error when polling non-existing topic" do
       missing_topic = SecureRandom.uuid
       consumer.subscribe(missing_topic)

-
-
-
+      expect {
+        consumer.poll(1_000)
+      }.to raise_error Rdkafka::RdkafkaError, /Subscribed topic not available: #{missing_topic}/
     end
   end

@@ -1027,7 +1048,7 @@ describe Rdkafka::Consumer do
   after { Rdkafka::Config.statistics_callback = nil }

   let(:consumer) do
-    config = rdkafka_consumer_config('statistics.interval.ms':
+    config = rdkafka_consumer_config('statistics.interval.ms': 500)
     config.consumer_poll_set = false
     config.consumer
   end
@@ -1270,9 +1291,6 @@ describe Rdkafka::Consumer do
     end

     expect(eof_error.code).to eq(:partition_eof)
-    expect(eof_error.details[:topic]).to eq('consume_test_topic')
-    expect(eof_error.details[:partition]).to be_a(Integer)
-    expect(eof_error.details[:offset]).to be_a(Integer)
   end
 end
 end
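A recurring pattern in the consumer_spec changes above: instead of shared fixture topics, each example now provisions its own uniquely named topic before running. A minimal sketch of that bootstrap, assuming this suite's `rdkafka_producer_config` spec helper and a reachable broker:

  require "securerandom"

  topic = "it-#{SecureRandom.uuid}"       # unique per example, avoids cross-spec state

  admin = rdkafka_producer_config.admin   # spec-helper from this suite
  admin.create_topic(topic, 1, 1).wait    # 1 partition, replication factor 1
  admin.close

Unique topic names keep retried or parallel CI runs from seeing each other's offsets and messages, which is why the hardcoded topics were dropped.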
data/spec/rdkafka/metadata_spec.rb
CHANGED
@@ -30,7 +30,7 @@ describe Rdkafka::Metadata do
   it "#brokers returns our single broker" do
     expect(subject.brokers.length).to eq(1)
     expect(subject.brokers[0][:broker_id]).to eq(1)
-    expect(subject.brokers[0][:broker_name])
+    expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
     expect(subject.brokers[0][:broker_port]).to eq(9092)
   end

@@ -53,7 +53,7 @@ describe Rdkafka::Metadata do
   it "#brokers returns our single broker" do
     expect(subject.brokers.length).to eq(1)
     expect(subject.brokers[0][:broker_id]).to eq(1)
-    expect(subject.brokers[0][:broker_name])
+    expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
     expect(subject.brokers[0][:broker_port]).to eq(9092)
   end
