karafka-rdkafka 0.20.0.rc5-x86_64-linux-gnu → 0.20.1-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -51,13 +51,13 @@ module Rdkafka
 
  # @private
  # @param native_kafka [NativeKafka]
- # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
+ # @param partitioner [String, nil] name of the partitioner we want to use or nil to use
  # the "consistent_random" default
- def initialize(native_kafka, partitioner_name)
+ def initialize(native_kafka, partitioner)
  @topics_refs_map = {}
  @topics_configs = {}
  @native_kafka = native_kafka
- @partitioner_name = partitioner_name || "consistent_random"
+ @partitioner = partitioner || "consistent_random"
 
  # Makes sure, that native kafka gets closed before it gets GCed by Ruby
  ObjectSpace.define_finalizer(self, native_kafka.finalizer)
@@ -337,7 +337,8 @@ module Rdkafka
  timestamp: nil,
  headers: nil,
  label: nil,
- topic_config: EMPTY_HASH
+ topic_config: EMPTY_HASH,
+ partitioner: @partitioner
  )
  closed_producer_check(__method__)
 
@@ -369,10 +370,14 @@ module Rdkafka
 
  # Check if there are no overrides for the partitioner and use the default one only when
  # no per-topic is present.
- partitioner_name = @topics_configs.dig(topic, topic_config_hash, :partitioner) || @partitioner_name
+ selected_partitioner = @topics_configs.dig(topic, topic_config_hash, :partitioner) || partitioner
 
  # If the topic is not present, set to -1
- partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, partitioner_name) if partition_count.positive?
+ partition = Rdkafka::Bindings.partitioner(
+   topic_ref,
+   partition_key,
+   partition_count,
+   selected_partitioner) if partition_count.positive?
  end
 
  # If partition is nil, use -1 to let librdafka set the partition randomly or
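Taken together, the producer hunks above rename the internal `@partitioner_name` ivar to `@partitioner`, add a per-message `partitioner:` keyword to `#produce` (defaulting to the producer-wide value, "consistent_random" unless configured otherwise), and pass the topic reference through to `Rdkafka::Bindings.partitioner`. A minimal usage sketch, assuming an already-configured producer; the topic and key names are placeholders, not part of this diff:

    # Per-message partitioner override introduced in this release; when omitted,
    # produce() falls back to the producer-wide default partitioner.
    handle = producer.produce(
      topic: "example_topic",
      payload: "hello",
      partition_key: "user-42",
      partitioner: "murmur2"
    )
    report = handle.wait(max_wait_timeout: 5)
    puts report.partition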
@@ -1,7 +1,7 @@
  # frozen_string_literal: true
 
  module Rdkafka
-  VERSION = "0.20.0.rc5"
+  VERSION = "0.20.1"
  LIBRDKAFKA_VERSION = "2.8.0"
  LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
  end
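The version hunk promotes the gem from the 0.20.0.rc5 prerelease to the final 0.20.1 while keeping librdkafka at 2.8.0. To pick up this release, a Gemfile entry along these lines would work (the version constraint is illustrative, not prescribed by the diff):

    # Gemfile
    gem "karafka-rdkafka", "~> 0.20.1"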
@@ -513,7 +513,7 @@ describe Rdkafka::Admin do
  end
  end
 
- describe "#ACL tests" do
+ describe "#ACL tests for topic resource" do
  let(:non_existing_resource_name) {"non-existing-topic"}
  before do
  #create topic for testing acl
@@ -615,6 +615,207 @@ describe Rdkafka::Admin do
  end
  end
 
+ describe "#ACL tests for transactional_id" do
+ let(:transactional_id_resource_name) {"test-transactional-id"}
+ let(:non_existing_transactional_id) {"non-existing-transactional-id"}
+ let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID }
+ let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+ let(:transactional_id_principal) { "User:test-user" }
+ let(:transactional_id_host) { "*" }
+ let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE }
+ let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+
+ after do
+ # Clean up any ACLs that might have been created during tests
+ begin
+ delete_acl_handle = admin.delete_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: nil,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ delete_acl_handle.wait(max_wait_timeout: 15.0)
+ rescue
+ # Ignore cleanup errors
+ end
+ end
+
+ describe "#create_acl" do
+ it "creates acl for a transactional_id" do
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: transactional_id_resource_name,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+ end
+
+ it "creates acl for a non-existing transactional_id" do
+ # ACL creation for transactional_ids that don't exist will still get created successfully
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: non_existing_transactional_id,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+
+ # Clean up the ACL that was created for the non-existing transactional_id
+ delete_acl_handle = admin.delete_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: non_existing_transactional_id,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(delete_acl_handle[:response]).to eq(0)
+ expect(delete_acl_report.deleted_acls.size).to eq(1)
+ end
+ end
+
+ describe "#describe_acl" do
+ it "describes acl of a transactional_id that does not exist" do
+ describe_acl_handle = admin.describe_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: non_existing_transactional_id,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(describe_acl_handle[:response]).to eq(0)
+ expect(describe_acl_report.acls.size).to eq(0)
+ end
+
+ it "creates acls and describes the newly created transactional_id acls" do
+ # Create first ACL
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: "test_transactional_id_1",
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+
+ # Create second ACL
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: "test_transactional_id_2",
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+
+ # Since we create and immediately check, this is slow on loaded CIs, hence we wait
+ sleep(2)
+
+ # Describe ACLs - filter by transactional_id resource type
+ describe_acl_handle = admin.describe_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: nil,
+ resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(describe_acl_handle[:response]).to eq(0)
+ expect(describe_acl_report.acls.length).to eq(2)
+ end
+ end
+
+ describe "#delete_acl" do
+ it "deletes acl of a transactional_id that does not exist" do
+ delete_acl_handle = admin.delete_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: non_existing_transactional_id,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(delete_acl_handle[:response]).to eq(0)
+ expect(delete_acl_report.deleted_acls.size).to eq(0)
+ end
+
+ it "creates transactional_id acls and deletes the newly created acls" do
+ # Create first ACL
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: "test_transactional_id_1",
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+
+ # Create second ACL
+ create_acl_handle = admin.create_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: "test_transactional_id_2",
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(create_acl_report.rdkafka_response).to eq(0)
+ expect(create_acl_report.rdkafka_response_string).to eq("")
+
+ # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters
+ delete_acl_handle = admin.delete_acl(
+ resource_type: transactional_id_resource_type,
+ resource_name: nil,
+ resource_pattern_type: transactional_id_resource_pattern_type,
+ principal: transactional_id_principal,
+ host: transactional_id_host,
+ operation: transactional_id_operation,
+ permission_type: transactional_id_permission_type
+ )
+ delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+ expect(delete_acl_handle[:response]).to eq(0)
+ expect(delete_acl_report.deleted_acls.length).to eq(2)
+ end
+ end
+ end
+
  describe('Group tests') do
  describe "#delete_group" do
  describe("with an existing group") do
@@ -77,30 +77,6 @@ describe Rdkafka::Bindings do
  end
  end
 
- describe "partitioner" do
- let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
- let(:partition_count) { rand(50) + 1 }
-
- it "should return the same partition for a similar string and the same partition count" do
- result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
- result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
- expect(result_1).to eq(result_2)
- end
-
- it "should match the old partitioner" do
- result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
- result_2 = (Zlib.crc32(partition_key) % partition_count)
- expect(result_1).to eq(result_2)
- end
-
- it "should return the partition calculated by the specified partitioner" do
- result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
- ptr = FFI::MemoryPointer.from_string(partition_key)
- result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
- expect(result_1).to eq(result_2)
- end
- end
-
  describe "stats callback" do
  context "without a stats callback" do
  it "should do nothing" do
@@ -1291,9 +1291,6 @@ describe Rdkafka::Consumer do
  end
 
  expect(eof_error.code).to eq(:partition_eof)
- expect(eof_error.details[:topic]).to eq('consume_test_topic')
- expect(eof_error.details[:partition]).to be_a(Integer)
- expect(eof_error.details[:offset]).to be_a(Integer)
  end
  end
  end
@@ -340,7 +340,7 @@ describe Rdkafka::Producer do
  )
  end
 
- expect(messages[0].partition).to eq 0
+ expect(messages[0].partition).to be >= 0
  expect(messages[0].key).to eq 'a'
  end
 
@@ -1231,4 +1231,298 @@ describe Rdkafka::Producer do
  end
  end
  end
+
+ let(:producer) { rdkafka_producer_config.producer }
+ let(:all_partitioners) { %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random) }
+
+ describe "partitioner behavior through producer API" do
+ context "testing all partitioners with same key" do
+ it "should not return partition 0 for all partitioners" do
+ test_key = "test-key-123"
+ results = {}
+
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: test_key,
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ results[partitioner] = report.partition
+ end
+
+ # Should not all be the same partition (especially not all 0)
+ unique_partitions = results.values.uniq
+ expect(unique_partitions.size).to be > 1
+ end
+ end
+
+ context "empty string partition key" do
+ it "should produce message with empty partition key without crashing and go to partition 0 for all partitioners" do
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ key: "test-key",
+ partition_key: "",
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ end
+ end
+ end
+
+ context "nil partition key" do
+ it "should handle nil partition key gracefully" do
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ key: "test-key",
+ partition_key: nil
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+
+ context "various key types and lengths with different partitioners" do
+ it "should handle very short keys with all partitioners" do
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: "a",
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+
+ it "should handle very long keys with all partitioners" do
+ long_key = "a" * 1000
+
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: long_key,
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+
+ it "should handle unicode keys with all partitioners" do
+ unicode_key = "测试键值🚀"
+
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: unicode_key,
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+ end
+
+ context "consistency testing for deterministic partitioners" do
+ %w(consistent murmur2 fnv1a).each do |partitioner|
+ it "should consistently route same partition key to same partition with #{partitioner}" do
+ partition_key = "consistent-test-key"
+
+ # Produce multiple messages with same partition key
+ reports = 5.times.map do
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload #{Time.now.to_f}",
+ partition_key: partition_key,
+ partitioner: partitioner
+ )
+ handle.wait(max_wait_timeout: 5)
+ end
+
+ # All should go to same partition
+ partitions = reports.map(&:partition).uniq
+ expect(partitions.size).to eq(1)
+ end
+ end
+ end
+
+ context "randomness testing for random partitioners" do
+ %w(random consistent_random murmur2_random fnv1a_random).each do |partitioner|
+ it "should potentially distribute across partitions with #{partitioner}" do
+ # Note: random partitioners might still return same value by chance
+ partition_key = "random-test-key"
+
+ reports = 10.times.map do
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload #{Time.now.to_f}",
+ partition_key: partition_key,
+ partitioner: partitioner
+ )
+ handle.wait(max_wait_timeout: 5)
+ end
+
+ partitions = reports.map(&:partition)
+
+ # Just ensure they're valid partitions
+ partitions.each do |partition|
+ expect(partition).to be >= 0
+ expect(partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+ end
+ end
+
+ context "comparing different partitioners with same key" do
+ it "should route different partition keys to potentially different partitions" do
+ keys = ["key1", "key2", "key3", "key4", "key5"]
+
+ all_partitioners.each do |partitioner|
+ reports = keys.map do |key|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: key,
+ partitioner: partitioner
+ )
+ handle.wait(max_wait_timeout: 5)
+ end
+
+ partitions = reports.map(&:partition).uniq
+
+ # Should distribute across multiple partitions for most partitioners
+ # (though some might hash all keys to same partition by chance)
+ expect(partitions.all? { |p| p >= 0 && p < producer.partition_count("partitioner_test_topic") }).to be true
+ end
+ end
+ end
+
+ context "partition key vs regular key behavior" do
+ it "should use partition key for partitioning when both key and partition_key are provided" do
+ # Use keys that would hash to different partitions
+ regular_key = "regular-key-123"
+ partition_key = "partition-key-456"
+
+ # Message with both keys
+ handle1 = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload 1",
+ key: regular_key,
+ partition_key: partition_key
+ )
+
+ # Message with only partition key (should go to same partition)
+ handle2 = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload 2",
+ partition_key: partition_key
+ )
+
+ # Message with only regular key (should go to different partition)
+ handle3 = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload 3",
+ key: regular_key
+ )
+
+ report1 = handle1.wait(max_wait_timeout: 5)
+ report2 = handle2.wait(max_wait_timeout: 5)
+ report3 = handle3.wait(max_wait_timeout: 5)
+
+ # Messages 1 and 2 should go to same partition (both use partition_key)
+ expect(report1.partition).to eq(report2.partition)
+
+ # Message 3 should potentially go to different partition (uses regular key)
+ expect(report3.partition).not_to eq(report1.partition)
+ end
+ end
+
+ context "edge case combinations with different partitioners" do
+ it "should handle nil partition key with all partitioners" do
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ key: "test-key",
+ partition_key: nil,
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+
+ it "should handle whitespace-only partition key with all partitioners" do
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: " ",
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+
+ it "should handle newline characters in partition key with all partitioners" do
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "test payload",
+ partition_key: "key\nwith\nnewlines",
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ expect(report.partition).to be >= 0
+ expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
+ end
+ end
+ end
+
+ context "debugging partitioner issues" do
+ it "should show if all partitioners return 0 (indicating a problem)" do
+ test_key = "debug-test-key"
+ zero_count = 0
+
+ all_partitioners.each do |partitioner|
+ handle = producer.produce(
+ topic: "partitioner_test_topic",
+ payload: "debug payload",
+ partition_key: test_key,
+ partitioner: partitioner
+ )
+
+ report = handle.wait(max_wait_timeout: 5)
+ zero_count += 1 if report.partition == 0
+ end
+
+ expect(zero_count).to be < all_partitioners.size
+ end
+ end
+ end
  end
data/spec/spec_helper.rb CHANGED
@@ -78,18 +78,32 @@ end
 
  def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, consumer: nil)
  new_consumer = consumer.nil?
- consumer ||= rdkafka_consumer_config.consumer
+ consumer ||= rdkafka_consumer_config('allow.auto.create.topics': true).consumer
  consumer.subscribe(topic)
  timeout = Time.now.to_i + timeout_in_seconds
+ retry_count = 0
+ max_retries = 10
+
  loop do
  if timeout <= Time.now.to_i
  raise "Timeout of #{timeout_in_seconds} seconds reached in wait_for_message"
  end
- message = consumer.poll(100)
- if message &&
- message.partition == delivery_report.partition &&
- message.offset == delivery_report.offset
- return message
+
+ begin
+ message = consumer.poll(100)
+ if message &&
+ message.partition == delivery_report.partition &&
+ message.offset == delivery_report.offset
+ return message
+ end
+ rescue Rdkafka::RdkafkaError => e
+ if e.code == :unknown_topic_or_part && retry_count < max_retries
+ retry_count += 1
+ sleep(0.1) # Small delay before retry
+ next
+ else
+ raise
+ end
  end
  end
 ensure