karafka-rdkafka 0.20.0.rc5-arm64-darwin → 0.21.0-arm64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/{ci_linux_x86_64_musl.yml → ci_linux_alpine_x86_64_musl.yml} +63 -71
  3. data/.github/workflows/ci_linux_alpine_x86_64_musl_complementary.yml +264 -0
  4. data/.github/workflows/ci_linux_debian_x86_64_gnu.yml +271 -0
  5. data/.github/workflows/ci_linux_debian_x86_64_gnu_complementary.yml +334 -0
  6. data/.github/workflows/{ci_linux_x86_64_gnu.yml → ci_linux_ubuntu_aarch64_gnu.yml} +78 -56
  7. data/.github/workflows/ci_linux_ubuntu_aarch64_gnu_complementary.yml +295 -0
  8. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +281 -0
  9. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu_complementary.yml +294 -0
  10. data/.github/workflows/ci_macos_arm64.yml +28 -50
  11. data/.github/workflows/push_linux_aarch64_gnu.yml +65 -0
  12. data/.github/workflows/push_linux_x86_64_gnu.yml +4 -3
  13. data/.github/workflows/push_linux_x86_64_musl.yml +6 -4
  14. data/.github/workflows/push_macos_arm64.yml +3 -3
  15. data/.github/workflows/push_ruby.yml +1 -1
  16. data/.github/workflows/trigger-wiki-refresh.yml +30 -0
  17. data/.github/workflows/verify-action-pins.yml +1 -1
  18. data/.gitignore +1 -0
  19. data/.rspec +1 -0
  20. data/.ruby-version +1 -1
  21. data/CHANGELOG.md +18 -2
  22. data/README.md +48 -147
  23. data/dist/cyrus-sasl-2.1.28.tar.gz +0 -0
  24. data/dist/krb5-1.21.3.tar.gz +0 -0
  25. data/dist/openssl-3.0.16.tar.gz +0 -0
  26. data/dist/zlib-1.3.1.tar.gz +0 -0
  27. data/dist/zstd-1.5.7.tar.gz +0 -0
  28. data/docker-compose-ssl.yml +35 -0
  29. data/ext/build_common.sh +18 -3
  30. data/ext/build_linux_aarch64_gnu.sh +326 -0
  31. data/ext/build_linux_x86_64_gnu.sh +17 -6
  32. data/ext/build_linux_x86_64_musl.sh +18 -8
  33. data/ext/build_macos_arm64.sh +7 -0
  34. data/ext/generate-ssl-certs.sh +109 -0
  35. data/ext/librdkafka.dylib +0 -0
  36. data/karafka-rdkafka.gemspec +3 -1
  37. data/lib/rdkafka/bindings.rb +6 -8
  38. data/lib/rdkafka/config.rb +1 -4
  39. data/lib/rdkafka/consumer.rb +1 -1
  40. data/lib/rdkafka/producer.rb +11 -6
  41. data/lib/rdkafka/version.rb +3 -3
  42. data/spec/integrations/ssl_stress_spec.rb +121 -0
  43. data/spec/{rdkafka → lib/rdkafka}/admin_spec.rb +219 -6
  44. data/spec/{rdkafka → lib/rdkafka}/bindings_spec.rb +0 -24
  45. data/spec/{rdkafka → lib/rdkafka}/config_spec.rb +1 -1
  46. data/spec/{rdkafka → lib/rdkafka}/consumer_spec.rb +50 -6
  47. data/spec/{rdkafka → lib/rdkafka}/metadata_spec.rb +2 -2
  48. data/spec/{rdkafka → lib/rdkafka}/producer/delivery_report_spec.rb +1 -1
  49. data/spec/{rdkafka → lib/rdkafka}/producer_spec.rb +301 -8
  50. data/spec/spec_helper.rb +65 -16
  51. metadata +87 -43
  52. data/spec/rdkafka/producer/partitions_count_spec.rb +0 -359
  53. /data/spec/{rdkafka → lib/rdkafka}/abstract_handle_spec.rb +0 -0
  54. /data/spec/{rdkafka → lib/rdkafka}/admin/create_acl_handle_spec.rb +0 -0
  55. /data/spec/{rdkafka → lib/rdkafka}/admin/create_acl_report_spec.rb +0 -0
  56. /data/spec/{rdkafka → lib/rdkafka}/admin/create_topic_handle_spec.rb +0 -0
  57. /data/spec/{rdkafka → lib/rdkafka}/admin/create_topic_report_spec.rb +0 -0
  58. /data/spec/{rdkafka → lib/rdkafka}/admin/delete_acl_handle_spec.rb +0 -0
  59. /data/spec/{rdkafka → lib/rdkafka}/admin/delete_acl_report_spec.rb +0 -0
  60. /data/spec/{rdkafka → lib/rdkafka}/admin/delete_topic_handle_spec.rb +0 -0
  61. /data/spec/{rdkafka → lib/rdkafka}/admin/delete_topic_report_spec.rb +0 -0
  62. /data/spec/{rdkafka → lib/rdkafka}/admin/describe_acl_handle_spec.rb +0 -0
  63. /data/spec/{rdkafka → lib/rdkafka}/admin/describe_acl_report_spec.rb +0 -0
  64. /data/spec/{rdkafka → lib/rdkafka}/callbacks_spec.rb +0 -0
  65. /data/spec/{rdkafka → lib/rdkafka}/consumer/headers_spec.rb +0 -0
  66. /data/spec/{rdkafka → lib/rdkafka}/consumer/message_spec.rb +0 -0
  67. /data/spec/{rdkafka → lib/rdkafka}/consumer/partition_spec.rb +0 -0
  68. /data/spec/{rdkafka → lib/rdkafka}/consumer/topic_partition_list_spec.rb +0 -0
  69. /data/spec/{rdkafka → lib/rdkafka}/error_spec.rb +0 -0
  70. /data/spec/{rdkafka → lib/rdkafka}/native_kafka_spec.rb +0 -0
  71. /data/spec/{rdkafka → lib/rdkafka}/producer/delivery_handle_spec.rb +0 -0
  72. /data/spec/{rdkafka → lib/rdkafka}/producer/partitions_count_cache_spec.rb +0 -0
@@ -160,7 +160,6 @@ module Rdkafka
    attach_function :rd_kafka_error_is_retriable, [:pointer], :int
    attach_function :rd_kafka_error_txn_requires_abort, [:pointer], :int
    attach_function :rd_kafka_error_destroy, [:pointer], :void
-   attach_function :rd_kafka_error_code, [:pointer], :int
    attach_function :rd_kafka_get_err_descs, [:pointer, :pointer], :void

    # Configuration
@@ -403,18 +402,16 @@ module Rdkafka
      hsh[name] = method_name
    end

-   def self.partitioner(str, partition_count, partitioner_name = "consistent_random")
+   def self.partitioner(topic_ptr, str, partition_count, partitioner = "consistent_random")
      # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
      return -1 unless partition_count&.nonzero?
-     # musl architecture crashes with empty string
-     return 0 if str.empty?

-     str_ptr = FFI::MemoryPointer.from_string(str)
-     method_name = PARTITIONERS.fetch(partitioner_name) do
-       raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
+     str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
+     method_name = PARTITIONERS.fetch(partitioner) do
+       raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner}")
      end

-     public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
+     public_send(method_name, topic_ptr, str_ptr, str.size, partition_count, nil, nil)
    end

    # Create Topics
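The binding now receives the native topic handle and forwards it to librdkafka instead of a hard-coded nil, and empty keys become a NULL pointer rather than being short-circuited to partition 0. A minimal sketch of the new call shape, assuming a valid native topic pointer obtained elsewhere (topic_ptr, the key, and the partition count below are illustrative):

    # topic_ptr: FFI pointer to a native rd_kafka_topic_t (illustrative name)
    partition = Rdkafka::Bindings.partitioner(
      topic_ptr,   # native topic handle, now forwarded to the C partitioner
      "user-42",   # partition key
      12,          # current partition count of the topic
      "murmur2"    # any partitioner name registered in PARTITIONERS
    )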
@@ -532,6 +529,7 @@ module Rdkafka
    RD_KAFKA_RESOURCE_TOPIC = 2
    RD_KAFKA_RESOURCE_GROUP = 3
    RD_KAFKA_RESOURCE_BROKER = 4
+   RD_KAFKA_RESOURCE_TRANSACTIONAL_ID = 5

    # rd_kafka_ResourcePatternType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7320
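The new constant maps librdkafka's transactional-ID resource type so the admin ACL API can target it. A sketch using the same admin calls exercised in the specs further down (the resource name and principal here are illustrative):

    admin.create_acl(
      resource_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID,
      resource_name: "my-transactional-id",  # illustrative
      resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
      principal: "User:app",                 # illustrative
      host: "*",
      operation: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE,
      permission_type: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
    ).wait(max_wait_timeout: 15.0)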
@@ -129,10 +129,7 @@ module Rdkafka
    end

    # Default config that can be overwritten.
-   DEFAULT_CONFIG = {
-     # Request api version so advanced features work
-     :"api.version.request" => true
-   }.freeze
+   DEFAULT_CONFIG = {}.freeze

    # Required config that cannot be overwritten.
    REQUIRED_CONFIG = {
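api.version.request has defaulted to true in librdkafka itself for years, so forcing it from the gem was redundant. Anyone who wants the explicit setting back can still pass it per instance; a sketch (broker address illustrative):

    config = Rdkafka::Config.new(
      "bootstrap.servers": "localhost:9092",
      "api.version.request": true  # no longer injected by DEFAULT_CONFIG
    )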
@@ -344,7 +344,7 @@ module Rdkafka
      topic_out = {}
      partitions.each do |p|
        next if p.offset.nil?
-       _, high = query_watermark_offsets(
+       _low, high = query_watermark_offsets(
          topic,
          p.partition,
          watermark_timeout_ms
@@ -51,13 +51,13 @@ module Rdkafka

    # @private
    # @param native_kafka [NativeKafka]
-   # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
+   # @param partitioner [String, nil] name of the partitioner we want to use or nil to use
    #   the "consistent_random" default
-   def initialize(native_kafka, partitioner_name)
+   def initialize(native_kafka, partitioner)
      @topics_refs_map = {}
      @topics_configs = {}
      @native_kafka = native_kafka
-     @partitioner_name = partitioner_name || "consistent_random"
+     @partitioner = partitioner || "consistent_random"

      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
@@ -337,7 +337,8 @@ module Rdkafka
      timestamp: nil,
      headers: nil,
      label: nil,
-     topic_config: EMPTY_HASH
+     topic_config: EMPTY_HASH,
+     partitioner: @partitioner
    )
      closed_producer_check(__method__)

@@ -369,10 +370,14 @@ module Rdkafka

      # Check if there are no overrides for the partitioner and use the default one only when
      # no per-topic is present.
-     partitioner_name = @topics_configs.dig(topic, topic_config_hash, :partitioner) || @partitioner_name
+     selected_partitioner = @topics_configs.dig(topic, topic_config_hash, :partitioner) || partitioner

      # If the topic is not present, set to -1
-     partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, partitioner_name) if partition_count.positive?
+     partition = Rdkafka::Bindings.partitioner(
+       topic_ref,
+       partition_key,
+       partition_count,
+       selected_partitioner) if partition_count.positive?
    end

    # If partition is nil, use -1 to let librdafka set the partition randomly or
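Together with the new partitioner: keyword on #produce, the partitioner is now resolved per call: a per-topic override from topic_config takes precedence, otherwise the partitioner argument (which defaults to the producer-wide setting) is used. A sketch of a per-call override, with topic and key illustrative:

    producer.produce(
      topic: "events",
      payload: "body",
      partition_key: "user-42",
      partitioner: "murmur2_random"  # overrides the producer default for this message
    )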
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
-   VERSION = "0.20.0.rc5"
-   LIBRDKAFKA_VERSION = "2.8.0"
-   LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
+   VERSION = "0.21.0"
+   LIBRDKAFKA_VERSION = "2.11.0"
+   LIBRDKAFKA_SOURCE_SHA256 = "592a823dc7c09ad4ded1bc8f700da6d4e0c88ffaf267815c6f25e7450b9395ca"
  end
@@ -0,0 +1,121 @@
+ # ssl_stress_test.rb
+ #
+ # This script is designed to stress-test the OpenSSL SSL/TLS layer under high concurrency
+ # to help detect regressions like the one described in OpenSSL issue #28171:
+ # https://github.com/openssl/openssl/issues/28171
+ #
+ # Issue summary:
+ # - OpenSSL 3.0.17 introduced a concurrency-related regression.
+ # - Multiple threads sharing the same SSL_CTX and making parallel TLS connections
+ #   (often with certificate verification enabled) can cause segmentation faults
+ #   due to race conditions in X509 store handling.
+ # - Affected users include Python (httpx), Rust (reqwest, native-tls), and C applications.
+ #
+ # Script details:
+ # - Starts 100 SSL servers using self-signed, in-memory certs on sequential localhost ports.
+ # - Uses `rdkafka-ruby` to spin up 100 consumer threads that continuously create and destroy
+ #   SSL connections to these servers for a given duration.
+ # - This mimics high TLS connection churn and aims to trigger latent SSL_CTX or X509_STORE
+ #   threading bugs like double-frees, memory corruption, or segmentation faults.
+ #
+ # Goal:
+ # - Catch regressions early by validating that heavy concurrent SSL use does not lead to crashes.
+ # - Provide a minimal and repeatable reproducer when diagnosing OpenSSL-level SSL instability.
+ #
+ # In case of a failure, segfault will happen
+
+ require 'rdkafka'
+ require 'socket'
+ require 'openssl'
+
+ $stdout.sync = true
+
+ STARTING_PORT = 19093
+ NUM_PORTS = 150
+ BATCHES = 100
+ PORTS = STARTING_PORT...(STARTING_PORT + NUM_PORTS)
+
+ CONFIG = {
+   'bootstrap.servers': Array.new(NUM_PORTS) { |i| "127.0.0.1:#{19093+i}" }.join(','),
+   'security.protocol': 'SSL',
+   'enable.ssl.certificate.verification': false
+ }
+
+ # Generate in-memory self-signed cert
+ key = OpenSSL::PKey::RSA.new(2048)
+
+ name = OpenSSL::X509::Name.parse("/CN=127.0.0.1")
+ cert = OpenSSL::X509::Certificate.new
+ cert.version = 2
+ cert.serial = 1
+ cert.subject = name
+ cert.issuer = name
+ cert.public_key = key.public_key
+ cert.not_before = Time.now
+ cert.not_after = Time.now + 3600
+ cert.sign(key, OpenSSL::Digest::SHA256.new)
+
+ # Start servers on multiple ports
+ PORTS.map do |port|
+   Thread.new do
+     # Prepare SSL context
+     # We do not use a shared context for the server because the goal is to stress librdkafka layer
+     # and not the Ruby SSL layer
+     ssl_context = OpenSSL::SSL::SSLContext.new
+     ssl_context.cert = cert
+     ssl_context.key = key
+
+     tcp_server = TCPServer.new('127.0.0.1', port)
+     ssl_server = OpenSSL::SSL::SSLServer.new(tcp_server, ssl_context)
+
+     loop do
+       begin
+         ssl_socket = ssl_server.accept
+         ssl_socket.close
+       rescue => e
+         # Some errors are expected and irrelevant
+         next if e.message.include?('unexpected eof while reading')
+
+         puts "Port #{port} SSL error: #{e}"
+       end
+     end
+   end
+ end
+
+ puts "Starting #{NUM_PORTS} on ports from #{PORTS.first} to #{PORTS.last} SSL servers"
+
+ timeout = 30
+ start = Time.now
+
+ # Wait for the servers to be available
+ # We want to make sure that they are available so we are sure that librdkafka actually hammers
+ # them
+ loop do
+   all_up = PORTS.all? do |port|
+     TCPSocket.new('127.0.0.1', port).close
+     true
+   rescue
+     false
+   end
+
+   break if all_up
+
+   raise "Timeout waiting for SSL servers" if Time.now - start > timeout
+
+   sleep 0.1
+ end
+
+ puts "SSL servers ready"
+
+ start_time = Time.now
+ duration = 60 * 10 # 10 minutes - it should crash faster than that if SSL vulnerable
+ attempts = 0
+
+ while Time.now - start_time < duration do
+   css = Array.new(BATCHES) { Rdkafka::Config.new(CONFIG) }
+   csss = css.map(&:consumer)
+   # This print is needed. No idea why but it increases the chances of segfault
+   p attempts += 1
+   sleep(1)
+   csss.each(&:close)
+ end
@@ -34,7 +34,7 @@ describe Rdkafka::Admin do
    describe '#describe_errors' do
      let(:errors) { admin.class.describe_errors }

-     it { expect(errors.size).to eq(170) }
+     it { expect(errors.size).to eq(172) }
      it { expect(errors[-184]).to eq(code: -184, description: 'Local: Queue full', name: '_QUEUE_FULL') }
      it { expect(errors[21]).to eq(code: 21, description: 'Broker: Invalid required acks value', name: 'INVALID_REQUIRED_ACKS') }
    end
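The expected count moves from 170 to 172, tracking the larger error table of the bundled librdkafka 2.11.0. A quick way to inspect the mapping, using the class-level API the spec calls (a sketch):

    errors = Rdkafka::Admin.describe_errors
    errors.size   # => 172 with librdkafka 2.11.0
    errors[-184]  # => { code: -184, description: 'Local: Queue full', name: '_QUEUE_FULL' }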
@@ -513,22 +513,22 @@ describe Rdkafka::Admin do
      end
    end

-   describe "#ACL tests" do
+   describe "#ACL tests for topic resource" do
      let(:non_existing_resource_name) {"non-existing-topic"}
      before do
        #create topic for testing acl
        create_topic_handle = admin.create_topic(resource_name, topic_partition_count, topic_replication_factor)
-       create_topic_report = create_topic_handle.wait(max_wait_timeout: 15.0)
+       create_topic_handle.wait(max_wait_timeout: 15.0)
      end

      after do
        #delete acl
        delete_acl_handle = admin.delete_acl(resource_type: resource_type, resource_name: resource_name, resource_pattern_type: resource_pattern_type, principal: principal, host: host, operation: operation, permission_type: permission_type)
-       delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+       delete_acl_handle.wait(max_wait_timeout: 15.0)

        #delete topic that was created for testing acl
        delete_topic_handle = admin.delete_topic(resource_name)
-       delete_topic_report = delete_topic_handle.wait(max_wait_timeout: 15.0)
+       delete_topic_handle.wait(max_wait_timeout: 15.0)
      end

      describe "#create_acl" do
@@ -615,6 +615,207 @@ describe Rdkafka::Admin do
      end
    end

+   describe "#ACL tests for transactional_id" do
+     let(:transactional_id_resource_name) {"test-transactional-id"}
+     let(:non_existing_transactional_id) {"non-existing-transactional-id"}
+     let(:transactional_id_resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID }
+     let(:transactional_id_resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+     let(:transactional_id_principal) { "User:test-user" }
+     let(:transactional_id_host) { "*" }
+     let(:transactional_id_operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE }
+     let(:transactional_id_permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+
+     after do
+       # Clean up any ACLs that might have been created during tests
+       begin
+         delete_acl_handle = admin.delete_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: nil,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         delete_acl_handle.wait(max_wait_timeout: 15.0)
+       rescue
+         # Ignore cleanup errors
+       end
+     end
+
+     describe "#create_acl" do
+       it "creates acl for a transactional_id" do
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: transactional_id_resource_name,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+       end
+
+       it "creates acl for a non-existing transactional_id" do
+         # ACL creation for transactional_ids that don't exist will still get created successfully
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: non_existing_transactional_id,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+
+         # Clean up the ACL that was created for the non-existing transactional_id
+         delete_acl_handle = admin.delete_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: non_existing_transactional_id,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(delete_acl_handle[:response]).to eq(0)
+         expect(delete_acl_report.deleted_acls.size).to eq(1)
+       end
+     end
+
+     describe "#describe_acl" do
+       it "describes acl of a transactional_id that does not exist" do
+         describe_acl_handle = admin.describe_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: non_existing_transactional_id,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(describe_acl_handle[:response]).to eq(0)
+         expect(describe_acl_report.acls.size).to eq(0)
+       end
+
+       it "creates acls and describes the newly created transactional_id acls" do
+         # Create first ACL
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: "test_transactional_id_1",
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+
+         # Create second ACL
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: "test_transactional_id_2",
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+
+         # Since we create and immediately check, this is slow on loaded CIs, hence we wait
+         sleep(2)
+
+         # Describe ACLs - filter by transactional_id resource type
+         describe_acl_handle = admin.describe_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: nil,
+           resource_pattern_type: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         describe_acl_report = describe_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(describe_acl_handle[:response]).to eq(0)
+         expect(describe_acl_report.acls.length).to eq(2)
+       end
+     end
+
+     describe "#delete_acl" do
+       it "deletes acl of a transactional_id that does not exist" do
+         delete_acl_handle = admin.delete_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: non_existing_transactional_id,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(delete_acl_handle[:response]).to eq(0)
+         expect(delete_acl_report.deleted_acls.size).to eq(0)
+       end
+
+       it "creates transactional_id acls and deletes the newly created acls" do
+         # Create first ACL
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: "test_transactional_id_1",
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+
+         # Create second ACL
+         create_acl_handle = admin.create_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: "test_transactional_id_2",
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         create_acl_report = create_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(create_acl_report.rdkafka_response).to eq(0)
+         expect(create_acl_report.rdkafka_response_string).to eq("")
+
+         # Delete ACLs - resource_name nil to delete all ACLs with any resource name and matching all other filters
+         delete_acl_handle = admin.delete_acl(
+           resource_type: transactional_id_resource_type,
+           resource_name: nil,
+           resource_pattern_type: transactional_id_resource_pattern_type,
+           principal: transactional_id_principal,
+           host: transactional_id_host,
+           operation: transactional_id_operation,
+           permission_type: transactional_id_permission_type
+         )
+         delete_acl_report = delete_acl_handle.wait(max_wait_timeout: 15.0)
+         expect(delete_acl_handle[:response]).to eq(0)
+         expect(delete_acl_report.deleted_acls.length).to eq(2)
+       end
+     end
+   end
+
    describe('Group tests') do
      describe "#delete_group" do
        describe("with an existing group") do
@@ -675,7 +876,17 @@ describe Rdkafka::Admin do
    end

    describe '#create_partitions' do
-     let(:metadata) { admin.metadata(topic_name).topics.first }
+     let(:metadata) do
+       begin
+         admin.metadata(topic_name).topics.first
+       rescue Rdkafka::RdkafkaError
+         # We have to wait because if we query too fast after topic creation request, it may not
+         # yet be available throwing an error.
+         # This occurs mostly on slow CIs
+         sleep(1)
+         admin.metadata(topic_name).topics.first
+       end
+     end

      context 'when topic does not exist' do
        it 'expect to fail due to unknown partition' do
@@ -697,6 +908,8 @@ describe Rdkafka::Admin do

      it 'expect not to change number of partitions' do
        expect { admin.create_partitions(topic_name, 2).wait }.to raise_error(Rdkafka::RdkafkaError, /invalid_partitions/)
+       # On slow CI this may propagate, thus we wait a bit
+       sleep(1)
        expect(metadata[:partition_count]).to eq(5)
      end
    end
@@ -77,30 +77,6 @@ describe Rdkafka::Bindings do
      end
    end

-   describe "partitioner" do
-     let(:partition_key) { ('a'..'z').to_a.shuffle.take(15).join('') }
-     let(:partition_count) { rand(50) + 1 }
-
-     it "should return the same partition for a similar string and the same partition count" do
-       result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-       result_2 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-       expect(result_1).to eq(result_2)
-     end
-
-     it "should match the old partitioner" do
-       result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count)
-       result_2 = (Zlib.crc32(partition_key) % partition_count)
-       expect(result_1).to eq(result_2)
-     end
-
-     it "should return the partition calculated by the specified partitioner" do
-       result_1 = Rdkafka::Bindings.partitioner(partition_key, partition_count, "murmur2")
-       ptr = FFI::MemoryPointer.from_string(partition_key)
-       result_2 = Rdkafka::Bindings.rd_kafka_msg_partitioner_murmur2(nil, ptr, partition_key.size, partition_count, nil, nil)
-       expect(result_1).to eq(result_2)
-     end
-   end
-
    describe "stats callback" do
      context "without a stats callback" do
        it "should do nothing" do
@@ -159,7 +159,7 @@ describe Rdkafka::Config do

    it "should use default configuration" do
      config = Rdkafka::Config.new
-     expect(config[:"api.version.request"]).to eq true
+     expect(config[:"api.version.request"]).to eq nil
    end

    it "should create a consumer with valid config" do
@@ -175,6 +175,7 @@ describe Rdkafka::Consumer do
    before do
      admin = rdkafka_producer_config.admin
      admin.create_topic(topic, 1, 1).wait
+     wait_for_topic(admin, topic)
      admin.close
    end

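wait_for_topic is a helper added in the reworked spec_helper.rb, whose diff is not shown here. A purely hypothetical sketch of what such a helper can look like, polling metadata until the topic propagates while tolerating transient errors:

    # Hypothetical implementation, not the gem's actual helper
    def wait_for_topic(admin, topic, timeout: 15)
      deadline = Time.now + timeout
      loop do
        begin
          names = admin.metadata(topic).topics.map { |t| t[:topic_name] }
          return if names.include?(topic)
        rescue Rdkafka::RdkafkaError
          # metadata lookups may fail until the topic has propagated
        end
        raise "Topic #{topic} did not appear within #{timeout}s" if Time.now > deadline
        sleep(0.5)
      end
    end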
@@ -278,6 +279,7 @@ describe Rdkafka::Consumer do
    before do
      admin = rdkafka_producer_config.admin
      admin.create_topic(topic, 1, 1).wait
+     wait_for_topic(admin, topic)
      admin.close
    end

@@ -426,7 +428,7 @@ describe Rdkafka::Consumer do
    describe '#assignment_lost?' do
      it "should not return true as we do have an assignment" do
        consumer.subscribe("consume_test_topic")
-       expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+       Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
          list.add_topic("consume_test_topic")
        end

@@ -436,7 +438,7 @@ describe Rdkafka::Consumer do
      it "should not return true after voluntary unsubscribing" do
        consumer.subscribe("consume_test_topic")
-       expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+       Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
          list.add_topic("consume_test_topic")
        end

@@ -1276,7 +1278,6 @@ describe Rdkafka::Consumer do

      # Consume to the end
      consumer.subscribe("consume_test_topic")
-     eof_count = 0
      eof_error = nil

      loop do
@@ -1291,9 +1292,52 @@ describe Rdkafka::Consumer do
      end

      expect(eof_error.code).to eq(:partition_eof)
-     expect(eof_error.details[:topic]).to eq('consume_test_topic')
-     expect(eof_error.details[:partition]).to be_a(Integer)
-     expect(eof_error.details[:offset]).to be_a(Integer)
+   end
+ end
+
+ describe "long running consumption" do
+   let(:consumer) { rdkafka_consumer_config.consumer }
+   let(:producer) { rdkafka_producer_config.producer }
+
+   after { consumer.close }
+   after { producer.close }
+
+   it "should consume messages continuously for 60 seconds" do
+     consumer.subscribe("consume_test_topic")
+     wait_for_assignment(consumer)
+
+     messages_consumed = 0
+     start_time = Time.now
+
+     # Producer thread - sends message every second
+     producer_thread = Thread.new do
+       counter = 0
+       while Time.now - start_time < 60
+         producer.produce(
+           topic: "consume_test_topic",
+           payload: "payload #{counter}",
+           key: "key #{counter}",
+           partition: 0
+         ).wait
+         counter += 1
+         sleep(1)
+       end
+     end
+
+     # Consumer loop
+     while Time.now - start_time < 60
+       message = consumer.poll(1000)
+       if message
+         expect(message).to be_a Rdkafka::Consumer::Message
+         expect(message.topic).to eq("consume_test_topic")
+         messages_consumed += 1
+         consumer.commit if messages_consumed % 10 == 0
+       end
+     end
+
+     producer_thread.join
+
+     expect(messages_consumed).to be > 50 # Should consume most messages
      end
    end
  end
@@ -31,7 +31,7 @@ describe Rdkafka::Metadata do
      expect(subject.brokers.length).to eq(1)
      expect(subject.brokers[0][:broker_id]).to eq(1)
      expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
-     expect(subject.brokers[0][:broker_port]).to eq(9092)
+     expect(subject.brokers[0][:broker_port]).to eq(rdkafka_base_config[:'bootstrap.servers'].split(':').last.to_i)
    end

    it "#topics returns data on our test topic" do
@@ -54,7 +54,7 @@ describe Rdkafka::Metadata do
      expect(subject.brokers.length).to eq(1)
      expect(subject.brokers[0][:broker_id]).to eq(1)
      expect(%w[127.0.0.1 localhost]).to include(subject.brokers[0][:broker_name])
-     expect(subject.brokers[0][:broker_port]).to eq(9092)
+     expect(subject.brokers[0][:broker_port]).to eq(rdkafka_base_config[:'bootstrap.servers'].split(':').last.to_i)
    end

    it "#topics returns data about all of our test topics" do
@@ -20,6 +20,6 @@ describe Rdkafka::Producer::DeliveryReport do
    end

    it "should get the error" do
-     expect(subject.error).to eq -1
+     expect(subject.error).to eq(-1)
    end
  end