rdkafka 0.19.0 → 0.20.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +29 -2
- data/.ruby-version +1 -1
- data/CHANGELOG.md +7 -0
- data/README.md +1 -0
- data/dist/librdkafka-2.6.1.tar.gz +0 -0
- data/dist/patches/rdkafka_global_init.patch +15 -0
- data/docker-compose.yml +1 -3
- data/ext/Rakefile +6 -6
- data/lib/rdkafka/bindings.rb +5 -0
- data/lib/rdkafka/consumer.rb +16 -80
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +2 -0
- data/spec/rdkafka/consumer/topic_partition_list_spec.rb +8 -2
- data/spec/rdkafka/consumer_spec.rb +4 -231
- data.tar.gz.sig +0 -0
- metadata +5 -9
- metadata.gz.sig +0 -0
- data/dist/librdkafka_2.5.3.tar.gz +0 -0
- data/dist/patches/rdkafka_sticky_assignor.c.patch +0 -26
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1be38bd2e19d5ef4a8ddbe9a884ab1659448e240975d3aff8805529206ed7764
+  data.tar.gz: 4e5c9662def495ceccdb05fd29594568286b980c2a3c57f9cc405fec955913d5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d6305b9bc8baf478da702f92678b39089ba10f987b8dee0c71d0be9b2799f3060d1f11893428cee0471b331f640627252888e5fe3be06849712b87e7bc2452a4
+  data.tar.gz: 351fc0bf7dcedc63075bb62b765d7db846979146dcb49b7494eb102bf2630799d4a5da76a02b08499a3061e1efbddaab6c965b22feeb50832f5a09f33bcd9801
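These digests cover the two archives packed inside the `.gem` file (a `.gem` is a plain tar archive holding `metadata.gz`, `data.tar.gz` and `checksums.yaml.gz`). A minimal sketch for re-verifying one of them locally, assuming the gem was fetched and unpacked first:

require "digest"

# After `gem fetch rdkafka -v 0.20.0` and `tar -xf rdkafka-0.20.0.gem`,
# the digest of the extracted archive should match checksums.yaml:
puts Digest::SHA256.file("data.tar.gz").hexdigest
# expected for 0.20.0:
# 4e5c9662def495ceccdb05fd29594568286b980c2a3c57f9cc405fec955913d5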
checksums.yaml.gz.sig
CHANGED
Binary file
data/.github/workflows/ci.yml
CHANGED
@@ -22,12 +22,12 @@ jobs:
       fail-fast: false
       matrix:
         ruby:
-          - '3.4
+          - '3.4'
           - '3.3'
           - '3.2'
           - '3.1'
         include:
-          - ruby: '3.
+          - ruby: '3.4'
            coverage: 'true'
     steps:
       - uses: actions/checkout@v4

@@ -54,3 +54,30 @@ jobs:
           cd ext && bundle exec rake
           cd ..
           bundle exec rspec
+
+
+  macos_build:
+    timeout-minutes: 30
+    runs-on: macos-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        ruby:
+          - '3.4'
+          - '3.3'
+          - '3.2'
+          - '3.1'
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{matrix.ruby}}
+          bundler-cache: false
+
+      - name: Build rdkafka-ruby
+        run: |
+          set -e
+          bundle install --path vendor/bundle
+          cd ext && bundle exec rake
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.
+3.4.1
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,12 @@
 # Rdkafka Changelog
 
+## 0.20.0 (2025-01-07)
+- **[Breaking]** Deprecate and remove `#each_batch` due to data consistency concerns.
+- [Enhancement] Bump librdkafka to `2.6.1`.
+- [Enhancement] Expose `rd_kafka_global_init` to mitigate macOS forking issues.
+- [Enhancement] Avoid clobbering LDFLAGS and CPPFLAGS if in a nix-prepared environment (secobarbital).
+- [Patch] Retire the no longer needed cooperative-sticky patch.
+
 ## 0.19.0 (2024-10-01)
 - **[Breaking]** Drop Ruby 3.0 support
 - [Enhancement] Update `librdkafka` to `2.5.3`
data/README.md
CHANGED
@@ -163,6 +163,7 @@ bundle exec rake produce_messages
 
 | rdkafka-ruby | librdkafka | patches |
 |-|-|-|
+| 0.20.0 (2025-01-07) | 2.6.1 (2024-11-18) | yes |
 | 0.19.0 (2024-10-01) | 2.5.3 (2024-09-02) | yes |
 | 0.18.0 (2024-09-02) | 2.5.0 (2024-06-10) | yes |
 | 0.17.0 (2024-08-03) | 2.4.0 (2024-05-07) | no |
data/dist/librdkafka-2.6.1.tar.gz
ADDED
Binary file

data/dist/patches/rdkafka_global_init.patch
ADDED
@@ -0,0 +1,15 @@
+# This patch is released under the 2-clause BSD license, same as librdkafka
+#
+--- librdkafka_2.5.3/src/rdkafka.h
++++ librdkafka_2.5.3/src/rdkafka.h
+@@ -1101,6 +1101,10 @@ rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
+    const char *topic,
+    int32_t partition);
+
++/**
++ * @brief Allows for the global init of rdkafka when required in the Ruby process.
++ */
++RD_EXPORT void rd_kafka_global_init(void);
+
+/**
+ * @brief Sort list using comparator \p cmp.
data/docker-compose.yml
CHANGED
data/ext/Rakefile
CHANGED
@@ -16,17 +16,17 @@ task :default => :clean do
   require "mini_portile2"
   recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
-  # Use default homebrew openssl if we're on mac and the directory exists
-  # and each of flags is not
-  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
-    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV
-    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV
+  # Use default homebrew openssl if we're on mac, the directory exists, we are not in a
+  # nix-prepared environment and each of the flags is not already set
+  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}") && !ENV.key?("NIX_LDFLAGS")
+    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV.key?("CPPFLAGS")
+    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV.key?("LDFLAGS")
   end
 
   releases = File.expand_path(File.join(File.dirname(__FILE__), '../dist'))
 
   recipe.files << {
-    :url => "file://#{releases}/
+    :url => "file://#{releases}/librdkafka-#{Rdkafka::LIBRDKAFKA_VERSION}.tar.gz",
     :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
   }
   recipe.configure_options = ["--host=#{recipe.host}"]
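For context on how the build consumes these files: MiniPortile2 unpacks the vendored tarball and can apply the bundled `dist/patches/*.patch` files before compiling. A simplified sketch of that flow, written against the MiniPortile2 API rather than the gem's exact build code:

require "mini_portile2"

releases = File.expand_path("../dist", __dir__)

recipe = MiniPortile.new("librdkafka", "2.6.1")
recipe.files << {
  :url => "file://#{releases}/librdkafka-2.6.1.tar.gz",
  :sha256 => "0ddf205ad8d36af0bc72a2fec20639ea02e1d583e353163bf7f4683d949e901b"
}

# MiniPortile applies registered patch files (e.g. rdkafka_global_init.patch)
# after extracting the tarball and before running configure/make.
recipe.patch_files.concat(Dir["#{releases}/patches/*.patch"].sort)

recipe.cook # download and verify, patch, configure, compile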
data/lib/rdkafka/bindings.rb
CHANGED
@@ -30,6 +30,11 @@ module Rdkafka
       layout :value, :size_t
     end
 
+    # This function comes from our patch on top of librdkafka. It allows us to load all the
+    # librdkafka components without initializing the client.
+    # @see https://github.com/confluentinc/librdkafka/issues/4590
+    attach_function :rd_kafka_global_init, [], :void
+
     # Polling
 
     attach_function :rd_kafka_flush, [:pointer, :int], :int, blocking: true
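The changelog ties this binding to fork safety on macOS, where librdkafka's otherwise lazy global initialization could first run inside a forked child. A minimal sketch of calling it eagerly in the parent process (for example in an app boot file); invoking the binding directly like this is an assumption, not a documented API:

require "rdkafka"

# Run librdkafka's one-time global initialization in the parent process,
# before any worker processes fork, so no child triggers it mid-fork.
Rdkafka::Bindings.rd_kafka_global_init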
data/lib/rdkafka/consumer.rb
CHANGED
@@ -609,87 +609,23 @@ module Rdkafka
       end
     end
 
-    # Poll for new messages and yield them in batches that may contain
-    # messages from more than one partition.
-    #
-    # Rather than yield each message immediately as soon as it is received,
-    # each_batch will attempt to wait for as long as `timeout_ms` in order
-    # to create a batch of up to but no more than `max_items` in size.
-    #
-    # Said differently, if more than `max_items` are available within
-    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
-    # array, but if `timeout_ms` passes by with fewer messages arriving, it
-    # will yield an array of fewer messages, quite possibly zero.
-    #
-    # In order to prevent wrongly auto committing many messages at once across
-    # possibly many partitions, callers must explicitly indicate which messages
-    # have been successfully processed as some consumed messages may not have
-    # been yielded yet. To do this, the caller should set
-    # `enable.auto.offset.store` to false and pass processed messages to
-    # {store_offset}. It is also possible, though more complex, to set
-    # 'enable.auto.commit' to false and then pass a manually assembled
-    # TopicPartitionList to {commit}.
-    #
-    # As with `each`, iteration will end when the consumer is closed.
-    #
-    # Exception behavior is more complicated than with `each`, in that if
-    # :yield_on_error is true, and an exception is raised during the
-    # poll, and messages have already been received, they will be yielded to
-    # the caller before the exception is allowed to propagate.
-    #
-    # If you are setting either auto.commit or auto.offset.store to false in
-    # the consumer configuration, then you should let yield_on_error keep its
-    # default value of false because you are guaranteed to see these messages
-    # again. However, if both auto.commit and auto.offset.store are set to
-    # true, you should set yield_on_error to true so you can process messages
-    # that you may or may not see again.
-    #
-    # @param max_items [Integer] Maximum size of the yielded array of messages
-    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    # @param timeout_ms [Integer] max time to wait for up to max_items
-    #
-    # @yieldparam messages [Array] An array of received Message
-    # @yieldparam pending_exception [Exception] normally nil, or an exception
-    #
-    # @yield [messages, pending_exception]
-    # which will be propagated after processing of the partial batch is complete.
-    #
-    # @return [nil]
-    #
-    # @raise [RdkafkaError] When polling fails
+    # Deprecated. Please read the error message for more details.
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
-      rescue Rdkafka::RdkafkaError => error
-        raise unless yield_on_error
-        raise if slice.empty?
-        yield slice.dup, error
-        raise
-      end
-      if message
-        slice << message
-        bytes += message.payload.bytesize if message.payload
-      end
-      if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
-        yield slice.dup, nil
-        slice.clear
-        bytes = 0
-        end_time = monotonic_now + timeout_ms / 1000.0
-      end
-      end
+      raise NotImplementedError, <<~ERROR
+        `each_batch` has been removed due to data consistency concerns.
+
+        This method was removed because it did not properly handle partition reassignments,
+        which could lead to processing messages from partitions that were no longer owned
+        by this consumer, resulting in duplicate message processing and data inconsistencies.
+
+        Recommended alternatives:
+
+        1. Implement your own batching logic using rebalance callbacks to properly handle
+           partition revocations and ensure message processing correctness.
+
+        2. Use a high-level batching library that supports proper partition reassignment
+           handling out of the box (such as the Karafka framework).
+      ERROR
     end
 
     # Returns pointer to the consumer group metadata. It is used only in the context of
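As a starting point for the first recommended alternative, batching can be rebuilt on the still-supported `#each` and `#store_offset`. A rough sketch, assuming `enable.auto.offset.store` is set to `false` and with `process_batch` standing in for your own (hypothetical) processing code:

MAX_ITEMS = 100
MAX_WAIT  = 1.0 # seconds

batch    = []
deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + MAX_WAIT

consumer.each do |message|
  batch << message

  # Flush on size or age. Since #each blocks in poll, the deadline is only
  # re-checked when a message arrives.
  next unless batch.size >= MAX_ITEMS ||
              Process.clock_gettime(Process::CLOCK_MONOTONIC) >= deadline

  process_batch(batch)                        # hypothetical application logic
  batch.each { |m| consumer.store_offset(m) } # store offsets only after processing
  batch.clear
  deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + MAX_WAIT
end

A production version would also clear the buffer from a rebalance callback when partitions are revoked, which is exactly the gap that led to `each_batch` being removed.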
data/lib/rdkafka/version.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.19.0"
-  LIBRDKAFKA_VERSION = "2.5.3"
-  LIBRDKAFKA_SOURCE_SHA256 = "
+  VERSION = "0.20.0"
+  LIBRDKAFKA_VERSION = "2.6.1"
+  LIBRDKAFKA_SOURCE_SHA256 = "0ddf205ad8d36af0bc72a2fec20639ea02e1d583e353163bf7f4683d949e901b"
 end
data/lib/rdkafka.rb
CHANGED

data/spec/rdkafka/consumer/topic_partition_list_spec.rb
CHANGED
@@ -114,12 +114,18 @@ describe Rdkafka::Consumer::TopicPartitionList do
   end
 
   describe "#to_s" do
+    let(:expected) do
+      if RUBY_VERSION >= '3.4.0'
+        "<TopicPartitionList: {\"topic1\" => [<Partition 0>, <Partition 1>]}>"
+      else
+        "<TopicPartitionList: {\"topic1\"=>[<Partition 0>, <Partition 1>]}>"
+      end
+    end
+
     it "should return a human readable representation" do
       list = Rdkafka::Consumer::TopicPartitionList.new
       list.add_topic("topic1", [0, 1])
 
-      expected = "<TopicPartitionList: {\"topic1\"=>[<Partition 0>, <Partition 1>]}>"
-
       expect(list.to_s).to eq expected
     end
   end
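The version-dependent expectation is needed because Ruby 3.4 changed `Hash#inspect` to put spaces around `=>`, and `TopicPartitionList#to_s` embeds a hash's inspect output:

{ "topic1" => [0, 1] }.inspect
# Ruby 3.3 and earlier: {"topic1"=>[0, 1]}
# Ruby 3.4:             {"topic1" => [0, 1]}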
data/spec/rdkafka/consumer_spec.rb
CHANGED
@@ -921,236 +921,10 @@ describe Rdkafka::Consumer do
     end
   end
 
   describe "#each_batch" do
-    end
-
-    after do
-      @topic = nil
-    end
-
-    def topic_name
-      @topic
-    end
-
-    def produce_n(n)
-      handles = []
-      n.times do |i|
-        handles << producer.produce(
-          topic: topic_name,
-          payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
-          key: i.to_s,
-          partition: 0
-        )
-      end
-      handles.each(&:wait)
-    end
-
-    def new_message
-      instance_double("Rdkafka::Consumer::Message").tap do |message|
-        allow(message).to receive(:payload).and_return(message_payload)
-      end
-    end
-
-    it "retrieves messages produced into a topic" do
-      # This is the only each_batch test that actually produces real messages
-      # into a topic in the real kafka of the container.
-      #
-      # The other tests stub 'poll' which makes them faster and more reliable,
-      # but it makes sense to keep a single test with a fully integrated flow.
-      # This will help to catch breaking changes in the behavior of 'poll',
-      # libdrkafka, or Kafka.
-      #
-      # This is, in effect, an integration test and the subsequent specs are
-      # unit tests.
-      admin = rdkafka_config.admin
-      create_topic_handle = admin.create_topic(topic_name, 1, 1)
-      create_topic_handle.wait(max_wait_timeout: 15.0)
-      consumer.subscribe(topic_name)
-      produce_n 42
-      all_yields = []
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 42
-      end
-      expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
-      expect(all_yields.flatten.size).to eq 42
-      expect(all_yields.size).to be > 4
-      expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
-      admin.close
-    end
-
-    it "should batch poll results and yield arrays of messages" do
-      consumer.subscribe(topic_name)
-      all_yields = []
-      expect(consumer)
-        .to receive(:poll)
-        .exactly(10).times
-        .and_return(new_message)
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 10
-      end
-      expect(all_yields.first).to be_instance_of(Array)
-      expect(all_yields.flatten.size).to eq 10
-      non_empty_yields = all_yields.reject { |batch| batch.empty? }
-      expect(non_empty_yields.size).to be < 10
-    end
-
-    it "should yield a partial batch if the timeout is hit with some messages" do
-      consumer.subscribe(topic_name)
-      poll_count = 0
-      expect(consumer)
-        .to receive(:poll)
-        .at_least(3).times do
-          poll_count = poll_count + 1
-          if poll_count > 2
-            sleep 0.1
-            nil
-          else
-            new_message
-          end
-        end
-      all_yields = []
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 2
-      end
-      expect(all_yields.flatten.size).to eq 2
-    end
-
-    it "should yield [] if nothing is received before the timeout" do
-      admin = rdkafka_config.admin
-      create_topic_handle = admin.create_topic(topic_name, 1, 1)
-      create_topic_handle.wait(max_wait_timeout: 15.0)
-      consumer.subscribe(topic_name)
-      consumer.each_batch do |batch|
-        expect(batch).to eq([])
-        break
-      end
-      admin.close
-    end
-
-    it "should yield batchs of max_items in size if messages are already fetched" do
-      yielded_batches = []
-      expect(consumer)
-        .to receive(:poll)
-        .with(anything)
-        .exactly(20).times
-        .and_return(new_message)
-
-      consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
-        yielded_batches << batch
-        break if yielded_batches.flatten.size >= 20
-        break if yielded_batches.size >= 20 # so failure doesn't hang
-      end
-      expect(yielded_batches.size).to eq 2
-      expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
-    end
-
-    it "should yield batchs as soon as bytes_threshold is hit" do
-      yielded_batches = []
-      expect(consumer)
-        .to receive(:poll)
-        .with(anything)
-        .exactly(20).times
-        .and_return(new_message)
-
-      consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
-        yielded_batches << batch
-        break if yielded_batches.flatten.size >= 20
-        break if yielded_batches.size >= 20 # so failure doesn't hang
-      end
-      expect(yielded_batches.size).to eq 5
-      expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
-    end
-
-    context "error raised from poll and yield_on_error is true" do
-      it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_consumer_config(
-          {
-            :"enable.auto.commit" => false,
-            :"enable.auto.offset.store" => false
-          }
-        )
-        consumer = config.consumer
-        consumer.subscribe(topic_name)
-        batches_yielded = []
-        exceptions_yielded = []
-        each_batch_iterations = 0
-        poll_count = 0
-        expect(consumer)
-          .to receive(:poll)
-          .with(anything)
-          .exactly(3).times
-          .and_wrap_original do |method, *args|
-            poll_count = poll_count + 1
-            if poll_count == 3
-              raise Rdkafka::RdkafkaError.new(27,
-                                              "partitions ... too ... heavy ... must ... rebalance")
-            else
-              new_message
-            end
-          end
-        expect {
-          consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
-            batches_yielded << batch
-            exceptions_yielded << pending_error
-            each_batch_iterations = each_batch_iterations + 1
-          end
-        }.to raise_error(Rdkafka::RdkafkaError)
-        expect(poll_count).to eq 3
-        expect(each_batch_iterations).to eq 1
-        expect(batches_yielded.size).to eq 1
-        expect(batches_yielded.first.size).to eq 2
-        expect(exceptions_yielded.flatten.size).to eq 1
-        expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
-        consumer.close
-      end
-    end
-
-    context "error raised from poll and yield_on_error is false" do
-      it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_consumer_config(
-          {
-            :"enable.auto.commit" => false,
-            :"enable.auto.offset.store" => false
-          }
-        )
-        consumer = config.consumer
-        consumer.subscribe(topic_name)
-        batches_yielded = []
-        exceptions_yielded = []
-        each_batch_iterations = 0
-        poll_count = 0
-        expect(consumer)
-          .to receive(:poll)
-          .with(anything)
-          .exactly(3).times
-          .and_wrap_original do |method, *args|
-            poll_count = poll_count + 1
-            if poll_count == 3
-              raise Rdkafka::RdkafkaError.new(27,
-                                              "partitions ... too ... heavy ... must ... rebalance")
-            else
-              new_message
-            end
-          end
-        expect {
-          consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
-            batches_yielded << batch
-            exceptions_yielded << pending_error
-            each_batch_iterations = each_batch_iterations + 1
-          end
-        }.to raise_error(Rdkafka::RdkafkaError)
-        expect(poll_count).to eq 3
-        expect(each_batch_iterations).to eq 0
-        expect(batches_yielded.size).to eq 0
-        expect(exceptions_yielded.size).to eq 0
-        consumer.close
-      end
+    it 'expect to raise an error' do
+      expect do
+        consumer.each_batch {}
+      end.to raise_error(NotImplementedError)
     end
   end
 

@@ -1317,7 +1091,6 @@ describe Rdkafka::Consumer do
       {
        :subscribe => [ nil ],
        :unsubscribe => nil,
-       :each_batch => nil,
        :pause => [ nil ],
        :resume => [ nil ],
        :subscription => nil,
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,12 +1,11 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.19.0
+  version: 0.20.0
 platform: ruby
 authors:
 - Thijs Cadier
 - Maciej Mensfeld
-autorequire:
 bindir: bin
 cert_chain:
 - |

@@ -36,7 +35,7 @@ cert_chain:
   i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
   ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
   -----END CERTIFICATE-----
-date:
+date: 2025-01-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi

@@ -186,8 +185,8 @@ files:
 - README.md
 - Rakefile
 - certs/cert.pem
-- dist/librdkafka_2.5.3.tar.gz
-- dist/patches/rdkafka_sticky_assignor.c.patch
+- dist/librdkafka-2.6.1.tar.gz
+- dist/patches/rdkafka_global_init.patch
 - docker-compose.yml
 - ext/README.md
 - ext/Rakefile

@@ -261,7 +260,6 @@ files:
 - spec/rdkafka/producer/delivery_report_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
-homepage:
 licenses:
 - MIT
 metadata:

@@ -272,7 +270,6 @@ metadata:
   source_code_uri: https://github.com/karafka/rdkafka-ruby
   documentation_uri: https://github.com/karafka/rdkafka-ruby/blob/main/README.md
   rubygems_mfa_required: 'true'
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib

@@ -287,8 +284,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.6.2
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
   It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
metadata.gz.sig
CHANGED
Binary file
data/dist/librdkafka_2.5.3.tar.gz
DELETED
Binary file

data/dist/patches/rdkafka_sticky_assignor.c.patch
DELETED
@@ -1,26 +0,0 @@
-# This patch is released under the 2-clause BSD license, same as librdkafka
-# Fixes: https://github.com/confluentinc/librdkafka/issues/4783
-#
---- librdkafka_2.5.3/src/rdkafka_sticky_assignor.c	2024-07-08 09:47:43.000000000 +0200
-+++ librdkafka_2.5.3/src/rdkafka_sticky_assignor.c	2024-07-30 09:44:38.529759640 +0200
-@@ -769,7 +769,7 @@
-        const rd_kafka_topic_partition_list_t *partitions;
-        const char *consumer;
-        const rd_map_elem_t *elem;
--       int i;
-+       int i, j;
-
-        /* The assignment is balanced if minimum and maximum numbers of
-         * partitions assigned to consumers differ by at most one. */
-@@ -836,9 +836,9 @@
-
-        /* Otherwise make sure it can't get any more partitions */
-
--       for (i = 0; i < potentialTopicPartitions->cnt; i++) {
-+       for (j = 0; j < potentialTopicPartitions->cnt; j++) {
-                const rd_kafka_topic_partition_t *partition =
--                   &potentialTopicPartitions->elems[i];
-+                   &potentialTopicPartitions->elems[j];
-                const char *otherConsumer;
-                int otherConsumerPartitionCount;
-