karafka-rdkafka 0.18.1 → 0.19.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci.yml +41 -8
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +9 -0
- data/README.md +2 -0
- data/dist/{librdkafka-2.6.1.tar.gz → librdkafka-2.8.0.tar.gz} +0 -0
- data/docker-compose.yml +1 -3
- data/ext/Rakefile +5 -5
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/consumer.rb +16 -80
- data/lib/rdkafka/native_kafka.rb +4 -2
- data/lib/rdkafka/producer.rb +18 -6
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +2 -0
- data/renovate.json +13 -1
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/consumer_spec.rb +4 -231
- data/spec/rdkafka/producer_spec.rb +40 -0
- data.tar.gz.sig +3 -3
- metadata +6 -8
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 390b5be16a78ebe2b6f994429cfe32e51bc11d88735ef28bfa57b9b8ba34d73e
+  data.tar.gz: 66003597faaddea33ab464aa3ec9dd906f3676f85e12e1e7c7bc2379e35feef4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b7856bee34f2d3d4abe28013d3a8905d4166067f5ce4a1cddc4dc6572d0ebfb03f412a3d4a53ac515a3523cbc8088774699da8fa2fad8d88272db4d08e2ec81e
+  data.tar.gz: 0d1fbbfce6be2bb41cfc7db66f53030e907f4c9c7f8e2251aabdeb7ca9786d120e22e971eebf426dbdd57dcb60c198506204cb2b4443333744c65339d9956746
checksums.yaml.gz.sig
CHANGED
Binary file
data/.github/CODEOWNERS
ADDED
data/.github/workflows/ci.yml
CHANGED
@@ -6,9 +6,14 @@ concurrency:
 
 on:
   pull_request:
+    branches: [ main, master ]
   push:
+    branches: [ main, master ]
   schedule:
-    - cron:
+    - cron: '0 1 * * *'
+
+permissions:
+  contents: read
 
 env:
   BUNDLE_RETRY: 6
@@ -22,24 +27,27 @@ jobs:
       fail-fast: false
       matrix:
         ruby:
-          - '3.4
+          - '3.4'
           - '3.3'
           - '3.2'
           - '3.1'
         include:
-          - ruby: '3.
+          - ruby: '3.4'
             coverage: 'true'
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+
       - name: Install package dependencies
        run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
 
-      - name: Start Kafka with
+      - name: Start Kafka with Docker Compose
        run: |
          docker compose up -d || (sleep 5 && docker compose up -d)
 
       - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
+        uses: ruby/setup-ruby@354a1ad156761f5ee2b7b13fa8e09943a5e8d252 # v1.229.0
        with:
          ruby-version: ${{matrix.ruby}}
          bundler-cache: true
@@ -47,10 +55,35 @@ jobs:
       - name: Run all specs
        env:
          GITHUB_COVERAGE: ${{matrix.coverage}}
-
        run: |
          set -e
-          bundle install --
+          bundle install --jobs 4 --retry 3
          cd ext && bundle exec rake
          cd ..
          bundle exec rspec
+
+  macos_build:
+    timeout-minutes: 30
+    runs-on: macos-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        ruby:
+          - '3.4'
+          - '3.3'
+          - '3.2'
+          - '3.1'
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@354a1ad156761f5ee2b7b13fa8e09943a5e8d252 # v1.229.0
+        with:
+          ruby-version: ${{matrix.ruby}}
+          bundler-cache: false
+
+      - name: Build rdkafka-ruby
+        run: |
+          set -e
+          bundle install --jobs 4 --retry 3
+          cd ext && bundle exec rake
data/.github/workflows/verify-action-pins.yml
ADDED
@@ -0,0 +1,16 @@
+name: Verify Action Pins
+on:
+  pull_request:
+    paths:
+      - '.github/workflows/**'
+jobs:
+  verify:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - name: Check SHA pins
+        run: |
+          if grep -E -r "uses: .*/.*@(v[0-9]+|main|master)($|[[:space:]]|$)" --include="*.yml" --include="*.yaml" .github/workflows/ | grep -v "#"; then
+            echo "::error::Actions should use SHA pins, not tags or branch names"
+            exit 1
+          fi
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.
+3.4.2
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,14 @@
 # Rdkafka Changelog
 
+## 0.19.1 (2025-04-07)
+- [Enhancement] Support producing and consuming of headers with multiple values (KIP-82).
+- [Enhancement] Allow customization of the native Kafka poll time.
+
+## 0.19.0 (2025-01-20)
+- **[Breaking]** Deprecate and remove `#each_batch` due to data consistency concerns.
+- [Enhancement] Bump librdkafka to 2.8.0
+- [Fix] Restore `Rdkafka::Bindings.rd_kafka_global_init` as it was not the source of the original issue.
+
 ## 0.18.1 (2024-12-04)
 - [Fix] Do not run `Rdkafka::Bindings.rd_kafka_global_init` on require to prevent some of macos versions from hanging on Puma fork.
 
data/README.md
CHANGED
@@ -163,6 +163,8 @@ bundle exec rake produce_messages
 
 | rdkafka-ruby | librdkafka | patches |
 |-|-|-|
+| 0.19.1 (2025-04-07) | 2.8.0 (2025-01-07) | yes |
+| 0.19.0 (2025-01-20) | 2.8.0 (2025-01-07) | yes |
 | 0.18.0 (2024-11-26) | 2.6.1 (2024-11-18) | yes |
 | 0.17.4 (2024-09-02) | 2.5.3 (2024-09-02) | yes |
 | 0.17.0 (2024-08-01) | 2.5.0 (2024-07-10) | yes |
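Both columns of the table are also exposed as runtime constants, so an installed gem can be checked directly. A minimal sketch (the printed values assume the 0.19.1 release covered by this diff):

  require "rdkafka"

  puts Rdkafka::VERSION            # => "0.19.1"
  puts Rdkafka::LIBRDKAFKA_VERSION # => "2.8.0"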
data/dist/{librdkafka-2.6.1.tar.gz → librdkafka-2.8.0.tar.gz}
RENAMED
Binary file
data/docker-compose.yml
CHANGED
data/ext/Rakefile
CHANGED
@@ -16,11 +16,11 @@ task :default => :clean do
   require "mini_portile2"
   recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
-  # Use default homebrew openssl if we're on mac and the directory exists
-  # and each of flags is not
-  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
-    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV
-    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV
+  # Use default homebrew openssl if we're on mac and the directory exists, is not using nix-prepared libraries
+  # and each of flags is not already set
+  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}") && ENV.key?("NIX_LDFLAGS")
+    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV.key?("CPPFLAGS")
+    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV.key?("LDFLAGS")
   end
 
   releases = File.expand_path(File.join(File.dirname(__FILE__), '../dist'))
data/lib/rdkafka/config.rb
CHANGED
@@ -233,11 +233,12 @@ module Rdkafka
     #
     # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
     #   automatically. Defaults to true. Set to false only when doing complex initialization.
+    # @param native_kafka_poll_timeout_ms [Integer] ms poll time of the native Kafka
     # @return [Producer] The created producer
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def producer(native_kafka_auto_start: true)
+    def producer(native_kafka_auto_start: true, native_kafka_poll_timeout_ms: 100)
       # Create opaque
       opaque = Opaque.new
       # Create Kafka config
@@ -254,7 +255,8 @@ module Rdkafka
           kafka,
           run_polling_thread: true,
           opaque: opaque,
-          auto_start: native_kafka_auto_start
+          auto_start: native_kafka_auto_start,
+          timeout_ms: native_kafka_poll_timeout_ms
         ),
         partitioner_name
       ).tap do |producer|
@@ -266,11 +268,12 @@ module Rdkafka
     #
     # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
     #   automatically. Defaults to true. Set to false only when doing complex initialization.
+    # @param native_kafka_poll_timeout_ms [Integer] ms poll time of the native Kafka
     # @return [Admin] The created admin instance
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def admin(native_kafka_auto_start: true)
+    def admin(native_kafka_auto_start: true, native_kafka_poll_timeout_ms: 100)
       opaque = Opaque.new
       config = native_config(opaque)
       Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
@@ -282,7 +285,8 @@ module Rdkafka
           kafka,
           run_polling_thread: true,
           opaque: opaque,
-          auto_start: native_kafka_auto_start
+          auto_start: native_kafka_auto_start,
+          timeout_ms: native_kafka_poll_timeout_ms
         )
       )
     end
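The new keyword is part of the public `Config` API for both producer and admin instances. A minimal usage sketch, assuming a reachable broker (the bootstrap address and the 50 ms value are illustrative only):

  require "rdkafka"

  config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")

  # Poll the underlying native client every 50 ms instead of the 100 ms default
  producer = config.producer(native_kafka_poll_timeout_ms: 50)
  admin    = config.admin(native_kafka_poll_timeout_ms: 50)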
data/lib/rdkafka/consumer/headers.rb
CHANGED
@@ -7,11 +7,13 @@ module Rdkafka
       EMPTY_HEADERS = {}.freeze
 
       # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      # where each key maps to either a String (single value) or Array<String> (multiple values)
+      # to support duplicate headers per KIP-82
       #
       # @private
       #
       # @param [Rdkafka::Bindings::Message] native_message
-      # @return [Hash<String, String
+      # @return [Hash<String, String|Array<String>>] headers Hash for the native_message
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       def self.from_native(native_message)
         headers_ptrptr = FFI::MemoryPointer.new(:pointer)
@@ -53,10 +55,19 @@ module Rdkafka
           size = size_ptr[:value]
 
           value_ptr = value_ptrptr.read_pointer
-
           value = value_ptr.read_string(size)
 
-          headers
+          if headers.key?(name)
+            # If we've seen this header before, convert to array if needed and append
+            if headers[name].is_a?(Array)
+              headers[name] << value
+            else
+              headers[name] = [headers[name], value]
+            end
+          else
+            # First occurrence - store as single value
+            headers[name] = value
+          end
 
           idx += 1
         end
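With this change a header consumed through `Headers.from_native` stays a plain String when its name occurs once and becomes an Array<String> only when the same name repeats, and the producer (see its diff further below) accepts the mirrored shape. A sketch of the round trip, with topic name and values purely illustrative:

  # Producing two "version" headers and one "type" header
  producer.produce(
    topic:   "example_topic",
    payload: "payload",
    headers: { "version" => ["2.1.3", "2.1.4"], "type" => "String" }
  )

  # A consumed message carrying those headers is then exposed as:
  message.headers
  # => { "version" => ["2.1.3", "2.1.4"], "type" => "String" }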
data/lib/rdkafka/consumer.rb
CHANGED
@@ -619,87 +619,23 @@ module Rdkafka
       end
     end
 
-    #
-    # messages from more than one partition.
-    #
-    # Rather than yield each message immediately as soon as it is received,
-    # each_batch will attempt to wait for as long as `timeout_ms` in order
-    # to create a batch of up to but no more than `max_items` in size.
-    #
-    # Said differently, if more than `max_items` are available within
-    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
-    # array, but if `timeout_ms` passes by with fewer messages arriving, it
-    # will yield an array of fewer messages, quite possibly zero.
-    #
-    # In order to prevent wrongly auto committing many messages at once across
-    # possibly many partitions, callers must explicitly indicate which messages
-    # have been successfully processed as some consumed messages may not have
-    # been yielded yet. To do this, the caller should set
-    # `enable.auto.offset.store` to false and pass processed messages to
-    # {store_offset}. It is also possible, though more complex, to set
-    # 'enable.auto.commit' to false and then pass a manually assembled
-    # TopicPartitionList to {commit}.
-    #
-    # As with `each`, iteration will end when the consumer is closed.
-    #
-    # Exception behavior is more complicated than with `each`, in that if
-    # :yield_on_error is true, and an exception is raised during the
-    # poll, and messages have already been received, they will be yielded to
-    # the caller before the exception is allowed to propagate.
-    #
-    # If you are setting either auto.commit or auto.offset.store to false in
-    # the consumer configuration, then you should let yield_on_error keep its
-    # default value of false because you are guaranteed to see these messages
-    # again. However, if both auto.commit and auto.offset.store are set to
-    # true, you should set yield_on_error to true so you can process messages
-    # that you may or may not see again.
-    #
-    # @param max_items [Integer] Maximum size of the yielded array of messages
-    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    # @param timeout_ms [Integer] max time to wait for up to max_items
-    #
-    # @yieldparam messages [Array] An array of received Message
-    # @yieldparam pending_exception [Exception] normally nil, or an exception
-    #
-    # @yield [messages, pending_exception]
-    # which will be propagated after processing of the partial batch is complete.
-    #
-    # @return [nil]
-    #
-    # @raise [RdkafkaError] When polling fails
+    # Deprecated. Please read the error message for more details.
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
-        rescue Rdkafka::RdkafkaError => error
-          raise unless yield_on_error
-          raise if slice.empty?
-          yield slice.dup, error
-          raise
-        end
-        if message
-          slice << message
-          bytes += message.payload.bytesize if message.payload
-        end
-        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
-          yield slice.dup, nil
-          slice.clear
-          bytes = 0
-          end_time = monotonic_now + timeout_ms / 1000.0
-        end
-      end
+      raise NotImplementedError, <<~ERROR
+        `each_batch` has been removed due to data consistency concerns.
+
+        This method was removed because it did not properly handle partition reassignments,
+        which could lead to processing messages from partitions that were no longer owned
+        by this consumer, resulting in duplicate message processing and data inconsistencies.
+
+        Recommended alternatives:
+
+        1. Implement your own batching logic using rebalance callbacks to properly handle
+           partition revocations and ensure message processing correctness.
+
+        2. Use a high-level batching library that supports proper partition reassignment
+           handling out of the box (such as the Karafka framework).
+      ERROR
     end
 
     # Returns pointer to the consumer group metadata. It is used only in the context of
data/lib/rdkafka/native_kafka.rb
CHANGED
@@ -4,7 +4,7 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:, opaque:, auto_start: true)
+    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
       @inner = inner
       @opaque = opaque
       # Lock around external access
@@ -30,6 +30,8 @@ module Rdkafka
 
       @run_polling_thread = run_polling_thread
 
+      @timeout_ms = timeout_ms
+
       start if auto_start
 
       @closing = false
@@ -50,7 +52,7 @@ module Rdkafka
       @polling_thread = Thread.new do
         loop do
           @poll_mutex.synchronize do
-            Rdkafka::Bindings.rd_kafka_poll(@inner,
+            Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
           end
 
           # Exit thread if closing and the poll queue is empty
data/lib/rdkafka/producer.rb
CHANGED
@@ -309,7 +309,7 @@ module Rdkafka
     # @param partition [Integer,nil] Optional partition to produce to
     # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
-    # @param headers [Hash<String,String
+    # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
     # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
     # @param topic_config [Hash] topic config for given message dispatch. Allows to send messages to topics with different configuration
     #
@@ -401,11 +401,23 @@ module Rdkafka
       if headers
         headers.each do |key0, value0|
           key = key0.to_s
+          if value0.is_a?(Array)
+            # Handle array of values per KIP-82
+            value0.each do |value|
+              value = value.to_s
+              args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+              args << :string << key
+              args << :pointer << value
+              args << :size_t << value.bytesize
+            end
+          else
+            # Handle single value
+            value = value0.to_s
+            args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+            args << :string << key
+            args << :pointer << value
+            args << :size_t << value.bytesize
+          end
         end
       end
 
data/lib/rdkafka/version.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.
-  LIBRDKAFKA_VERSION = "2.
-  LIBRDKAFKA_SOURCE_SHA256 = "
+  VERSION = "0.19.1"
+  LIBRDKAFKA_VERSION = "2.8.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
 end
data/lib/rdkafka.rb
CHANGED
data/renovate.json
CHANGED
@@ -1,6 +1,18 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
   "extends": [
-    "config:
+    "config:recommended"
+  ],
+  "github-actions": {
+    "enabled": true,
+    "pinDigests": true
+  },
+  "packageRules": [
+    {
+      "matchManagers": [
+        "github-actions"
+      ],
+      "minimumReleaseAge": "7 days"
+    }
   ]
 }
data/spec/rdkafka/consumer/headers_spec.rb
CHANGED
@@ -3,7 +3,7 @@
 describe Rdkafka::Consumer::Headers do
   let(:headers) do
     { # Note String keys!
-      "version" => "2.1.3",
+      "version" => ["2.1.3", "2.1.4"],
       "type" => "String"
     }
   end
@@ -17,27 +17,39 @@ describe Rdkafka::Consumer::Headers do
       Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
     end
 
+    # First version header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
       .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][0].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers["version"][0]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end
 
+    # Second version header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
       .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
-        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null:
-        expect(size_ptr).to receive(:[]).with(:value).and_return(headers
-        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: "version"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][1].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers["version"][1]))
         Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
       end
 
+    # Single type header
     expect(Rdkafka::Bindings).to \
       receive(:rd_kafka_header_get_all)
-      .with(headers_ptr, 2, anything, anything, anything)
+      .with(headers_ptr, 2, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+        expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 2", read_string_to_null: "type"))
+        expect(size_ptr).to receive(:[]).with(:value).and_return(headers["type"].size)
+        expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 2", read_string: headers["type"]))
+        Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+      end
+
+    expect(Rdkafka::Bindings).to \
+      receive(:rd_kafka_header_get_all)
+      .with(headers_ptr, 3, anything, anything, anything)
       .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
   end
 
@@ -46,8 +58,12 @@ describe Rdkafka::Consumer::Headers do
   it { is_expected.to eq(headers) }
   it { is_expected.to be_frozen }
 
-  it '
-    expect(subject['version']).to eq("2.1.3")
+  it 'returns array for duplicate headers' do
+    expect(subject['version']).to eq(["2.1.3", "2.1.4"])
+  end
+
+  it 'returns string for single headers' do
+    expect(subject['type']).to eq("String")
   end
 
   it 'does not support symbols mappings' do
data/spec/rdkafka/consumer_spec.rb
CHANGED
@@ -948,236 +948,10 @@ describe Rdkafka::Consumer do
   end
 
   describe "#each_batch" do
-    end
-
-    after do
-      @topic = nil
-    end
-
-    def topic_name
-      @topic
-    end
-
-    def produce_n(n)
-      handles = []
-      n.times do |i|
-        handles << producer.produce(
-          topic: topic_name,
-          payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
-          key: i.to_s,
-          partition: 0
-        )
-      end
-      handles.each(&:wait)
-    end
-
-    def new_message
-      instance_double("Rdkafka::Consumer::Message").tap do |message|
-        allow(message).to receive(:payload).and_return(message_payload)
-      end
-    end
-
-    it "retrieves messages produced into a topic" do
-      # This is the only each_batch test that actually produces real messages
-      # into a topic in the real kafka of the container.
-      #
-      # The other tests stub 'poll' which makes them faster and more reliable,
-      # but it makes sense to keep a single test with a fully integrated flow.
-      # This will help to catch breaking changes in the behavior of 'poll',
-      # libdrkafka, or Kafka.
-      #
-      # This is, in effect, an integration test and the subsequent specs are
-      # unit tests.
-      admin = rdkafka_config.admin
-      create_topic_handle = admin.create_topic(topic_name, 1, 1)
-      create_topic_handle.wait(max_wait_timeout: 15.0)
-      consumer.subscribe(topic_name)
-      produce_n 42
-      all_yields = []
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 42
-      end
-      expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
-      expect(all_yields.flatten.size).to eq 42
-      expect(all_yields.size).to be > 4
-      expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
-      admin.close
-    end
-
-    it "should batch poll results and yield arrays of messages" do
-      consumer.subscribe(topic_name)
-      all_yields = []
-      expect(consumer)
-        .to receive(:poll)
-        .exactly(10).times
-        .and_return(new_message)
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 10
-      end
-      expect(all_yields.first).to be_instance_of(Array)
-      expect(all_yields.flatten.size).to eq 10
-      non_empty_yields = all_yields.reject { |batch| batch.empty? }
-      expect(non_empty_yields.size).to be < 10
-    end
-
-    it "should yield a partial batch if the timeout is hit with some messages" do
-      consumer.subscribe(topic_name)
-      poll_count = 0
-      expect(consumer)
-        .to receive(:poll)
-        .at_least(3).times do
-          poll_count = poll_count + 1
-          if poll_count > 2
-            sleep 0.1
-            nil
-          else
-            new_message
-          end
-        end
-      all_yields = []
-      consumer.each_batch(max_items: 10) do |batch|
-        all_yields << batch
-        break if all_yields.flatten.size >= 2
-      end
-      expect(all_yields.flatten.size).to eq 2
-    end
-
-    it "should yield [] if nothing is received before the timeout" do
-      admin = rdkafka_config.admin
-      create_topic_handle = admin.create_topic(topic_name, 1, 1)
-      create_topic_handle.wait(max_wait_timeout: 15.0)
-      consumer.subscribe(topic_name)
-      consumer.each_batch do |batch|
-        expect(batch).to eq([])
-        break
-      end
-      admin.close
-    end
-
-    it "should yield batchs of max_items in size if messages are already fetched" do
-      yielded_batches = []
-      expect(consumer)
-        .to receive(:poll)
-        .with(anything)
-        .exactly(20).times
-        .and_return(new_message)
-
-      consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
-        yielded_batches << batch
-        break if yielded_batches.flatten.size >= 20
-        break if yielded_batches.size >= 20 # so failure doesn't hang
-      end
-      expect(yielded_batches.size).to eq 2
-      expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
-    end
-
-    it "should yield batchs as soon as bytes_threshold is hit" do
-      yielded_batches = []
-      expect(consumer)
-        .to receive(:poll)
-        .with(anything)
-        .exactly(20).times
-        .and_return(new_message)
-
-      consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
-        yielded_batches << batch
-        break if yielded_batches.flatten.size >= 20
-        break if yielded_batches.size >= 20 # so failure doesn't hang
-      end
-      expect(yielded_batches.size).to eq 5
-      expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
-    end
-
-    context "error raised from poll and yield_on_error is true" do
-      it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_consumer_config(
-          {
-            :"enable.auto.commit" => false,
-            :"enable.auto.offset.store" => false
-          }
-        )
-        consumer = config.consumer
-        consumer.subscribe(topic_name)
-        batches_yielded = []
-        exceptions_yielded = []
-        each_batch_iterations = 0
-        poll_count = 0
-        expect(consumer)
-          .to receive(:poll)
-          .with(anything)
-          .exactly(3).times
-          .and_wrap_original do |method, *args|
-            poll_count = poll_count + 1
-            if poll_count == 3
-              raise Rdkafka::RdkafkaError.new(27,
-                "partitions ... too ... heavy ... must ... rebalance")
-            else
-              new_message
-            end
-          end
-        expect {
-          consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
-            batches_yielded << batch
-            exceptions_yielded << pending_error
-            each_batch_iterations = each_batch_iterations + 1
-          end
-        }.to raise_error(Rdkafka::RdkafkaError)
-        expect(poll_count).to eq 3
-        expect(each_batch_iterations).to eq 1
-        expect(batches_yielded.size).to eq 1
-        expect(batches_yielded.first.size).to eq 2
-        expect(exceptions_yielded.flatten.size).to eq 1
-        expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
-        consumer.close
-      end
-    end
-
-    context "error raised from poll and yield_on_error is false" do
-      it "should yield buffered exceptions on rebalance, then break" do
-        config = rdkafka_consumer_config(
-          {
-            :"enable.auto.commit" => false,
-            :"enable.auto.offset.store" => false
-          }
-        )
-        consumer = config.consumer
-        consumer.subscribe(topic_name)
-        batches_yielded = []
-        exceptions_yielded = []
-        each_batch_iterations = 0
-        poll_count = 0
-        expect(consumer)
-          .to receive(:poll)
-          .with(anything)
-          .exactly(3).times
-          .and_wrap_original do |method, *args|
-            poll_count = poll_count + 1
-            if poll_count == 3
-              raise Rdkafka::RdkafkaError.new(27,
-                "partitions ... too ... heavy ... must ... rebalance")
-            else
-              new_message
-            end
-          end
-        expect {
-          consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
-            batches_yielded << batch
-            exceptions_yielded << pending_error
-            each_batch_iterations = each_batch_iterations + 1
-          end
-        }.to raise_error(Rdkafka::RdkafkaError)
-        expect(poll_count).to eq 3
-        expect(each_batch_iterations).to eq 0
-        expect(batches_yielded.size).to eq 0
-        expect(exceptions_yielded.size).to eq 0
-        consumer.close
-      end
+    it 'expect to raise an error' do
+      expect do
+        consumer.each_batch {}
+      end.to raise_error(NotImplementedError)
     end
   end
 
@@ -1344,7 +1118,6 @@ describe Rdkafka::Consumer do
       {
        :subscribe => [ nil ],
        :unsubscribe => nil,
-        :each_batch => nil,
        :pause => [ nil ],
        :resume => [ nil ],
        :subscription => nil,
data/spec/rdkafka/producer_spec.rb
CHANGED
@@ -1002,4 +1002,44 @@ describe Rdkafka::Producer do
       end
     end
   end
+
+  describe "#produce with headers" do
+    it "should produce a message with array headers" do
+      headers = {
+        "version" => ["2.1.3", "2.1.4"],
+        "type" => "String"
+      }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: headers
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers['type']).to eq('String')
+      expect(message.headers['version']).to eq(["2.1.3", "2.1.4"])
+    end
+
+    it "should produce a message with single value headers" do
+      headers = {
+        "version" => "2.1.3",
+        "type" => "String"
+      }
+
+      report = producer.produce(
+        topic: "consume_test_topic",
+        key: "key headers",
+        headers: headers
+      ).wait
+
+      message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+      expect(message).to be
+      expect(message.key).to eq('key headers')
+      expect(message.headers['type']).to eq('String')
+      expect(message.headers['version']).to eq('2.1.3')
+    end
+  end
 end
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,12 +1,11 @@
 --- !ruby/object:Gem::Specification
 name: karafka-rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.19.1
 platform: ruby
 authors:
 - Thijs Cadier
 - Maciej Mensfeld
-autorequire:
 bindir: bin
 cert_chain:
 - |
@@ -36,7 +35,7 @@ cert_chain:
   i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
   ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
   -----END CERTIFICATE-----
-date:
+date: 2025-04-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -172,8 +171,10 @@ extensions:
 - ext/Rakefile
 extra_rdoc_files: []
 files:
+- ".github/CODEOWNERS"
 - ".github/FUNDING.yml"
 - ".github/workflows/ci.yml"
+- ".github/workflows/verify-action-pins.yml"
 - ".gitignore"
 - ".rspec"
 - ".ruby-gemset"
@@ -186,7 +187,7 @@ files:
 - README.md
 - Rakefile
 - certs/cert.pem
-- dist/librdkafka-2.
+- dist/librdkafka-2.8.0.tar.gz
 - dist/patches/rdkafka_global_init.patch
 - docker-compose.yml
 - ext/README.md
@@ -261,7 +262,6 @@ files:
 - spec/rdkafka/producer/delivery_report_spec.rb
 - spec/rdkafka/producer_spec.rb
 - spec/spec_helper.rb
-homepage:
 licenses:
 - MIT
 metadata:
@@ -272,7 +272,6 @@ metadata:
   source_code_uri: https://github.com/karafka/karafka-rdkafka
   documentation_uri: https://karafka.io/docs
   rubygems_mfa_required: 'true'
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -287,8 +286,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.6.2
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
   It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
metadata.gz.sig
CHANGED
Binary file