karafka-rdkafka 0.26.1-aarch64-linux-gnu → 0.27.0-aarch64-linux-gnu
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/README.md +2 -1
- data/ext/librdkafka.so +0 -0
- data/lib/rdkafka/admin.rb +6 -0
- data/lib/rdkafka/bindings.rb +3 -0
- data/lib/rdkafka/config.rb +3 -4
- data/lib/rdkafka/consumer.rb +150 -0
- data/lib/rdkafka/version.rb +3 -3
- data/renovate.json +4 -1
- metadata +1 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: f997aa68021eb19e490afc55841e4009da71e85a03a98decf53009b0284471e5
|
|
4
|
+
data.tar.gz: ddab9a03905ae72c7d984718f66736e1e1e9dad9ee48a40f04d55e9ab697a672
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: b0af437d10ef0c21da45c0d33f79adcd190ce2db7e6243cd2058aa6988067f319f4ca6fb942893a79e24e19d8298b324de00577168e3d792693161c722c5a221
|
|
7
|
+
data.tar.gz: 3bf2f40df79b4754751bb250f1adf2775d0c5c18fb887366cb75812e7e63b093fa660c229b8f2d1f58059cbbdcad7ab4a3a4bf507be8ba214eb00ad8e0f5dff4
|
data/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,12 @@
|
|
|
1
1
|
# Rdkafka Changelog
|
|
2
2
|
|
|
3
|
+
## 0.27.0 (2026-05-08)
|
|
4
|
+
- [Feature] Add `Consumer#poll_batch(timeout_ms, max_items:)` and `Consumer#poll_batch_nb(timeout_ms, max_items:)` for batch message polling via `rd_kafka_consume_batch_queue` (from upstream).
|
|
5
|
+
- [Enhancement] Bump librdkafka to `2.14.1`.
|
|
6
|
+
- [Fix] Fix resource leak in `Admin#describe_configs` and `Admin#incremental_alter_configs` where `admin_options_ptr` and `queue_ptr` were not destroyed in the ensure block (from upstream).
|
|
7
|
+
- [Fix] Fix leaked queue reference in `Config#native_kafka` where `rd_kafka_queue_get_main` return value was not destroyed after passing to `rd_kafka_set_log_queue` (from upstream).
|
|
8
|
+
- [Fix] Fix native topic partition list leak in `Consumer#position` where `tpl` was never destroyed (from upstream).
|
|
9
|
+
|
|
3
10
|
## 0.26.1 (2026-04-13)
|
|
4
11
|
- [Feature] Add `Config#describe_properties` to dump all librdkafka configuration properties (including defaults and hidden properties) as a Hash via `rd_kafka_conf_dump` (from upstream).
|
|
5
12
|
|
data/README.md
CHANGED
|
@@ -63,8 +63,9 @@ Contributions should generally be made to the upstream [rdkafka-ruby repository]
|
|
|
63
63
|
|
|
64
64
|
| rdkafka-ruby | librdkafka | patches |
|
|
65
65
|
|-|-|-|
|
|
66
|
+
| 0.27.x (2026-05-08) | 2.14.1 (2026-04-15) | yes |
|
|
66
67
|
| 0.26.x (2026-04-11) | 2.14.0 (2026-04-01) | yes |
|
|
67
|
-
| 0.25.x (2026-04-02) | 2.13.2 (2026-03-
|
|
68
|
+
| 0.25.x (2026-04-02) | 2.13.2 (2026-03-02) | yes |
|
|
68
69
|
| 0.24.x (2026-02-25) | 2.13.0 (2026-01-05) | yes |
|
|
69
70
|
| 0.23.x (2025-11-01) | 2.12.1 (2025-10-16) | yes |
|
|
70
71
|
| 0.22.x (2025-09-26) | 2.11.1 (2025-08-18) | yes |
|
data/ext/librdkafka.so
CHANGED
|
Binary file
|
data/lib/rdkafka/admin.rb
CHANGED
|
@@ -777,6 +777,9 @@ module Rdkafka
|
|
|
777
777
|
|
|
778
778
|
raise
|
|
779
779
|
ensure
|
|
780
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
781
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
782
|
+
|
|
780
783
|
if configs_array_ptr
|
|
781
784
|
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
782
785
|
configs_array_ptr,
|
|
@@ -865,6 +868,9 @@ module Rdkafka
|
|
|
865
868
|
|
|
866
869
|
raise
|
|
867
870
|
ensure
|
|
871
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
872
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
873
|
+
|
|
868
874
|
if configs_array_ptr
|
|
869
875
|
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
870
876
|
configs_array_ptr,
|
data/lib/rdkafka/bindings.rb
CHANGED
|
@@ -435,6 +435,9 @@ module Rdkafka
|
|
|
435
435
|
# More efficient for poll(0) calls in fiber schedulers.
|
|
436
436
|
attach_function :rd_kafka_consumer_poll_nb, :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: false
|
|
437
437
|
attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
|
|
438
|
+
attach_function :rd_kafka_queue_get_consumer, [:pointer], :pointer
|
|
439
|
+
attach_function :rd_kafka_consume_batch_queue, [:pointer, :int, :pointer, :size_t], :ssize_t, blocking: true
|
|
440
|
+
attach_function :rd_kafka_consume_batch_queue_nb, :rd_kafka_consume_batch_queue, [:pointer, :int, :pointer, :size_t], :ssize_t, blocking: false
|
|
438
441
|
attach_function :rd_kafka_offsets_store, [:pointer, :pointer], :int, blocking: true
|
|
439
442
|
attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
|
|
440
443
|
attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
|
data/lib/rdkafka/config.rb
CHANGED
|
@@ -404,10 +404,9 @@ module Rdkafka
|
|
|
404
404
|
end
|
|
405
405
|
|
|
406
406
|
# Redirect log to handle's queue
|
|
407
|
-
Rdkafka::Bindings.rd_kafka_set_log_queue(
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
)
|
|
407
|
+
main_queue = Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
|
|
408
|
+
Rdkafka::Bindings.rd_kafka_set_log_queue(handle, main_queue)
|
|
409
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(main_queue)
|
|
411
410
|
|
|
412
411
|
# Return handle which should be closed using rd_kafka_destroy after usage.
|
|
413
412
|
handle
|
data/lib/rdkafka/consumer.rb
CHANGED
|
@@ -183,6 +183,11 @@ module Rdkafka
|
|
|
183
183
|
|
|
184
184
|
@native_kafka.synchronize do |inner|
|
|
185
185
|
Rdkafka::Bindings.rd_kafka_consumer_close(inner)
|
|
186
|
+
|
|
187
|
+
if @consumer_queue
|
|
188
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(@consumer_queue)
|
|
189
|
+
@consumer_queue = nil
|
|
190
|
+
end
|
|
186
191
|
end
|
|
187
192
|
|
|
188
193
|
@native_kafka.close
|
|
@@ -421,6 +426,8 @@ module Rdkafka
|
|
|
421
426
|
end
|
|
422
427
|
|
|
423
428
|
TopicPartitionList.from_native_tpl(tpl)
|
|
429
|
+
ensure
|
|
430
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
|
|
424
431
|
end
|
|
425
432
|
|
|
426
433
|
# Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
|
|
@@ -783,6 +790,130 @@ module Rdkafka
|
|
|
783
790
|
end
|
|
784
791
|
end
|
|
785
792
|
|
|
793
|
+
# Poll for a batch of messages from the consumer queue in a single FFI call.
|
|
794
|
+
#
|
|
795
|
+
# This is more efficient than calling {#poll} in a loop because it crosses the FFI
|
|
796
|
+
# boundary only once to fetch up to `max_items` messages.
|
|
797
|
+
#
|
|
798
|
+
# The timeout controls how long to wait for the **first** message. Once any message
|
|
799
|
+
# is available, librdkafka fills the buffer with whatever is immediately ready and
|
|
800
|
+
# returns without further waiting.
|
|
801
|
+
#
|
|
802
|
+
# @param timeout_ms [Integer] Timeout waiting for the first message (-1 for infinite)
|
|
803
|
+
# @param max_items [Integer] Maximum number of messages to return per call
|
|
804
|
+
# @return [Array<Message>] Array of messages (empty if none available within timeout)
|
|
805
|
+
# @raise [RdkafkaError] When a consumed message contains an error
|
|
806
|
+
# @raise [ClosedConsumerError] When called on a closed consumer
|
|
807
|
+
def poll_batch(timeout_ms, max_items: 100)
|
|
808
|
+
closed_consumer_check(__method__)
|
|
809
|
+
|
|
810
|
+
buffer = batch_buffer(max_items)
|
|
811
|
+
messages = []
|
|
812
|
+
|
|
813
|
+
count = @native_kafka.with_inner do |_inner|
|
|
814
|
+
Rdkafka::Bindings.rd_kafka_consume_batch_queue(
|
|
815
|
+
consumer_queue,
|
|
816
|
+
timeout_ms,
|
|
817
|
+
buffer,
|
|
818
|
+
max_items
|
|
819
|
+
)
|
|
820
|
+
end
|
|
821
|
+
|
|
822
|
+
return messages if count <= 0
|
|
823
|
+
|
|
824
|
+
i = 0
|
|
825
|
+
begin
|
|
826
|
+
while i < count
|
|
827
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
828
|
+
|
|
829
|
+
if ptr.null?
|
|
830
|
+
i += 1
|
|
831
|
+
next
|
|
832
|
+
end
|
|
833
|
+
|
|
834
|
+
native_message = Rdkafka::Bindings::Message.new(ptr)
|
|
835
|
+
|
|
836
|
+
if native_message[:err] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
837
|
+
raise Rdkafka::RdkafkaError.new(native_message[:err])
|
|
838
|
+
end
|
|
839
|
+
|
|
840
|
+
messages << Rdkafka::Consumer::Message.new(native_message)
|
|
841
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr)
|
|
842
|
+
i += 1
|
|
843
|
+
end
|
|
844
|
+
ensure
|
|
845
|
+
while i < count
|
|
846
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
847
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr) unless ptr.null?
|
|
848
|
+
i += 1
|
|
849
|
+
end
|
|
850
|
+
end
|
|
851
|
+
|
|
852
|
+
messages
|
|
853
|
+
end
|
|
854
|
+
|
|
855
|
+
# Poll for a batch of messages without releasing the GVL (Global VM Lock).
|
|
856
|
+
#
|
|
857
|
+
# This is more efficient than {#poll_batch} for non-blocking poll(0) calls,
|
|
858
|
+
# particularly useful in fiber scheduler contexts where GVL release/reacquire
|
|
859
|
+
# overhead is wasteful since we don't expect to wait.
|
|
860
|
+
#
|
|
861
|
+
# @note Since the GVL is not released, a non-zero timeout_ms will block all Ruby
|
|
862
|
+
# threads/fibers for the duration. Use {#poll_batch} if you need a blocking wait.
|
|
863
|
+
#
|
|
864
|
+
# @param timeout_ms [Integer] Timeout waiting for the first message (default: 0 for non-blocking)
|
|
865
|
+
# @param max_items [Integer] Maximum number of messages to return per call
|
|
866
|
+
# @return [Array<Message>] Array of messages (empty if none available within timeout)
|
|
867
|
+
# @raise [RdkafkaError] When a consumed message contains an error
|
|
868
|
+
# @raise [ClosedConsumerError] When called on a closed consumer
|
|
869
|
+
def poll_batch_nb(timeout_ms = 0, max_items: 100)
|
|
870
|
+
closed_consumer_check(__method__)
|
|
871
|
+
|
|
872
|
+
buffer = batch_buffer(max_items)
|
|
873
|
+
messages = []
|
|
874
|
+
|
|
875
|
+
count = @native_kafka.with_inner do |_inner|
|
|
876
|
+
Rdkafka::Bindings.rd_kafka_consume_batch_queue_nb(
|
|
877
|
+
consumer_queue,
|
|
878
|
+
timeout_ms,
|
|
879
|
+
buffer,
|
|
880
|
+
max_items
|
|
881
|
+
)
|
|
882
|
+
end
|
|
883
|
+
|
|
884
|
+
return messages if count <= 0
|
|
885
|
+
|
|
886
|
+
i = 0
|
|
887
|
+
begin
|
|
888
|
+
while i < count
|
|
889
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
890
|
+
|
|
891
|
+
if ptr.null?
|
|
892
|
+
i += 1
|
|
893
|
+
next
|
|
894
|
+
end
|
|
895
|
+
|
|
896
|
+
native_message = Rdkafka::Bindings::Message.new(ptr)
|
|
897
|
+
|
|
898
|
+
if native_message[:err] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
899
|
+
raise Rdkafka::RdkafkaError.new(native_message[:err])
|
|
900
|
+
end
|
|
901
|
+
|
|
902
|
+
messages << Rdkafka::Consumer::Message.new(native_message)
|
|
903
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr)
|
|
904
|
+
i += 1
|
|
905
|
+
end
|
|
906
|
+
ensure
|
|
907
|
+
while i < count
|
|
908
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
909
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr) unless ptr.null?
|
|
910
|
+
i += 1
|
|
911
|
+
end
|
|
912
|
+
end
|
|
913
|
+
|
|
914
|
+
messages
|
|
915
|
+
end
|
|
916
|
+
|
|
786
917
|
# Poll for new messages and yield for each received one. Iteration
|
|
787
918
|
# will end when the consumer is closed.
|
|
788
919
|
#
|
|
@@ -855,5 +986,24 @@ module Rdkafka
|
|
|
855
986
|
def closed_consumer_check(method)
|
|
856
987
|
raise Rdkafka::ClosedConsumerError.new(method) if closed?
|
|
857
988
|
end
|
|
989
|
+
|
|
990
|
+
# Returns the consumer queue pointer, lazily initialized
|
|
991
|
+
# @return [FFI::Pointer] consumer queue handle
|
|
992
|
+
def consumer_queue
|
|
993
|
+
@consumer_queue ||= @native_kafka.with_inner do |inner|
|
|
994
|
+
Rdkafka::Bindings.rd_kafka_queue_get_consumer(inner)
|
|
995
|
+
end
|
|
996
|
+
end
|
|
997
|
+
|
|
998
|
+
# Returns a reusable FFI buffer for batch polling, growing if needed
|
|
999
|
+
# @param max_items [Integer] minimum buffer capacity
|
|
1000
|
+
# @return [FFI::MemoryPointer] pointer buffer
|
|
1001
|
+
def batch_buffer(max_items)
|
|
1002
|
+
if @batch_buffer.nil? || @batch_buffer_size < max_items
|
|
1003
|
+
@batch_buffer = FFI::MemoryPointer.new(:pointer, max_items)
|
|
1004
|
+
@batch_buffer_size = max_items
|
|
1005
|
+
end
|
|
1006
|
+
@batch_buffer
|
|
1007
|
+
end
|
|
858
1008
|
end
|
|
859
1009
|
end
|
data/lib/rdkafka/version.rb
CHANGED
|
@@ -2,9 +2,9 @@
|
|
|
2
2
|
|
|
3
3
|
module Rdkafka
|
|
4
4
|
# Current rdkafka-ruby gem version
|
|
5
|
-
VERSION = "0.26.1"
|
|
5
|
+
VERSION = "0.27.0"
|
|
6
6
|
# Target librdkafka version to be used
|
|
7
|
-
LIBRDKAFKA_VERSION = "2.14.0"
|
|
7
|
+
LIBRDKAFKA_VERSION = "2.14.1"
|
|
8
8
|
# SHA256 hash of the librdkafka source tarball for verification
|
|
9
|
-
LIBRDKAFKA_SOURCE_SHA256 = "
|
|
9
|
+
LIBRDKAFKA_SOURCE_SHA256 = "bb246e754dee3560e9b42bf4e844dc05de4b146a3cae937e36301ffacdc456e7"
|
|
10
10
|
end
|
data/renovate.json
CHANGED