rdkafka 0.25.1-x86_64-linux-gnu → 0.27.0-x86_64-linux-gnu
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -0
- data/README.md +2 -0
- data/bin/verify_kafka_warnings +2 -0
- data/docker-compose-ssl.yml +2 -2
- data/docker-compose.yml +2 -2
- data/ext/librdkafka.so +0 -0
- data/lib/rdkafka/admin/list_offsets_handle.rb +36 -0
- data/lib/rdkafka/admin/list_offsets_report.rb +51 -0
- data/lib/rdkafka/admin.rb +111 -6
- data/lib/rdkafka/bindings.rb +30 -2
- data/lib/rdkafka/callbacks.rb +43 -0
- data/lib/rdkafka/config.rb +45 -6
- data/lib/rdkafka/consumer.rb +153 -6
- data/lib/rdkafka/error.rb +13 -2
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +2 -0
- data/package-lock.json +3 -3
- data/renovate.json +4 -1
- metadata +4 -2
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: d0c56d579e4d1a59b3d55fda47b2685249d5eeaf4bfaf4c9b12155d6d37ab7dd
|
|
4
|
+
data.tar.gz: baaeb33d9c536e8974b97800f8918693b946a93f7ef84e239905845c31a74e77
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 70aaae7594ec3f565e0d8e8af3b45c67232a1e57719c9e8c779da32789cb0eb8362c568af67bc263d6428815afcb531df4fb44add4f36fdf5edfa1d432514c34
|
|
7
|
+
data.tar.gz: 308256385ba96717ad97a8f8c609629fa513f161bba63b5d22d5cc4295921094712a22f1632ac63d692e78d18d6d9e3a8e0068ff2d655073f049eb3ccac945fd
|
data/CHANGELOG.md
CHANGED
|
@@ -1,6 +1,23 @@
|
|
|
1
1
|
# Rdkafka Changelog
|
|
2
2
|
|
|
3
|
+
## 0.27.0 (2026-05-07)
|
|
4
|
+
- [Feature] Add `Consumer#poll_batch(timeout_ms, max_items:)` and `Consumer#poll_batch_nb(timeout_ms, max_items:)` for batch message polling via `rd_kafka_consume_batch_queue`.
|
|
5
|
+
- [Feature] Add `Config#describe_properties` to dump all librdkafka configuration properties (including defaults and hidden properties) as a Hash via `rd_kafka_conf_dump`.
|
|
6
|
+
- [Enhancement] Bump librdkafka to `2.14.0`
|
|
7
|
+
- [Fix] Fix resource leak in `Admin#describe_configs` and `Admin#incremental_alter_configs` where `admin_options_ptr` and `queue_ptr` were not destroyed in the ensure block.
|
|
8
|
+
- [Fix] Fix leaked queue reference in `Config#native_kafka` where `rd_kafka_queue_get_main` return value was not destroyed after passing to `rd_kafka_set_log_queue`.
|
|
9
|
+
- [Fix] Fix native topic partition list leak in `Consumer#position` where `tpl` was never destroyed.
|
|
10
|
+
|
|
11
|
+
## 0.26.0 (2026-04-02)
|
|
12
|
+
- [Enhancement] Bump librdkafka to `2.13.2`
|
|
13
|
+
- [Enhancement] Embed a per-file SPEC_HASH in test topic and consumer group names for tracing Kafka warnings back to specific spec files.
|
|
14
|
+
- [Fix] Register `ObjectSpace.define_finalizer` in `Rdkafka::Consumer` to prevent segfaults when a consumer is GC'd without being explicitly closed (jturkel).
|
|
15
|
+
- [Fix] Remove dead `#finalizer` instance methods from `Consumer` and `Admin` that could never work as GC finalizers.
|
|
16
|
+
- [Fix] Prevent cascading test failures in admin specs when a single handle leaks into the registry.
|
|
17
|
+
- [Feature] Extend `Rdkafka::RdkafkaError` with `instance_name` attribute containing the `rd_kafka_name` for tying errors back to specific native Kafka instances (#181).
|
|
18
|
+
|
|
3
19
|
## 0.25.1 (2026-02-26)
|
|
20
|
+
- [Feature] Support `rd_kafka_ListOffsets` admin API for querying partition offsets by specification (earliest, latest, max_timestamp, or by timestamp) without requiring a consumer group (#613).
|
|
4
21
|
- [Enhancement] Use native ARM64 runners instead of QEMU emulation for Alpine musl aarch64 builds, improving build performance and reliability.
|
|
5
22
|
- [Enhancement] Enable parallel compilation (`make -j$(nproc)`) for ARM64 Alpine musl builds.
|
|
6
23
|
- [Enhancement] Add file descriptor API for fiber scheduler integration. Expose `enable_queue_io_events` and `enable_background_queue_io_events` on `Consumer`, `Producer`, and `Admin` to enable non-blocking monitoring with select/poll/epoll for integration with Ruby fiber schedulers (Falcon, Async) and custom event loops.
|
data/README.md
CHANGED
|
@@ -163,6 +163,8 @@ bundle exec rake produce_messages
|
|
|
163
163
|
|
|
164
164
|
| rdkafka-ruby | librdkafka | patches |
|
|
165
165
|
|-|-|-|
|
|
166
|
+
| 0.27.x (2026-05-07) | 2.14.0 (2026-04-01) | yes |
|
|
167
|
+
| 0.26.x (2026-03-30) | 2.13.2 (2026-03-02) | yes |
|
|
166
168
|
| 0.25.x (2026-01-21) | 2.12.1 (2025-10-21) | yes |
|
|
167
169
|
| 0.24.x (2025-10-10) | 2.11.1 (2025-08-18) | yes |
|
|
168
170
|
| 0.23.x (2025-09-04) | 2.11.0 (2025-07-03) | yes |
|
data/bin/verify_kafka_warnings
CHANGED
data/docker-compose-ssl.yml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
services:
|
|
2
2
|
kafka:
|
|
3
3
|
container_name: kafka
|
|
4
|
-
image: confluentinc/cp-kafka:8.
|
|
4
|
+
image: confluentinc/cp-kafka:8.2.0
|
|
5
5
|
ports:
|
|
6
6
|
- 9092:9092 # Support PLAINTEXT so we can run one docker setup for SSL and PLAINTEXT
|
|
7
7
|
- 9093:9093
|
|
@@ -19,7 +19,7 @@ services:
|
|
|
19
19
|
KAFKA_BROKER_ID: 1
|
|
20
20
|
KAFKA_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:9094
|
|
21
21
|
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
|
22
|
-
KAFKA_AUTO_CREATE_TOPICS_ENABLE: '
|
|
22
|
+
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
|
23
23
|
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
|
24
24
|
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
|
25
25
|
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
|
data/docker-compose.yml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
services:
|
|
2
2
|
kafka:
|
|
3
3
|
container_name: kafka
|
|
4
|
-
image: confluentinc/cp-kafka:8.
|
|
4
|
+
image: confluentinc/cp-kafka:8.2.0
|
|
5
5
|
|
|
6
6
|
ports:
|
|
7
7
|
- 9092:9092
|
|
@@ -18,7 +18,7 @@ services:
|
|
|
18
18
|
KAFKA_BROKER_ID: 1
|
|
19
19
|
KAFKA_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:9093
|
|
20
20
|
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
|
21
|
-
KAFKA_AUTO_CREATE_TOPICS_ENABLE: '
|
|
21
|
+
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
|
22
22
|
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
|
23
23
|
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
|
24
24
|
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
|
data/ext/librdkafka.so
CHANGED
|
Binary file
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Rdkafka
|
|
4
|
+
class Admin
|
|
5
|
+
# Handle for list offsets operation
|
|
6
|
+
class ListOffsetsHandle < AbstractHandle
|
|
7
|
+
layout :pending, :bool,
|
|
8
|
+
:response, :int,
|
|
9
|
+
:response_string, :pointer,
|
|
10
|
+
:result_infos, :pointer,
|
|
11
|
+
:result_count, :int
|
|
12
|
+
|
|
13
|
+
# @return [String] the name of the operation.
|
|
14
|
+
def operation_name
|
|
15
|
+
"list offsets"
|
|
16
|
+
end
|
|
17
|
+
|
|
18
|
+
# @return [ListOffsetsReport] instance with partition offset information.
|
|
19
|
+
def create_result
|
|
20
|
+
ListOffsetsReport.new(
|
|
21
|
+
result_infos: self[:result_infos],
|
|
22
|
+
result_count: self[:result_count]
|
|
23
|
+
)
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
# Raises an error if the operation failed
|
|
27
|
+
# @raise [RdkafkaError]
|
|
28
|
+
def raise_error
|
|
29
|
+
raise RdkafkaError.new(
|
|
30
|
+
self[:response],
|
|
31
|
+
broker_message: self[:response_string].read_string
|
|
32
|
+
)
|
|
33
|
+
end
|
|
34
|
+
end
|
|
35
|
+
end
|
|
36
|
+
end
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Rdkafka
|
|
4
|
+
class Admin
|
|
5
|
+
# Report for list offsets operation result
|
|
6
|
+
class ListOffsetsReport
|
|
7
|
+
attr_reader :offsets
|
|
8
|
+
|
|
9
|
+
# @param result_infos [FFI::Pointer] pointer to result info array
|
|
10
|
+
# @param result_count [Integer] number of result info entries
|
|
11
|
+
def initialize(result_infos:, result_count:)
|
|
12
|
+
@offsets = []
|
|
13
|
+
|
|
14
|
+
return if result_infos.null?
|
|
15
|
+
|
|
16
|
+
result_infos
|
|
17
|
+
.read_array_of_pointer(result_count)
|
|
18
|
+
.each { |result_info_ptr| validate!(result_info_ptr) }
|
|
19
|
+
.each do |result_info_ptr|
|
|
20
|
+
tp_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(result_info_ptr)
|
|
21
|
+
tp = Bindings::TopicPartition.new(tp_ptr)
|
|
22
|
+
timestamp = Bindings.rd_kafka_ListOffsetsResultInfo_timestamp(result_info_ptr)
|
|
23
|
+
leader_epoch = Bindings.rd_kafka_topic_partition_get_leader_epoch(tp_ptr)
|
|
24
|
+
|
|
25
|
+
@offsets << {
|
|
26
|
+
topic: tp[:topic],
|
|
27
|
+
partition: tp[:partition],
|
|
28
|
+
offset: tp[:offset],
|
|
29
|
+
timestamp: timestamp,
|
|
30
|
+
leader_epoch: (leader_epoch == -1) ? nil : leader_epoch
|
|
31
|
+
}
|
|
32
|
+
end
|
|
33
|
+
end
|
|
34
|
+
|
|
35
|
+
private
|
|
36
|
+
|
|
37
|
+
# Validates the partition result and raises an error if invalid
|
|
38
|
+
# @param result_info_ptr [FFI::Pointer] pointer to the result info
|
|
39
|
+
# @raise [RdkafkaError] when the partition has an error
|
|
40
|
+
def validate!(result_info_ptr)
|
|
41
|
+
tp_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(result_info_ptr)
|
|
42
|
+
tp = Bindings::TopicPartition.new(tp_ptr)
|
|
43
|
+
code = tp[:err]
|
|
44
|
+
|
|
45
|
+
return if code.zero?
|
|
46
|
+
|
|
47
|
+
raise RdkafkaError.new(code)
|
|
48
|
+
end
|
|
49
|
+
end
|
|
50
|
+
end
|
|
51
|
+
end
|
data/lib/rdkafka/admin.rb
CHANGED
|
@@ -130,12 +130,6 @@ module Rdkafka
|
|
|
130
130
|
end
|
|
131
131
|
end
|
|
132
132
|
|
|
133
|
-
# @return [Proc] finalizer proc for closing the admin
|
|
134
|
-
# @private
|
|
135
|
-
def finalizer
|
|
136
|
-
->(_) { close }
|
|
137
|
-
end
|
|
138
|
-
|
|
139
133
|
# Performs the metadata request using admin
|
|
140
134
|
#
|
|
141
135
|
# @param topic_name [String, nil] metadat about particular topic or all if nil
|
|
@@ -783,6 +777,9 @@ module Rdkafka
|
|
|
783
777
|
|
|
784
778
|
raise
|
|
785
779
|
ensure
|
|
780
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
781
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
782
|
+
|
|
786
783
|
if configs_array_ptr
|
|
787
784
|
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
788
785
|
configs_array_ptr,
|
|
@@ -871,6 +868,9 @@ module Rdkafka
|
|
|
871
868
|
|
|
872
869
|
raise
|
|
873
870
|
ensure
|
|
871
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
872
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
873
|
+
|
|
874
874
|
if configs_array_ptr
|
|
875
875
|
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
876
876
|
configs_array_ptr,
|
|
@@ -882,6 +882,111 @@ module Rdkafka
|
|
|
882
882
|
handle
|
|
883
883
|
end
|
|
884
884
|
|
|
885
|
+
# Queries partition offsets by specification (earliest, latest, max_timestamp, or by
|
|
886
|
+
# timestamp) without requiring a consumer group.
|
|
887
|
+
#
|
|
888
|
+
# @param topic_partition_offsets [Hash{String => Array<Hash>}] hash mapping topic names to
|
|
889
|
+
# arrays of partition offset specifications. Each specification is a hash with:
|
|
890
|
+
# - `:partition` [Integer] partition number
|
|
891
|
+
# - `:offset` [Symbol, Integer] offset specification - `:earliest`, `:latest`,
|
|
892
|
+
# `:max_timestamp`, or an integer timestamp in milliseconds
|
|
893
|
+
# @param isolation_level [Integer, nil] optional isolation level:
|
|
894
|
+
# - `RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED` (0) - default
|
|
895
|
+
# - `RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED` (1)
|
|
896
|
+
#
|
|
897
|
+
# @return [ListOffsetsHandle] handle that can be used to wait for the result
|
|
898
|
+
#
|
|
899
|
+
# @raise [ClosedAdminError] when the admin is closed
|
|
900
|
+
# @raise [ConfigError] when the background queue is unavailable
|
|
901
|
+
#
|
|
902
|
+
# @example Query earliest and latest offsets
|
|
903
|
+
# handle = admin.list_offsets(
|
|
904
|
+
# { "my_topic" => [
|
|
905
|
+
# { partition: 0, offset: :earliest },
|
|
906
|
+
# { partition: 1, offset: :latest }
|
|
907
|
+
# ] }
|
|
908
|
+
# )
|
|
909
|
+
# report = handle.wait(max_wait_timeout_ms: 15_000)
|
|
910
|
+
# report.offsets
|
|
911
|
+
# # => [{ topic: "my_topic", partition: 0, offset: 0, ... }, ...]
|
|
912
|
+
def list_offsets(topic_partition_offsets, isolation_level: nil)
|
|
913
|
+
closed_admin_check(__method__)
|
|
914
|
+
|
|
915
|
+
# Count total partitions for pre-allocation
|
|
916
|
+
total = topic_partition_offsets.sum { |_, partitions| partitions.size }
|
|
917
|
+
|
|
918
|
+
# Build native topic partition list
|
|
919
|
+
tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(total)
|
|
920
|
+
|
|
921
|
+
topic_partition_offsets.each do |topic, partitions|
|
|
922
|
+
partitions.each do |spec|
|
|
923
|
+
partition = spec.fetch(:partition)
|
|
924
|
+
offset = spec.fetch(:offset)
|
|
925
|
+
|
|
926
|
+
native_offset = case offset
|
|
927
|
+
when :earliest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_EARLIEST
|
|
928
|
+
when :latest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_LATEST
|
|
929
|
+
when :max_timestamp then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP
|
|
930
|
+
when Integer then offset
|
|
931
|
+
else
|
|
932
|
+
raise ArgumentError, "Unknown offset specification: #{offset.inspect}"
|
|
933
|
+
end
|
|
934
|
+
|
|
935
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, partition)
|
|
936
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(tpl, topic, partition, native_offset)
|
|
937
|
+
end
|
|
938
|
+
end
|
|
939
|
+
|
|
940
|
+
# Get a pointer to the queue that our request will be enqueued on
|
|
941
|
+
queue_ptr = @native_kafka.with_inner do |inner|
|
|
942
|
+
Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
|
|
943
|
+
end
|
|
944
|
+
|
|
945
|
+
if queue_ptr.null?
|
|
946
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
|
|
947
|
+
raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
|
|
948
|
+
end
|
|
949
|
+
|
|
950
|
+
# Create and register the handle we will return to the caller
|
|
951
|
+
handle = ListOffsetsHandle.new
|
|
952
|
+
handle[:pending] = true
|
|
953
|
+
handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
|
|
954
|
+
|
|
955
|
+
admin_options_ptr = @native_kafka.with_inner do |inner|
|
|
956
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_new(
|
|
957
|
+
inner,
|
|
958
|
+
Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_LISTOFFSETS
|
|
959
|
+
)
|
|
960
|
+
end
|
|
961
|
+
|
|
962
|
+
if isolation_level
|
|
963
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_set_isolation_level(admin_options_ptr, isolation_level)
|
|
964
|
+
end
|
|
965
|
+
|
|
966
|
+
ListOffsetsHandle.register(handle)
|
|
967
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, handle.to_ptr)
|
|
968
|
+
|
|
969
|
+
begin
|
|
970
|
+
@native_kafka.with_inner do |inner|
|
|
971
|
+
Rdkafka::Bindings.rd_kafka_ListOffsets(
|
|
972
|
+
inner,
|
|
973
|
+
tpl,
|
|
974
|
+
admin_options_ptr,
|
|
975
|
+
queue_ptr
|
|
976
|
+
)
|
|
977
|
+
end
|
|
978
|
+
rescue Exception
|
|
979
|
+
ListOffsetsHandle.remove(handle.to_ptr.address)
|
|
980
|
+
raise
|
|
981
|
+
ensure
|
|
982
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
983
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
984
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
|
|
985
|
+
end
|
|
986
|
+
|
|
987
|
+
handle
|
|
988
|
+
end
|
|
989
|
+
|
|
885
990
|
private
|
|
886
991
|
|
|
887
992
|
# Checks if the admin is closed and raises an error if so
|
data/lib/rdkafka/bindings.rb
CHANGED
|
@@ -188,6 +188,27 @@ module Rdkafka
|
|
|
188
188
|
RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2
|
|
189
189
|
RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3
|
|
190
190
|
|
|
191
|
+
# List Offsets
|
|
192
|
+
RD_KAFKA_ADMIN_OP_LISTOFFSETS = 20
|
|
193
|
+
RD_KAFKA_EVENT_LISTOFFSETS_RESULT = 0x400000
|
|
194
|
+
|
|
195
|
+
# rd_kafka_IsolationLevel_t
|
|
196
|
+
RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0
|
|
197
|
+
RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1
|
|
198
|
+
|
|
199
|
+
# rd_kafka_OffsetSpec_t
|
|
200
|
+
RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3
|
|
201
|
+
RD_KAFKA_OFFSET_SPEC_EARLIEST = -2
|
|
202
|
+
RD_KAFKA_OFFSET_SPEC_LATEST = -1
|
|
203
|
+
|
|
204
|
+
attach_function :rd_kafka_ListOffsets, [:pointer, :pointer, :pointer, :pointer], :void, blocking: true
|
|
205
|
+
attach_function :rd_kafka_event_ListOffsets_result, [:pointer], :pointer
|
|
206
|
+
attach_function :rd_kafka_ListOffsets_result_infos, [:pointer, :pointer], :pointer
|
|
207
|
+
attach_function :rd_kafka_ListOffsetsResultInfo_topic_partition, [:pointer], :pointer
|
|
208
|
+
attach_function :rd_kafka_ListOffsetsResultInfo_timestamp, [:pointer], :int64
|
|
209
|
+
attach_function :rd_kafka_AdminOptions_set_isolation_level, [:pointer, :int], :pointer
|
|
210
|
+
attach_function :rd_kafka_topic_partition_get_leader_epoch, [:pointer], :int32
|
|
211
|
+
|
|
191
212
|
# FFI struct for error description (rd_kafka_err_desc)
|
|
192
213
|
class NativeErrorDesc < FFI::Struct
|
|
193
214
|
layout :code, :int,
|
|
@@ -213,6 +234,9 @@ module Rdkafka
|
|
|
213
234
|
|
|
214
235
|
attach_function :rd_kafka_conf_new, [], :pointer
|
|
215
236
|
attach_function :rd_kafka_conf_set, [:pointer, :string, :string, :pointer, :int], :kafka_config_response
|
|
237
|
+
attach_function :rd_kafka_conf_dump, [:pointer, :pointer], :pointer
|
|
238
|
+
attach_function :rd_kafka_conf_dump_free, [:pointer, :size_t], :void
|
|
239
|
+
attach_function :rd_kafka_conf_destroy, [:pointer], :void
|
|
216
240
|
callback :log_cb, [:pointer, :int, :string, :string], :void
|
|
217
241
|
attach_function :rd_kafka_conf_set_log_cb, [:pointer, :log_cb], :void
|
|
218
242
|
attach_function :rd_kafka_conf_set_opaque, [:pointer, :pointer], :void
|
|
@@ -300,9 +324,10 @@ module Rdkafka
|
|
|
300
324
|
|
|
301
325
|
ErrorCallback = FFI::Function.new(
|
|
302
326
|
:void, [:pointer, :int, :string, :pointer]
|
|
303
|
-
) do |
|
|
327
|
+
) do |client_ptr, err_code, reason, _opaque|
|
|
304
328
|
if Rdkafka::Config.error_callback
|
|
305
|
-
|
|
329
|
+
instance_name = client_ptr.null? ? nil : Rdkafka::Bindings.rd_kafka_name(client_ptr)
|
|
330
|
+
error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason, instance_name: instance_name)
|
|
306
331
|
error.set_backtrace(caller)
|
|
307
332
|
Rdkafka::Config.error_callback.call(error)
|
|
308
333
|
end
|
|
@@ -361,6 +386,9 @@ module Rdkafka
|
|
|
361
386
|
# More efficient for poll(0) calls in fiber schedulers.
|
|
362
387
|
attach_function :rd_kafka_consumer_poll_nb, :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: false
|
|
363
388
|
attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
|
|
389
|
+
attach_function :rd_kafka_queue_get_consumer, [:pointer], :pointer
|
|
390
|
+
attach_function :rd_kafka_consume_batch_queue, [:pointer, :int, :pointer, :size_t], :ssize_t, blocking: true
|
|
391
|
+
attach_function :rd_kafka_consume_batch_queue_nb, :rd_kafka_consume_batch_queue, [:pointer, :int, :pointer, :size_t], :ssize_t, blocking: false
|
|
364
392
|
attach_function :rd_kafka_offsets_store, [:pointer, :pointer], :int, blocking: true
|
|
365
393
|
attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
|
|
366
394
|
attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
|
data/lib/rdkafka/callbacks.rb
CHANGED
|
@@ -180,6 +180,27 @@ module Rdkafka
|
|
|
180
180
|
end
|
|
181
181
|
end
|
|
182
182
|
|
|
183
|
+
# Extracts attributes of rd_kafka_ListOffsets_result_t
|
|
184
|
+
#
|
|
185
|
+
# @private
|
|
186
|
+
class ListOffsetsResult
|
|
187
|
+
attr_reader :result_error, :error_string, :result_infos, :result_count
|
|
188
|
+
|
|
189
|
+
# @param event_ptr [FFI::Pointer] pointer to the event
|
|
190
|
+
def initialize(event_ptr)
|
|
191
|
+
@result_infos = FFI::Pointer::NULL
|
|
192
|
+
@result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
|
|
193
|
+
@error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
|
|
194
|
+
|
|
195
|
+
if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
196
|
+
list_offsets_result = Rdkafka::Bindings.rd_kafka_event_ListOffsets_result(event_ptr)
|
|
197
|
+
pointer_to_size_t = FFI::MemoryPointer.new(:size_t)
|
|
198
|
+
@result_infos = Rdkafka::Bindings.rd_kafka_ListOffsets_result_infos(list_offsets_result, pointer_to_size_t)
|
|
199
|
+
@result_count = pointer_to_size_t.read(:size_t)
|
|
200
|
+
end
|
|
201
|
+
end
|
|
202
|
+
end
|
|
203
|
+
|
|
183
204
|
# @private
|
|
184
205
|
class BackgroundEventCallback
|
|
185
206
|
# Handles background events from librdkafka
|
|
@@ -206,6 +227,8 @@ module Rdkafka
|
|
|
206
227
|
process_describe_acl(event_ptr)
|
|
207
228
|
when Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
|
|
208
229
|
process_delete_groups(event_ptr)
|
|
230
|
+
when Rdkafka::Bindings::RD_KAFKA_EVENT_LISTOFFSETS_RESULT
|
|
231
|
+
process_list_offsets(event_ptr)
|
|
209
232
|
end
|
|
210
233
|
end
|
|
211
234
|
|
|
@@ -392,6 +415,26 @@ module Rdkafka
|
|
|
392
415
|
describe_acl_handle.unlock
|
|
393
416
|
end
|
|
394
417
|
end
|
|
418
|
+
|
|
419
|
+
# Processes list offsets result event
|
|
420
|
+
# @param event_ptr [FFI::Pointer] pointer to the event
|
|
421
|
+
def self.process_list_offsets(event_ptr)
|
|
422
|
+
list_offsets = ListOffsetsResult.new(event_ptr)
|
|
423
|
+
list_offsets_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
|
|
424
|
+
|
|
425
|
+
if list_offsets_handle = Rdkafka::Admin::ListOffsetsHandle.remove(list_offsets_handle_ptr.address)
|
|
426
|
+
list_offsets_handle[:response] = list_offsets.result_error
|
|
427
|
+
list_offsets_handle[:response_string] = list_offsets.error_string
|
|
428
|
+
list_offsets_handle[:pending] = false
|
|
429
|
+
|
|
430
|
+
if list_offsets.result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
431
|
+
list_offsets_handle[:result_infos] = list_offsets.result_infos
|
|
432
|
+
list_offsets_handle[:result_count] = list_offsets.result_count
|
|
433
|
+
end
|
|
434
|
+
|
|
435
|
+
list_offsets_handle.unlock
|
|
436
|
+
end
|
|
437
|
+
end
|
|
395
438
|
end
|
|
396
439
|
|
|
397
440
|
# @private
|
data/lib/rdkafka/config.rb
CHANGED
|
@@ -89,10 +89,10 @@ module Rdkafka
|
|
|
89
89
|
# If this callback is not set, global errors such as brokers becoming unavailable will only be sent to the logger, as defined by librdkafka.
|
|
90
90
|
# The callback is called with an instance of RdKafka::Error.
|
|
91
91
|
#
|
|
92
|
-
# @param callback [Proc, #call] callable object to handle errors
|
|
92
|
+
# @param callback [Proc, #call, nil] callable object to handle errors or nil to clear
|
|
93
93
|
# @return [nil]
|
|
94
94
|
def self.error_callback=(callback)
|
|
95
|
-
raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
|
|
95
|
+
raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call) || callback.nil?
|
|
96
96
|
@@error_callback = callback
|
|
97
97
|
end
|
|
98
98
|
|
|
@@ -285,6 +285,46 @@ module Rdkafka
|
|
|
285
285
|
)
|
|
286
286
|
end
|
|
287
287
|
|
|
288
|
+
# Returns all configuration properties and their current values for this config.
|
|
289
|
+
#
|
|
290
|
+
# Uses `rd_kafka_conf_dump` to retrieve every property (including defaults and
|
|
291
|
+
# internal properties like `client.software.name`) as a flat Hash.
|
|
292
|
+
#
|
|
293
|
+
# @note The librdkafka C API does not distinguish between producer-only, consumer-only,
|
|
294
|
+
# and global properties at the configuration level. All properties are returned
|
|
295
|
+
# regardless of the intended client type.
|
|
296
|
+
#
|
|
297
|
+
# @note The returned Hash may include sensitive values such as authentication
|
|
298
|
+
# credentials and key passwords. Do not log or serialize the returned data
|
|
299
|
+
# unless you have explicitly redacted secret entries.
|
|
300
|
+
#
|
|
301
|
+
# @return [Hash{Symbol => String}] property names mapped to their current values
|
|
302
|
+
#
|
|
303
|
+
# @raise [ConfigError] When the configuration contains invalid options
|
|
304
|
+
def describe_properties
|
|
305
|
+
config = nil
|
|
306
|
+
dump_ptr = nil
|
|
307
|
+
count = 0
|
|
308
|
+
|
|
309
|
+
config = native_config
|
|
310
|
+
count_ptr = Rdkafka::Bindings::SizePtr.new
|
|
311
|
+
dump_ptr = Rdkafka::Bindings.rd_kafka_conf_dump(config, count_ptr)
|
|
312
|
+
|
|
313
|
+
count = count_ptr[:value]
|
|
314
|
+
result = {}
|
|
315
|
+
|
|
316
|
+
(0...count).step(2) do |i|
|
|
317
|
+
key = dump_ptr.get_pointer(i * FFI::Pointer.size).read_string
|
|
318
|
+
value = dump_ptr.get_pointer((i + 1) * FFI::Pointer.size).read_string
|
|
319
|
+
result[key.to_sym] = value
|
|
320
|
+
end
|
|
321
|
+
|
|
322
|
+
result
|
|
323
|
+
ensure
|
|
324
|
+
Rdkafka::Bindings.rd_kafka_conf_dump_free(dump_ptr, count) if dump_ptr
|
|
325
|
+
Rdkafka::Bindings.rd_kafka_conf_destroy(config) if config
|
|
326
|
+
end
|
|
327
|
+
|
|
288
328
|
# Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
|
|
289
329
|
class ConfigError < RuntimeError; end
|
|
290
330
|
|
|
@@ -364,10 +404,9 @@ module Rdkafka
|
|
|
364
404
|
end
|
|
365
405
|
|
|
366
406
|
# Redirect log to handle's queue
|
|
367
|
-
Rdkafka::Bindings.
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
)
|
|
407
|
+
main_queue = Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
|
|
408
|
+
Rdkafka::Bindings.rd_kafka_set_log_queue(handle, main_queue)
|
|
409
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(main_queue)
|
|
371
410
|
|
|
372
411
|
# Return handle which should be closed using rd_kafka_destroy after usage.
|
|
373
412
|
handle
|
data/lib/rdkafka/consumer.rb
CHANGED
|
@@ -19,6 +19,9 @@ module Rdkafka
|
|
|
19
19
|
# @param native_kafka [NativeKafka] wrapper around the native Kafka consumer handle
|
|
20
20
|
def initialize(native_kafka)
|
|
21
21
|
@native_kafka = native_kafka
|
|
22
|
+
|
|
23
|
+
# Makes sure, that native kafka gets closed before it gets GCed by Ruby
|
|
24
|
+
ObjectSpace.define_finalizer(self, native_kafka.finalizer)
|
|
22
25
|
end
|
|
23
26
|
|
|
24
27
|
# Starts the native Kafka polling thread and kicks off the init polling
|
|
@@ -172,12 +175,6 @@ module Rdkafka
|
|
|
172
175
|
end
|
|
173
176
|
end
|
|
174
177
|
|
|
175
|
-
# @return [Proc] finalizer proc for closing the consumer
|
|
176
|
-
# @private
|
|
177
|
-
def finalizer
|
|
178
|
-
->(_) { close }
|
|
179
|
-
end
|
|
180
|
-
|
|
181
178
|
# Close this consumer
|
|
182
179
|
# @return [nil]
|
|
183
180
|
def close
|
|
@@ -186,6 +183,11 @@ module Rdkafka
|
|
|
186
183
|
|
|
187
184
|
@native_kafka.synchronize do |inner|
|
|
188
185
|
Rdkafka::Bindings.rd_kafka_consumer_close(inner)
|
|
186
|
+
|
|
187
|
+
if @consumer_queue
|
|
188
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(@consumer_queue)
|
|
189
|
+
@consumer_queue = nil
|
|
190
|
+
end
|
|
189
191
|
end
|
|
190
192
|
|
|
191
193
|
@native_kafka.close
|
|
@@ -437,6 +439,8 @@ module Rdkafka
|
|
|
437
439
|
end
|
|
438
440
|
|
|
439
441
|
TopicPartitionList.from_native_tpl(tpl)
|
|
442
|
+
ensure
|
|
443
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
|
|
440
444
|
end
|
|
441
445
|
|
|
442
446
|
# Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
|
|
@@ -782,6 +786,130 @@ module Rdkafka
|
|
|
782
786
|
end
|
|
783
787
|
end
|
|
784
788
|
|
|
789
|
+
# Poll for a batch of messages from the consumer queue in a single FFI call.
|
|
790
|
+
#
|
|
791
|
+
# This is more efficient than calling {#poll} in a loop because it crosses the FFI
|
|
792
|
+
# boundary only once to fetch up to `max_items` messages.
|
|
793
|
+
#
|
|
794
|
+
# The timeout controls how long to wait for the **first** message. Once any message
|
|
795
|
+
# is available, librdkafka fills the buffer with whatever is immediately ready and
|
|
796
|
+
# returns without further waiting.
|
|
797
|
+
#
|
|
798
|
+
# @param timeout_ms [Integer] Timeout waiting for the first message (-1 for infinite)
|
|
799
|
+
# @param max_items [Integer] Maximum number of messages to return per call
|
|
800
|
+
# @return [Array<Message>] Array of messages (empty if none available within timeout)
|
|
801
|
+
# @raise [RdkafkaError] When a consumed message contains an error
|
|
802
|
+
# @raise [ClosedConsumerError] When called on a closed consumer
|
|
803
|
+
def poll_batch(timeout_ms, max_items: 100)
|
|
804
|
+
closed_consumer_check(__method__)
|
|
805
|
+
|
|
806
|
+
buffer = batch_buffer(max_items)
|
|
807
|
+
messages = []
|
|
808
|
+
|
|
809
|
+
count = @native_kafka.with_inner do |_inner|
|
|
810
|
+
Rdkafka::Bindings.rd_kafka_consume_batch_queue(
|
|
811
|
+
consumer_queue,
|
|
812
|
+
timeout_ms,
|
|
813
|
+
buffer,
|
|
814
|
+
max_items
|
|
815
|
+
)
|
|
816
|
+
end
|
|
817
|
+
|
|
818
|
+
return messages if count <= 0
|
|
819
|
+
|
|
820
|
+
i = 0
|
|
821
|
+
begin
|
|
822
|
+
while i < count
|
|
823
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
824
|
+
|
|
825
|
+
if ptr.null?
|
|
826
|
+
i += 1
|
|
827
|
+
next
|
|
828
|
+
end
|
|
829
|
+
|
|
830
|
+
native_message = Rdkafka::Bindings::Message.new(ptr)
|
|
831
|
+
|
|
832
|
+
if native_message[:err] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
833
|
+
raise Rdkafka::RdkafkaError.new(native_message[:err])
|
|
834
|
+
end
|
|
835
|
+
|
|
836
|
+
messages << Rdkafka::Consumer::Message.new(native_message)
|
|
837
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr)
|
|
838
|
+
i += 1
|
|
839
|
+
end
|
|
840
|
+
ensure
|
|
841
|
+
while i < count
|
|
842
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
843
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr) unless ptr.null?
|
|
844
|
+
i += 1
|
|
845
|
+
end
|
|
846
|
+
end
|
|
847
|
+
|
|
848
|
+
messages
|
|
849
|
+
end
|
|
850
|
+
|
|
851
|
+
# Poll for a batch of messages without releasing the GVL (Global VM Lock).
|
|
852
|
+
#
|
|
853
|
+
# This is more efficient than {#poll_batch} for non-blocking poll(0) calls,
|
|
854
|
+
# particularly useful in fiber scheduler contexts where GVL release/reacquire
|
|
855
|
+
# overhead is wasteful since we don't expect to wait.
|
|
856
|
+
#
|
|
857
|
+
# @note Since the GVL is not released, a non-zero timeout_ms will block all Ruby
|
|
858
|
+
# threads/fibers for the duration. Use {#poll_batch} if you need a blocking wait.
|
|
859
|
+
#
|
|
860
|
+
# @param timeout_ms [Integer] Timeout waiting for the first message (default: 0 for non-blocking)
|
|
861
|
+
# @param max_items [Integer] Maximum number of messages to return per call
|
|
862
|
+
# @return [Array<Message>] Array of messages (empty if none available within timeout)
|
|
863
|
+
# @raise [RdkafkaError] When a consumed message contains an error
|
|
864
|
+
# @raise [ClosedConsumerError] When called on a closed consumer
|
|
865
|
+
def poll_batch_nb(timeout_ms = 0, max_items: 100)
|
|
866
|
+
closed_consumer_check(__method__)
|
|
867
|
+
|
|
868
|
+
buffer = batch_buffer(max_items)
|
|
869
|
+
messages = []
|
|
870
|
+
|
|
871
|
+
count = @native_kafka.with_inner do |_inner|
|
|
872
|
+
Rdkafka::Bindings.rd_kafka_consume_batch_queue_nb(
|
|
873
|
+
consumer_queue,
|
|
874
|
+
timeout_ms,
|
|
875
|
+
buffer,
|
|
876
|
+
max_items
|
|
877
|
+
)
|
|
878
|
+
end
|
|
879
|
+
|
|
880
|
+
return messages if count <= 0
|
|
881
|
+
|
|
882
|
+
i = 0
|
|
883
|
+
begin
|
|
884
|
+
while i < count
|
|
885
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
886
|
+
|
|
887
|
+
if ptr.null?
|
|
888
|
+
i += 1
|
|
889
|
+
next
|
|
890
|
+
end
|
|
891
|
+
|
|
892
|
+
native_message = Rdkafka::Bindings::Message.new(ptr)
|
|
893
|
+
|
|
894
|
+
if native_message[:err] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
|
|
895
|
+
raise Rdkafka::RdkafkaError.new(native_message[:err])
|
|
896
|
+
end
|
|
897
|
+
|
|
898
|
+
messages << Rdkafka::Consumer::Message.new(native_message)
|
|
899
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr)
|
|
900
|
+
i += 1
|
|
901
|
+
end
|
|
902
|
+
ensure
|
|
903
|
+
while i < count
|
|
904
|
+
ptr = buffer.get_pointer(i * FFI::Pointer.size)
|
|
905
|
+
Rdkafka::Bindings.rd_kafka_message_destroy(ptr) unless ptr.null?
|
|
906
|
+
i += 1
|
|
907
|
+
end
|
|
908
|
+
end
|
|
909
|
+
|
|
910
|
+
messages
|
|
911
|
+
end
|
|
912
|
+
|
|
785
913
|
# Poll for new messages and yield for each received one. Iteration
|
|
786
914
|
# will end when the consumer is closed.
|
|
787
915
|
#
|
|
@@ -854,5 +982,24 @@ module Rdkafka
|
|
|
854
982
|
# Guards public consumer API calls against use after close.
#
# @param method [Symbol] name of the calling method, included in the error
# @raise [ClosedConsumerError] when the consumer has already been closed
def closed_consumer_check(method)
  return unless closed?

  raise Rdkafka::ClosedConsumerError.new(method)
end
|
|
985
|
+
|
|
986
|
+
# Returns the librdkafka consumer queue handle, fetching and caching it on
# first use.
#
# NOTE(review): librdkafka documents that handles from
# rd_kafka_queue_get_consumer should be released with rd_kafka_queue_destroy —
# confirm the close path releases @consumer_queue.
#
# @return [FFI::Pointer] consumer queue handle
def consumer_queue
  return @consumer_queue if @consumer_queue

  @consumer_queue = @native_kafka.with_inner do |inner|
    Rdkafka::Bindings.rd_kafka_queue_get_consumer(inner)
  end
end
|
|
993
|
+
|
|
994
|
+
# Returns a memoized native pointer array used by batch polling, reallocating
# it whenever the requested capacity exceeds what was allocated before.
#
# @param max_items [Integer] minimum buffer capacity
# @return [FFI::MemoryPointer] pointer buffer
def batch_buffer(max_items)
  too_small = @batch_buffer.nil? || @batch_buffer_size < max_items

  if too_small
    # Allocate first, then record the size, so a failed allocation does not
    # leave @batch_buffer_size describing a buffer that was never created.
    @batch_buffer = FFI::MemoryPointer.new(:pointer, max_items)
    @batch_buffer_size = max_items
  end

  @batch_buffer
end
|
|
857
1004
|
end
|
|
858
1005
|
end
|
data/lib/rdkafka/error.rb
CHANGED
|
@@ -18,15 +18,21 @@ module Rdkafka
|
|
|
18
18
|
# @return [String]
|
|
19
19
|
attr_reader :broker_message
|
|
20
20
|
|
|
21
|
+
# The name of the rdkafka instance that generated this error
|
|
22
|
+
# @return [String, nil]
|
|
23
|
+
attr_reader :instance_name
|
|
24
|
+
|
|
21
25
|
# @private
|
|
22
26
|
# @param response [Integer] the raw error response code from librdkafka
|
|
23
27
|
# @param message_prefix [String, nil] optional prefix for error messages
|
|
24
28
|
# @param broker_message [String, nil] optional error message from the broker
|
|
25
|
-
|
|
29
|
+
# @param instance_name [String, nil] optional name of the rdkafka instance
|
|
30
|
+
def initialize(response, message_prefix = nil, broker_message: nil, instance_name: nil)
|
|
26
31
|
raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
|
|
27
32
|
@rdkafka_response = response
|
|
28
33
|
@message_prefix = message_prefix
|
|
29
34
|
@broker_message = broker_message
|
|
35
|
+
@instance_name = instance_name
|
|
30
36
|
end
|
|
31
37
|
|
|
32
38
|
# This error's code, for example `:partition_eof`, `:msg_size_too_large`.
|
|
@@ -48,7 +54,12 @@ module Rdkafka
|
|
|
48
54
|
else
|
|
49
55
|
""
|
|
50
56
|
end
|
|
51
|
-
|
|
57
|
+
instance_name_part = if instance_name
|
|
58
|
+
" [#{instance_name}]"
|
|
59
|
+
else
|
|
60
|
+
""
|
|
61
|
+
end
|
|
62
|
+
"#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})#{instance_name_part}"
|
|
52
63
|
end
|
|
53
64
|
|
|
54
65
|
# Whether this error indicates the partition is EOF.
|
data/lib/rdkafka/version.rb
CHANGED
|
@@ -2,9 +2,9 @@
|
|
|
2
2
|
|
|
3
3
|
module Rdkafka
  # Current rdkafka-ruby gem version
  VERSION = "0.27.0"

  # Target librdkafka version to be used
  LIBRDKAFKA_VERSION = "2.14.0"

  # SHA256 hash of the librdkafka source tarball for verification
  LIBRDKAFKA_SOURCE_SHA256 = "c05c03ef00a13a8463fac3e8918c04843c416f11ced58c889d806a88ca92cf99"
end
|
data/lib/rdkafka.rb
CHANGED
|
@@ -29,6 +29,8 @@ require "rdkafka/admin/describe_configs_handle"
|
|
|
29
29
|
require "rdkafka/admin/describe_configs_report"
|
|
30
30
|
require "rdkafka/admin/incremental_alter_configs_handle"
|
|
31
31
|
require "rdkafka/admin/incremental_alter_configs_report"
|
|
32
|
+
require "rdkafka/admin/list_offsets_handle"
|
|
33
|
+
require "rdkafka/admin/list_offsets_report"
|
|
32
34
|
require "rdkafka/admin/acl_binding_result"
|
|
33
35
|
require "rdkafka/admin/config_binding_result"
|
|
34
36
|
require "rdkafka/admin/config_resource_binding_result"
|
data/package-lock.json
CHANGED
|
@@ -217,9 +217,9 @@
|
|
|
217
217
|
}
|
|
218
218
|
},
|
|
219
219
|
"node_modules/picomatch": {
|
|
220
|
-
"version": "2.3.
|
|
221
|
-
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.
|
|
222
|
-
"integrity": "sha512-
|
|
220
|
+
"version": "2.3.2",
|
|
221
|
+
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
|
222
|
+
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
|
223
223
|
"dev": true,
|
|
224
224
|
"license": "MIT",
|
|
225
225
|
"engines": {
|
data/renovate.json
CHANGED
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: rdkafka
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.
|
|
4
|
+
version: 0.27.0
|
|
5
5
|
platform: x86_64-linux-gnu
|
|
6
6
|
authors:
|
|
7
7
|
- Thijs Cadier
|
|
@@ -122,6 +122,8 @@ files:
|
|
|
122
122
|
- lib/rdkafka/admin/describe_configs_report.rb
|
|
123
123
|
- lib/rdkafka/admin/incremental_alter_configs_handle.rb
|
|
124
124
|
- lib/rdkafka/admin/incremental_alter_configs_report.rb
|
|
125
|
+
- lib/rdkafka/admin/list_offsets_handle.rb
|
|
126
|
+
- lib/rdkafka/admin/list_offsets_report.rb
|
|
125
127
|
- lib/rdkafka/bindings.rb
|
|
126
128
|
- lib/rdkafka/callbacks.rb
|
|
127
129
|
- lib/rdkafka/config.rb
|
|
@@ -169,7 +171,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
|
169
171
|
- !ruby/object:Gem::Version
|
|
170
172
|
version: '0'
|
|
171
173
|
requirements: []
|
|
172
|
-
rubygems_version: 4.0.
|
|
174
|
+
rubygems_version: 4.0.6
|
|
173
175
|
specification_version: 4
|
|
174
176
|
summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
|
|
175
177
|
It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
|