rdkafka 0.25.0-aarch64-linux-gnu → 0.26.0-aarch64-linux-gnu
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -0
- data/Gemfile +5 -6
- data/Gemfile.lint +14 -0
- data/Gemfile.lint.lock +123 -0
- data/README.md +2 -1
- data/Rakefile +21 -21
- data/bin/verify_kafka_warnings +2 -0
- data/docker-compose-ssl.yml +2 -2
- data/docker-compose.yml +2 -2
- data/ext/librdkafka.so +0 -0
- data/lib/rdkafka/admin/acl_binding_result.rb +4 -4
- data/lib/rdkafka/admin/create_acl_handle.rb +4 -4
- data/lib/rdkafka/admin/create_acl_report.rb +0 -2
- data/lib/rdkafka/admin/create_partitions_handle.rb +5 -5
- data/lib/rdkafka/admin/create_topic_handle.rb +5 -5
- data/lib/rdkafka/admin/delete_acl_handle.rb +6 -6
- data/lib/rdkafka/admin/delete_acl_report.rb +2 -3
- data/lib/rdkafka/admin/delete_groups_handle.rb +5 -5
- data/lib/rdkafka/admin/delete_topic_handle.rb +5 -5
- data/lib/rdkafka/admin/describe_acl_handle.rb +6 -6
- data/lib/rdkafka/admin/describe_acl_report.rb +2 -3
- data/lib/rdkafka/admin/describe_configs_handle.rb +4 -4
- data/lib/rdkafka/admin/describe_configs_report.rb +1 -1
- data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +4 -4
- data/lib/rdkafka/admin/incremental_alter_configs_report.rb +1 -1
- data/lib/rdkafka/admin/list_offsets_handle.rb +36 -0
- data/lib/rdkafka/admin/list_offsets_report.rb +51 -0
- data/lib/rdkafka/admin.rb +189 -24
- data/lib/rdkafka/bindings.rb +121 -84
- data/lib/rdkafka/callbacks.rb +53 -10
- data/lib/rdkafka/config.rb +20 -20
- data/lib/rdkafka/consumer/message.rb +5 -8
- data/lib/rdkafka/consumer/partition.rb +2 -2
- data/lib/rdkafka/consumer/topic_partition_list.rb +10 -10
- data/lib/rdkafka/consumer.rb +208 -18
- data/lib/rdkafka/error.rb +25 -14
- data/lib/rdkafka/helpers/oauth.rb +0 -1
- data/lib/rdkafka/helpers/time.rb +5 -0
- data/lib/rdkafka/metadata.rb +16 -16
- data/lib/rdkafka/native_kafka.rb +63 -2
- data/lib/rdkafka/producer/delivery_handle.rb +5 -5
- data/lib/rdkafka/producer/delivery_report.rb +1 -1
- data/lib/rdkafka/producer/partitions_count_cache.rb +6 -6
- data/lib/rdkafka/producer.rb +117 -57
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +2 -0
- data/package-lock.json +331 -0
- data/package.json +9 -0
- data/rdkafka.gemspec +39 -40
- data/renovate.json +21 -0
- metadata +8 -2
|
# frozen_string_literal: true

module Rdkafka
  class Admin
    # Handle for the list offsets admin operation.
    #
    # librdkafka fills in the struct fields from the background event
    # callback; +wait+ (inherited from AbstractHandle) then turns them into
    # a {ListOffsetsReport} on success or raises via {#raise_error}.
    class ListOffsetsHandle < AbstractHandle
      layout :pending, :bool,
             :response, :int,
             :response_string, :pointer,
             :result_infos, :pointer,
             :result_count, :int

      # @return [String] the name of the operation.
      def operation_name
        "list offsets"
      end

      # @return [ListOffsetsReport] instance with partition offset information.
      def create_result
        ListOffsetsReport.new(
          result_infos: self[:result_infos],
          result_count: self[:result_count]
        )
      end

      # Raises an error if the operation failed
      #
      # @raise [RdkafkaError] carrying the broker-provided message when present
      def raise_error
        # Guard against a NULL error-string pointer: without this check a
        # failure path would crash with an opaque FFI null-read instead of
        # surfacing a proper RdkafkaError.
        message = self[:response_string].null? ? nil : self[:response_string].read_string

        raise RdkafkaError.new(
          self[:response],
          broker_message: message
        )
      end
    end
  end
end
|
# frozen_string_literal: true

module Rdkafka
  class Admin
    # Report for list offsets operation result
    class ListOffsetsReport
      # @return [Array<Hash>] one hash per queried partition, with keys
      #   :topic, :partition, :offset, :timestamp and :leader_epoch
      attr_reader :offsets

      # @param result_infos [FFI::Pointer] pointer to result info array
      # @param result_count [Integer] number of result info entries
      def initialize(result_infos:, result_count:)
        @offsets = []

        return if result_infos.null?

        info_pointers = result_infos.read_array_of_pointer(result_count)

        # Fail fast: surface any per-partition error before building results
        info_pointers.each { |info_ptr| validate!(info_ptr) }

        info_pointers.each do |info_ptr|
          partition_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(info_ptr)
          partition = Bindings::TopicPartition.new(partition_ptr)
          epoch = Bindings.rd_kafka_topic_partition_get_leader_epoch(partition_ptr)

          @offsets << {
            topic: partition[:topic],
            partition: partition[:partition],
            offset: partition[:offset],
            timestamp: Bindings.rd_kafka_ListOffsetsResultInfo_timestamp(info_ptr),
            # librdkafka reports -1 when no leader epoch is available
            leader_epoch: epoch == -1 ? nil : epoch
          }
        end
      end

      private

      # Validates the partition result and raises an error if invalid
      # @param result_info_ptr [FFI::Pointer] pointer to the result info
      # @raise [RdkafkaError] when the partition has an error
      def validate!(result_info_ptr)
        partition_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(result_info_ptr)
        error_code = Bindings::TopicPartition.new(partition_ptr)[:err]

        raise RdkafkaError.new(error_code) unless error_code.zero?
      end
    end
  end
end
data/lib/rdkafka/admin.rb
CHANGED
|
@@ -36,8 +36,8 @@ module Rdkafka
|
|
|
36
36
|
# Read values from the struct
|
|
37
37
|
code = error_desc[:code]
|
|
38
38
|
|
|
39
|
-
name =
|
|
40
|
-
desc =
|
|
39
|
+
name = ""
|
|
40
|
+
desc = ""
|
|
41
41
|
|
|
42
42
|
name = error_desc[:name].read_string unless error_desc[:name].null?
|
|
43
43
|
desc = error_desc[:desc].read_string unless error_desc[:desc].null?
|
|
@@ -71,10 +71,63 @@ module Rdkafka
|
|
|
71
71
|
end
|
|
72
72
|
end
|
|
73
73
|
|
|
74
|
-
#
|
|
75
|
-
#
|
|
76
|
-
|
|
77
|
-
|
|
74
|
+
# Enable IO event notifications on the main queue, e.g. for fiber
# scheduler integration. Once enabled, librdkafka writes to the given
# file descriptor whenever admin operations complete.
#
# @param fd [Integer] file descriptor to signal (from IO.pipe or eventfd)
# @param payload [String] data to write to fd (default: "\x01")
# @return [nil]
# @raise [ClosedInnerError] when the admin client is closed
def enable_queue_io_events(fd, payload = "\x01")
  @native_kafka.enable_main_queue_io_events(fd, payload)
end
|
|
84
|
+
|
|
85
|
+
# Enable IO event notifications on the background queue. Once enabled,
# librdkafka writes to the given file descriptor when background events
# arrive.
#
# @param fd [Integer] file descriptor to signal (from IO.pipe or eventfd)
# @param payload [String] data to write to fd (default: "\x01")
# @return [nil]
# @raise [ClosedInnerError] when the admin client is closed
def enable_background_queue_io_events(fd, payload = "\x01")
  @native_kafka.enable_background_queue_io_events(fd, payload)
end
|
|
93
|
+
|
|
94
|
+
# Polls for events in a non-blocking loop, yielding the count after each iteration.
#
# Processing happens inside a single GVL/mutex session, which is cheaper
# than repeated individual polls. Each internal poll is non-blocking
# (timeout 0), so the GVL is never released between iterations.
#
# The block receives the number of events handled in each iteration and
# may return `:stop` to terminate early (e.g. deadline-based control).
#
# @yield [count] Called after each poll iteration
# @yieldparam count [Integer] Number of events processed in this iteration
# @yieldreturn [Symbol, Object] Return `:stop` to break the loop, any other value continues
# @return [nil]
# @raise [Rdkafka::ClosedAdminError] if called on a closed admin client
#
# @note The inner lock is held until the queue drains or `:stop` is
#   returned; other admin operations block for that duration.
# @note Thread-safe via @native_kafka.with_inner synchronization
#
# @example Drain all pending events
#   admin.events_poll_nb_each { |_count| }
#
# @example With timeout control
#   deadline = monotonic_now + timeout_ms
#   admin.events_poll_nb_each do |_count|
#     :stop if monotonic_now >= deadline
#   end
def events_poll_nb_each
  closed_admin_check(__method__)

  @native_kafka.with_inner do |inner|
    # Keep polling until the queue is empty or the caller asks to stop.
    until (processed = Rdkafka::Bindings.rd_kafka_poll_nb(inner, 0)).zero?
      break if yield(processed) == :stop
    end
  end
end
|
|
79
132
|
|
|
80
133
|
# Performs the metadata request using admin
|
|
@@ -113,7 +166,7 @@ module Rdkafka
|
|
|
113
166
|
# @raise [ConfigError] When the partition count or replication factor are out of valid range
|
|
114
167
|
# @raise [RdkafkaError] When the topic name is invalid or the topic already exists
|
|
115
168
|
# @raise [RdkafkaError] When the topic configuration is invalid
|
|
116
|
-
def create_topic(topic_name, partition_count, replication_factor, topic_config={})
|
|
169
|
+
def create_topic(topic_name, partition_count, replication_factor, topic_config = {})
|
|
117
170
|
closed_admin_check(__method__)
|
|
118
171
|
|
|
119
172
|
# Create a rd_kafka_NewTopic_t representing the new topic
|
|
@@ -129,14 +182,12 @@ module Rdkafka
|
|
|
129
182
|
raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
|
|
130
183
|
end
|
|
131
184
|
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
)
|
|
139
|
-
end
|
|
185
|
+
topic_config&.each do |key, value|
|
|
186
|
+
Rdkafka::Bindings.rd_kafka_NewTopic_set_config(
|
|
187
|
+
new_topic_ptr,
|
|
188
|
+
key.to_s,
|
|
189
|
+
value.to_s
|
|
190
|
+
)
|
|
140
191
|
end
|
|
141
192
|
|
|
142
193
|
# Note that rd_kafka_CreateTopics can create more than one topic at a time
|
|
@@ -372,6 +423,8 @@ module Rdkafka
|
|
|
372
423
|
# - RD_KAFKA_RESOURCE_BROKER = 4
|
|
373
424
|
# @param resource_name [String] name of the resource
|
|
374
425
|
# @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
|
|
426
|
+
# - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
|
|
427
|
+
# - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
|
|
375
428
|
# - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
|
|
376
429
|
# - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
|
|
377
430
|
# - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
|
|
@@ -471,6 +524,8 @@ module Rdkafka
|
|
|
471
524
|
# - RD_KAFKA_RESOURCE_BROKER = 4
|
|
472
525
|
# @param resource_name [String, nil] name of the resource or nil for any
|
|
473
526
|
# @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
|
|
527
|
+
# - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
|
|
528
|
+
# - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
|
|
474
529
|
# - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
|
|
475
530
|
# - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
|
|
476
531
|
# - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
|
|
@@ -572,6 +627,8 @@ module Rdkafka
|
|
|
572
627
|
# - RD_KAFKA_RESOURCE_BROKER = 4
|
|
573
628
|
# @param resource_name [String, nil] name of the resource or nil for any
|
|
574
629
|
# @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
|
|
630
|
+
# - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
|
|
631
|
+
# - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
|
|
575
632
|
# - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
|
|
576
633
|
# - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
|
|
577
634
|
# - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
|
|
@@ -720,10 +777,12 @@ module Rdkafka
|
|
|
720
777
|
|
|
721
778
|
raise
|
|
722
779
|
ensure
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
780
|
+
if configs_array_ptr
|
|
781
|
+
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
782
|
+
configs_array_ptr,
|
|
783
|
+
pointer_array.size
|
|
784
|
+
)
|
|
785
|
+
end
|
|
727
786
|
end
|
|
728
787
|
|
|
729
788
|
handle
|
|
@@ -791,7 +850,6 @@ module Rdkafka
|
|
|
791
850
|
configs_array_ptr = FFI::MemoryPointer.new(:pointer, pointer_array.size)
|
|
792
851
|
configs_array_ptr.write_array_of_pointer(pointer_array)
|
|
793
852
|
|
|
794
|
-
|
|
795
853
|
begin
|
|
796
854
|
@native_kafka.with_inner do |inner|
|
|
797
855
|
Rdkafka::Bindings.rd_kafka_IncrementalAlterConfigs(
|
|
@@ -807,10 +865,117 @@ module Rdkafka
|
|
|
807
865
|
|
|
808
866
|
raise
|
|
809
867
|
ensure
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
868
|
+
if configs_array_ptr
|
|
869
|
+
Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
|
|
870
|
+
configs_array_ptr,
|
|
871
|
+
pointer_array.size
|
|
872
|
+
)
|
|
873
|
+
end
|
|
874
|
+
end
|
|
875
|
+
|
|
876
|
+
handle
|
|
877
|
+
end
|
|
878
|
+
|
|
879
|
+
# Queries partition offsets by specification (earliest, latest, max_timestamp, or by
|
|
880
|
+
# timestamp) without requiring a consumer group.
|
|
881
|
+
#
|
|
882
|
+
# @param topic_partition_offsets [Hash{String => Array<Hash>}] hash mapping topic names to
|
|
883
|
+
# arrays of partition offset specifications. Each specification is a hash with:
|
|
884
|
+
# - `:partition` [Integer] partition number
|
|
885
|
+
# - `:offset` [Symbol, Integer] offset specification - `:earliest`, `:latest`,
|
|
886
|
+
# `:max_timestamp`, or an integer timestamp in milliseconds
|
|
887
|
+
# @param isolation_level [Integer, nil] optional isolation level:
|
|
888
|
+
# - `RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED` (0) - default
|
|
889
|
+
# - `RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED` (1)
|
|
890
|
+
#
|
|
891
|
+
# @return [ListOffsetsHandle] handle that can be used to wait for the result
|
|
892
|
+
#
|
|
893
|
+
# @raise [ClosedAdminError] when the admin is closed
|
|
894
|
+
# @raise [ConfigError] when the background queue is unavailable
|
|
895
|
+
#
|
|
896
|
+
# @example Query earliest and latest offsets
|
|
897
|
+
# handle = admin.list_offsets(
|
|
898
|
+
# { "my_topic" => [
|
|
899
|
+
# { partition: 0, offset: :earliest },
|
|
900
|
+
# { partition: 1, offset: :latest }
|
|
901
|
+
# ] }
|
|
902
|
+
# )
|
|
903
|
+
# report = handle.wait(max_wait_timeout_ms: 15_000)
|
|
904
|
+
# report.offsets
|
|
905
|
+
# # => [{ topic: "my_topic", partition: 0, offset: 0, ... }, ...]
|
|
906
|
+
def list_offsets(topic_partition_offsets, isolation_level: nil)
|
|
907
|
+
closed_admin_check(__method__)
|
|
908
|
+
|
|
909
|
+
# Count total partitions for pre-allocation
|
|
910
|
+
total = topic_partition_offsets.sum { |_, partitions| partitions.size }
|
|
911
|
+
|
|
912
|
+
# Build native topic partition list
|
|
913
|
+
tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(total)
|
|
914
|
+
|
|
915
|
+
topic_partition_offsets.each do |topic, partitions|
|
|
916
|
+
partitions.each do |spec|
|
|
917
|
+
partition = spec.fetch(:partition)
|
|
918
|
+
offset = spec.fetch(:offset)
|
|
919
|
+
|
|
920
|
+
native_offset = case offset
|
|
921
|
+
when :earliest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_EARLIEST
|
|
922
|
+
when :latest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_LATEST
|
|
923
|
+
when :max_timestamp then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP
|
|
924
|
+
when Integer then offset
|
|
925
|
+
else
|
|
926
|
+
raise ArgumentError, "Unknown offset specification: #{offset.inspect}"
|
|
927
|
+
end
|
|
928
|
+
|
|
929
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, partition)
|
|
930
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(tpl, topic, partition, native_offset)
|
|
931
|
+
end
|
|
932
|
+
end
|
|
933
|
+
|
|
934
|
+
# Get a pointer to the queue that our request will be enqueued on
|
|
935
|
+
queue_ptr = @native_kafka.with_inner do |inner|
|
|
936
|
+
Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
|
|
937
|
+
end
|
|
938
|
+
|
|
939
|
+
if queue_ptr.null?
|
|
940
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
|
|
941
|
+
raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
|
|
942
|
+
end
|
|
943
|
+
|
|
944
|
+
# Create and register the handle we will return to the caller
|
|
945
|
+
handle = ListOffsetsHandle.new
|
|
946
|
+
handle[:pending] = true
|
|
947
|
+
handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
|
|
948
|
+
|
|
949
|
+
admin_options_ptr = @native_kafka.with_inner do |inner|
|
|
950
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_new(
|
|
951
|
+
inner,
|
|
952
|
+
Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_LISTOFFSETS
|
|
953
|
+
)
|
|
954
|
+
end
|
|
955
|
+
|
|
956
|
+
if isolation_level
|
|
957
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_set_isolation_level(admin_options_ptr, isolation_level)
|
|
958
|
+
end
|
|
959
|
+
|
|
960
|
+
ListOffsetsHandle.register(handle)
|
|
961
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, handle.to_ptr)
|
|
962
|
+
|
|
963
|
+
begin
|
|
964
|
+
@native_kafka.with_inner do |inner|
|
|
965
|
+
Rdkafka::Bindings.rd_kafka_ListOffsets(
|
|
966
|
+
inner,
|
|
967
|
+
tpl,
|
|
968
|
+
admin_options_ptr,
|
|
969
|
+
queue_ptr
|
|
970
|
+
)
|
|
971
|
+
end
|
|
972
|
+
rescue Exception
|
|
973
|
+
ListOffsetsHandle.remove(handle.to_ptr.address)
|
|
974
|
+
raise
|
|
975
|
+
ensure
|
|
976
|
+
Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
|
|
977
|
+
Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
|
|
978
|
+
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
|
|
814
979
|
end
|
|
815
980
|
|
|
816
981
|
handle
|