rdkafka 0.8.0.beta.1 → 0.10.0
- checksums.yaml +4 -4
- data/.semaphore/semaphore.yml +23 -0
- data/CHANGELOG.md +18 -0
- data/README.md +5 -2
- data/docker-compose.yml +2 -0
- data/ext/README.md +7 -0
- data/ext/Rakefile +2 -1
- data/lib/rdkafka/abstract_handle.rb +82 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/create_topic_report.rb +22 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
- data/lib/rdkafka/admin.rb +155 -0
- data/lib/rdkafka/bindings.rb +57 -18
- data/lib/rdkafka/callbacks.rb +106 -0
- data/lib/rdkafka/config.rb +59 -3
- data/lib/rdkafka/consumer.rb +125 -5
- data/lib/rdkafka/error.rb +29 -3
- data/lib/rdkafka/metadata.rb +6 -5
- data/lib/rdkafka/producer/delivery_handle.rb +7 -53
- data/lib/rdkafka/producer.rb +25 -11
- data/lib/rdkafka/version.rb +3 -3
- data/lib/rdkafka.rb +7 -0
- data/spec/rdkafka/abstract_handle_spec.rb +114 -0
- data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin_spec.rb +203 -0
- data/spec/rdkafka/bindings_spec.rb +32 -8
- data/spec/rdkafka/callbacks_spec.rb +20 -0
- data/spec/rdkafka/config_spec.rb +76 -7
- data/spec/rdkafka/consumer_spec.rb +266 -2
- data/spec/rdkafka/error_spec.rb +4 -0
- data/spec/rdkafka/metadata_spec.rb +78 -0
- data/spec/rdkafka/producer/delivery_handle_spec.rb +1 -41
- data/spec/rdkafka/producer_spec.rb +98 -31
- data/spec/spec_helper.rb +28 -11
- metadata +32 -9
- data/.travis.yml +0 -45
data/lib/rdkafka/bindings.rb
CHANGED
```diff
@@ -108,6 +108,8 @@ module Rdkafka
     attach_function :rd_kafka_conf_set_opaque, [:pointer, :pointer], :void
     callback :stats_cb, [:pointer, :string, :int, :pointer], :int
     attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
+    callback :error_cb, [:pointer, :int, :string, :pointer], :void
+    attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
 
     # Log queue
     attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -130,7 +132,7 @@ module Rdkafka
       else
         Logger::UNKNOWN
       end
-      Rdkafka::Config.logger.add(severity, "rdkafka: #{line}")
+      Rdkafka::Config.log_queue << [severity, "rdkafka: #{line}"]
     end
 
     StatsCallback = FFI::Function.new(
@@ -146,6 +148,15 @@ module Rdkafka
       0
     end
 
+    ErrorCallback = FFI::Function.new(
+      :void, [:pointer, :int, :string, :pointer]
+    ) do |_client_prr, err_code, reason, _opaque|
+      if Rdkafka::Config.error_callback
+        error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
+        Rdkafka::Config.error_callback.call(error)
+      end
+    end
+
     # Handle
 
     enum :kafka_type, [
```
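The new `error_cb` binding and the `ErrorCallback` function above forward client-level librdkafka errors into Ruby via `Rdkafka::Config.error_callback`. A minimal sketch of registering a handler; the handler body is illustrative and the handler receives the `Rdkafka::RdkafkaError` built in the callback:

```ruby
require "rdkafka"

# Register a global error handler; without one, errors such as all brokers
# being unreachable are only visible through the logger.
Rdkafka::Config.error_callback = lambda do |error|
  # error is an Rdkafka::RdkafkaError; its message includes the broker-supplied reason
  warn "librdkafka error: #{error}"
end
```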
```diff
@@ -245,22 +256,50 @@ module Rdkafka
       rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
     end
 
-    # [17 removed lines not preserved in this view: the delivery report callback now provided by Rdkafka::Callbacks]
+    # Create Topics
+
+    RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
+    RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
+
+    attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
+    attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
+    attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
+    attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
+    attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
+    attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+
+    # Delete Topics
+
+    RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
+    RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
+
+    attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
+    attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
+    attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
+    attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
+    attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+
+    # Background Queue and Callback
+
+    attach_function :rd_kafka_queue_get_background, [:pointer], :pointer
+    attach_function :rd_kafka_conf_set_background_event_cb, [:pointer, :pointer], :void
+    attach_function :rd_kafka_queue_destroy, [:pointer], :void
+
+    # Admin Options
+
+    attach_function :rd_kafka_AdminOptions_new, [:pointer, :int32], :pointer
+    attach_function :rd_kafka_AdminOptions_set_opaque, [:pointer, :pointer], :void
+    attach_function :rd_kafka_AdminOptions_destroy, [:pointer], :void
+
+    # Extracting data from event types
+
+    attach_function :rd_kafka_event_type, [:pointer], :int32
+    attach_function :rd_kafka_event_opaque, [:pointer], :pointer
+
+    # Extracting data from topic results
+
+    attach_function :rd_kafka_topic_result_error, [:pointer], :int32
+    attach_function :rd_kafka_topic_result_error_string, [:pointer], :pointer
+    attach_function :rd_kafka_topic_result_name, [:pointer], :pointer
   end
 end
```
data/lib/rdkafka/callbacks.rb
ADDED
```diff
@@ -0,0 +1,106 @@
+module Rdkafka
+  module Callbacks
+
+    # Extracts attributes of a rd_kafka_topic_result_t
+    #
+    # @private
+    class TopicResult
+      attr_reader :result_error, :error_string, :result_name
+
+      def initialize(topic_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_topic_result_error(topic_result_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_topic_result_error_string(topic_result_pointer)
+        @result_name = Rdkafka::Bindings.rd_kafka_topic_result_name(topic_result_pointer)
+      end
+
+      def self.create_topic_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # FFI Function used for Create Topic and Delete Topic callbacks
+    BackgroundEventCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, event_ptr, opaque_ptr|
+      BackgroundEventCallback.call(client_ptr, event_ptr, opaque_ptr)
+    end
+
+    # @private
+    class BackgroundEventCallback
+      def self.call(_, event_ptr, _)
+        event_type = Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
+        if event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
+          process_create_topic(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
+          process_delete_topic(event_ptr)
+        end
+      end
+
+      private
+
+      def self.process_create_topic(event_ptr)
+        create_topics_result = Rdkafka::Bindings.rd_kafka_event_CreateTopics_result(event_ptr)
+
+        # Get the number of create topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_topic_result_array = Rdkafka::Bindings.rd_kafka_CreateTopics_result_topics(create_topics_result, pointer_to_size_t)
+        create_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_topic_result_array)
+        create_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_topic_handle = Rdkafka::Admin::CreateTopicHandle.remove(create_topic_handle_ptr.address)
+          create_topic_handle[:response] = create_topic_results[0].result_error
+          create_topic_handle[:error_string] = create_topic_results[0].error_string
+          create_topic_handle[:result_name] = create_topic_results[0].result_name
+          create_topic_handle[:pending] = false
+        end
+      end
+
+      def self.process_delete_topic(event_ptr)
+        delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
+
+        # Get the number of topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        delete_topic_result_array = Rdkafka::Bindings.rd_kafka_DeleteTopics_result_topics(delete_topics_result, pointer_to_size_t)
+        delete_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, delete_topic_result_array)
+        delete_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if delete_topic_handle = Rdkafka::Admin::DeleteTopicHandle.remove(delete_topic_handle_ptr.address)
+          delete_topic_handle[:response] = delete_topic_results[0].result_error
+          delete_topic_handle[:error_string] = delete_topic_results[0].error_string
+          delete_topic_handle[:result_name] = delete_topic_results[0].result_name
+          delete_topic_handle[:pending] = false
+        end
+      end
+    end
+
+    # FFI Function used for Message Delivery callbacks
+
+    DeliveryCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, message_ptr, opaque_ptr|
+      DeliveryCallback.call(client_ptr, message_ptr, opaque_ptr)
+    end
+
+    # @private
+    class DeliveryCallback
+      def self.call(_, message_ptr, opaque_ptr)
+        message = Rdkafka::Bindings::Message.new(message_ptr)
+        delivery_handle_ptr_address = message[:_private].address
+        if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+          # Update delivery handle
+          delivery_handle[:response] = message[:err]
+          delivery_handle[:partition] = message[:partition]
+          delivery_handle[:offset] = message[:offset]
+          delivery_handle[:pending] = false
+          # Call delivery callback on opaque
+          if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+          end
+        end
+      end
+    end
+  end
+end
```
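The `DeliveryCallback` above is what resolves the handle returned by `Producer#produce` and, when a per-producer delivery callback has been set, hands it the freshly built `DeliveryReport`. A hedged usage sketch; the broker address and topic name are placeholders:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# Invoked via Opaque#call_delivery_callback from the DeliveryCallback above
producer.delivery_callback = lambda do |report|
  puts "Delivered to partition #{report.partition} at offset #{report.offset}"
end

handle = producer.produce(topic: "events", payload: "hello")
handle.wait(max_wait_timeout: 10) # resolves once the callback marks the handle as no longer pending
```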
data/lib/rdkafka/config.rb
CHANGED
```diff
@@ -10,7 +10,18 @@ module Rdkafka
     # @private
     @@statistics_callback = nil
     # @private
+    @@error_callback = nil
+    # @private
     @@opaques = {}
+    # @private
+    @@log_queue = Queue.new
+
+    Thread.start do
+      loop do
+        severity, msg = @@log_queue.pop
+        @@logger.add(severity, msg)
+      end
+    end
 
     # Returns the current logger, by default this is a logger to stdout.
     #
@@ -19,6 +30,16 @@ module Rdkafka
       @@logger
     end
 
+
+    # Returns a queue whose contents will be passed to the configured logger. Each entry
+    # should follow the format [Logger::Severity, String]. The benefit over calling the
+    # logger directly is that this is safe to use from trap contexts.
+    #
+    # @return [Queue]
+    def self.log_queue
+      @@log_queue
+    end
+
     # Set the logger that will be used for all logging output by this library.
     #
     # @param logger [Logger] The logger to be used
```
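The `log_queue` indirection exists because, as the comment notes, calling a `Logger` directly is not safe from every context (trap handlers in particular), while pushing onto a `Queue` is; the background thread started above drains the queue into `@@logger`. A small sketch, assuming you want to emit a log line yourself from a signal handler:

```ruby
require "logger"
require "rdkafka"

# Each entry is a [Logger::Severity, String] pair, matching what the
# librdkafka log callback pushes onto this queue.
Signal.trap("TERM") do
  Rdkafka::Config.log_queue << [Logger::INFO, "rdkafka: TERM received, shutting down"]
end
```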
```diff
@@ -33,11 +54,11 @@ module Rdkafka
     # You can configure if and how often this happens using `statistics.interval.ms`.
     # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
     #
-    # @param callback [Proc] The callback
+    # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def self.statistics_callback=(callback)
-      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @@statistics_callback = callback
     end
 
@@ -48,6 +69,25 @@ module Rdkafka
       @@statistics_callback
     end
 
+    # Set a callback that will be called every time the underlying client emits an error.
+    # If this callback is not set, global errors such as brokers becoming unavailable will only be sent to the logger, as defined by librdkafka.
+    # The callback is called with an instance of RdKafka::Error.
+    #
+    # @param callback [Proc, #call] The callback
+    #
+    # @return [nil]
+    def self.error_callback=(callback)
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+      @@error_callback = callback
+    end
+
+    # Returns the current error callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    def self.error_callback
+      @@error_callback
+    end
+
     # @private
     def self.opaques
       @@opaques
```
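Both setters now accept any object that responds to `#call`, not only a `Proc`, as the `respond_to?(:call)` check above shows. A hedged sketch using a plain callable object as the statistics callback; the `rxmsgs` field name is taken from librdkafka's STATISTICS.md:

```ruby
require "rdkafka"

class StatsReporter
  # Called with the statistics hash emitted by librdkafka
  def call(stats)
    puts "messages consumed so far: #{stats["rxmsgs"]}"
  end
end

Rdkafka::Config.statistics_callback = StatsReporter.new
```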
```diff
@@ -137,13 +177,26 @@ module Rdkafka
       # Create Kafka config
       config = native_config(opaque)
       # Set callback to receive delivery reports on config
-      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
+      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
       Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
         opaque.producer = producer
       end
     end
 
+    # Create an admin instance with this configuration.
+    #
+    # @raise [ConfigError] When the configuration contains invalid options
+    # @raise [ClientCreationError] When the native client cannot be created
+    #
+    # @return [Admin] The created admin instance
+    def admin
+      opaque = Opaque.new
+      config = native_config(opaque)
+      Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
+      Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+    end
+
     # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
     class ConfigError < RuntimeError; end
 
@@ -190,6 +243,9 @@ module Rdkafka
 
       # Set stats callback
       Rdkafka::Bindings.rd_kafka_conf_set_stats_cb(config, Rdkafka::Bindings::StatsCallback)
+
+      # Set error callback
+      Rdkafka::Bindings.rd_kafka_conf_set_error_cb(config, Rdkafka::Bindings::ErrorCallback)
     end
   end
 
```
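`Config#admin` wires the background event callback from callbacks.rb into a new client, so topic create and delete results can resolve the handles returned by `Rdkafka::Admin` (defined in the new data/lib/rdkafka/admin.rb, which is listed above but not shown in this view). A hedged usage sketch; the `create_topic` argument order (name, partition count, replication factor) follows that file, and the broker address is a placeholder:

```ruby
require "rdkafka"

config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
admin  = config.admin

# Both calls return handles that BackgroundEventCallback resolves
create_handle = admin.create_topic("example-topic", 3, 1)
create_handle.wait(max_wait_timeout: 15)

delete_handle = admin.delete_topic("example-topic")
delete_handle.wait(max_wait_timeout: 15)

admin.close
```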
data/lib/rdkafka/consumer.rb
CHANGED
```diff
@@ -36,6 +36,8 @@ module Rdkafka
     #
     # @return [nil]
     def subscribe(*topics)
+      closed_consumer_check(__method__)
+
       # Create topic partition list with topics and no partition set
       tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
@@ -49,7 +51,7 @@ module Rdkafka
         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
       end
     ensure
-      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
     end
 
     # Unsubscribe from all subscribed topics.
@@ -58,6 +60,8 @@ module Rdkafka
     #
     # @return [nil]
     def unsubscribe
+      closed_consumer_check(__method__)
+
       response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
@@ -72,6 +76,8 @@ module Rdkafka
     #
     # @return [nil]
     def pause(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -98,6 +104,8 @@ module Rdkafka
     #
     # @return [nil]
     def resume(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -120,6 +128,8 @@ module Rdkafka
     #
     # @return [TopicPartitionList]
     def subscription
+      closed_consumer_check(__method__)
+
       ptr = FFI::MemoryPointer.new(:pointer)
       response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
 
@@ -142,6 +152,8 @@ module Rdkafka
     #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -164,6 +176,8 @@ module Rdkafka
     #
     # @return [TopicPartitionList]
     def assignment
+      closed_consumer_check(__method__)
+
       ptr = FFI::MemoryPointer.new(:pointer)
       response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
       if response != 0
@@ -180,7 +194,7 @@ module Rdkafka
         end
       end
     ensure
-      ptr.free
+      ptr.free unless ptr.nil?
     end
 
     # Return the current committed offset per partition for this consumer group.
@@ -193,6 +207,8 @@ module Rdkafka
     #
     # @return [TopicPartitionList]
     def committed(list=nil, timeout_ms=1200)
+      closed_consumer_check(__method__)
+
       if list.nil?
         list = assignment
       elsif !list.is_a?(TopicPartitionList)
@@ -222,6 +238,8 @@ module Rdkafka
     #
     # @return [Integer] The low and high watermark
     def query_watermark_offsets(topic, partition, timeout_ms=200)
+      closed_consumer_check(__method__)
+
       low = FFI::MemoryPointer.new(:int64, 1)
       high = FFI::MemoryPointer.new(:int64, 1)
 
@@ -239,8 +257,8 @@ module Rdkafka
 
       return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
     ensure
-      low.free
-      high.free
+      low.free unless low.nil?
+      high.free unless high.nil?
     end
 
     # Calculate the consumer lag per partition for the provided topic partition list.
@@ -279,6 +297,7 @@ module Rdkafka
     #
     # @return [String, nil]
     def cluster_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
     end
 
@@ -288,6 +307,7 @@ module Rdkafka
     #
     # @return [String, nil]
     def member_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
     end
 
@@ -301,6 +321,8 @@ module Rdkafka
     #
     # @return [nil]
     def store_offset(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -331,6 +353,8 @@ module Rdkafka
     #
     # @return [nil]
     def seek(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -369,6 +393,8 @@ module Rdkafka
     #
     # @return [nil]
     def commit(list=nil, async=false)
+      closed_consumer_check(__method__)
+
       if !list.nil? && !list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be nil or a TopicPartitionList")
       end
@@ -393,7 +419,7 @@ module Rdkafka
     #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
     def poll(timeout_ms)
-
+      closed_consumer_check(__method__)
 
       message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
       if message_ptr.null?
```
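Every public consumer method now starts with `closed_consumer_check`, so calling into a consumer after `#close` raises a descriptive `Rdkafka::ClosedConsumerError` (presumably added in lib/rdkafka/error.rb, whose diff is not shown here) instead of handing a nil native handle to the FFI layer. A short sketch; broker address, group id, and topic are placeholders:

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id"          => "example-group"
).consumer

consumer.close

begin
  consumer.subscribe("events")
rescue Rdkafka::ClosedConsumerError => e
  # The offending method name is passed to the error when it is raised
  warn e.message
end
```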
```diff
@@ -441,5 +467,99 @@ module Rdkafka
         end
       end
     end
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
+    end
+
+    # Poll for new messages and yield them in batches that may contain
+    # messages from more than one partition.
+    #
+    # Rather than yield each message immediately as soon as it is received,
+    # each_batch will attempt to wait for as long as `timeout_ms` in order
+    # to create a batch of up to but no more than `max_items` in size.
+    #
+    # Said differently, if more than `max_items` are available within
+    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
+    # array, but if `timeout_ms` passes by with fewer messages arriving, it
+    # will yield an array of fewer messages, quite possibly zero.
+    #
+    # In order to prevent wrongly auto committing many messages at once across
+    # possibly many partitions, callers must explicitly indicate which messages
+    # have been successfully processed as some consumed messages may not have
+    # been yielded yet. To do this, the caller should set
+    # `enable.auto.offset.store` to false and pass processed messages to
+    # {store_offset}. It is also possible, though more complex, to set
+    # 'enable.auto.commit' to false and then pass a manually assembled
+    # TopicPartitionList to {commit}.
+    #
+    # As with `each`, iteration will end when the consumer is closed.
+    #
+    # Exception behavior is more complicated than with `each`, in that if
+    # :yield_on_error is true, and an exception is raised during the
+    # poll, and messages have already been received, they will be yielded to
+    # the caller before the exception is allowed to propogate.
+    #
+    # If you are setting either auto.commit or auto.offset.store to false in
+    # the consumer configuration, then you should let yield_on_error keep its
+    # default value of false because you are gauranteed to see these messages
+    # again. However, if both auto.commit and auto.offset.store are set to
+    # true, you should set yield_on_error to true so you can process messages
+    # that you may or may not see again.
+    #
+    # @param max_items [Integer] Maximum size of the yielded array of messages
+    #
+    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
+    #
+    # @param timeout_ms [Integer] max time to wait for up to max_items
+    #
+    # @raise [RdkafkaError] When polling fails
+    #
+    # @yield [messages, pending_exception]
+    # @yieldparam messages [Array] An array of received Message
+    # @yieldparam pending_exception [Exception] normally nil, or an exception
+    # which will be propogated after processing of the partial batch is complete.
+    #
+    # @return [nil]
+    def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
+      closed_consumer_check(__method__)
+      slice = []
+      bytes = 0
+      end_time = monotonic_now + timeout_ms / 1000.0
+      loop do
+        break if @closing
+        max_wait = end_time - monotonic_now
+        max_wait_ms = if max_wait <= 0
+                        0 # should not block, but may retrieve a message
+                      else
+                        (max_wait * 1000).floor
+                      end
+        message = nil
+        begin
+          message = poll max_wait_ms
+        rescue Rdkafka::RdkafkaError => error
+          raise unless yield_on_error
+          raise if slice.empty?
+          yield slice.dup, error
+          raise
+        end
+        if message
+          slice << message
+          bytes += message.payload.bytesize
+        end
+        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
+          yield slice.dup, nil
+          slice.clear
+          bytes = 0
+          end_time = monotonic_now + timeout_ms / 1000.0
+        end
+      end
+    end
+
+    private
+    def monotonic_now
+      # needed because Time.now can go backwards
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
   end
 end
```
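A hedged usage sketch of the new `each_batch`, following the guidance in its documentation above: auto offset storage is disabled and offsets are stored only after each message has actually been handled. Broker address, group id, and topic are placeholders:

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers"        => "localhost:9092",
  "group.id"                 => "batch-example",
  "enable.auto.offset.store" => false
).consumer

consumer.subscribe("events")

consumer.each_batch(max_items: 100, timeout_ms: 250) do |messages, _pending_exception|
  messages.each do |message|
    puts "#{message.topic}/#{message.partition}@#{message.offset}: #{message.payload}"
    consumer.store_offset(message) # mark it as processed, per the docs above
  end
end
```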