rdkafka 0.6.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +23 -0
  3. data/CHANGELOG.md +27 -0
  4. data/README.md +9 -9
  5. data/docker-compose.yml +17 -11
  6. data/ext/README.md +10 -15
  7. data/ext/Rakefile +24 -3
  8. data/lib/rdkafka.rb +8 -0
  9. data/lib/rdkafka/abstract_handle.rb +82 -0
  10. data/lib/rdkafka/admin.rb +155 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  15. data/lib/rdkafka/bindings.rb +64 -18
  16. data/lib/rdkafka/callbacks.rb +106 -0
  17. data/lib/rdkafka/config.rb +38 -9
  18. data/lib/rdkafka/consumer.rb +221 -46
  19. data/lib/rdkafka/consumer/headers.rb +7 -5
  20. data/lib/rdkafka/consumer/partition.rb +1 -1
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +6 -16
  22. data/lib/rdkafka/error.rb +35 -4
  23. data/lib/rdkafka/metadata.rb +92 -0
  24. data/lib/rdkafka/producer.rb +50 -24
  25. data/lib/rdkafka/producer/delivery_handle.rb +7 -49
  26. data/lib/rdkafka/producer/delivery_report.rb +7 -2
  27. data/lib/rdkafka/version.rb +3 -3
  28. data/rdkafka.gemspec +3 -3
  29. data/spec/rdkafka/abstract_handle_spec.rb +114 -0
  30. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  31. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  32. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  33. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  34. data/spec/rdkafka/admin_spec.rb +203 -0
  35. data/spec/rdkafka/bindings_spec.rb +28 -10
  36. data/spec/rdkafka/callbacks_spec.rb +20 -0
  37. data/spec/rdkafka/config_spec.rb +51 -9
  38. data/spec/rdkafka/consumer/message_spec.rb +6 -1
  39. data/spec/rdkafka/consumer_spec.rb +287 -20
  40. data/spec/rdkafka/error_spec.rb +7 -3
  41. data/spec/rdkafka/metadata_spec.rb +78 -0
  42. data/spec/rdkafka/producer/delivery_handle_spec.rb +3 -43
  43. data/spec/rdkafka/producer/delivery_report_spec.rb +5 -1
  44. data/spec/rdkafka/producer_spec.rb +220 -100
  45. data/spec/spec_helper.rb +34 -6
  46. metadata +37 -13
  47. data/.travis.yml +0 -34
data/lib/rdkafka/admin/create_topic_handle.rb
@@ -0,0 +1,27 @@
+module Rdkafka
+  class Admin
+    class CreateTopicHandle < AbstractHandle
+      layout :pending, :bool,
+             :response, :int,
+             :error_string, :pointer,
+             :result_name, :pointer
+
+      # @return [String] the name of the operation
+      def operation_name
+        "create topic"
+      end
+
+      # @return [CreateTopicReport] the report for this create topic operation
+      def create_result
+        CreateTopicReport.new(self[:error_string], self[:result_name])
+      end
+
+      def raise_error
+        raise RdkafkaError.new(
+          self[:response],
+          broker_message: CreateTopicReport.new(self[:error_string], self[:result_name]).error_string
+        )
+      end
+    end
+  end
+end
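A minimal usage sketch (not part of the diff): the handle above is returned by Admin#create_topic, which lives in data/lib/rdkafka/admin.rb and is not reproduced in this excerpt, so the exact call signature below is an assumption based on that file; the broker address and topic name are made up.

    require "rdkafka"

    config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092") # hypothetical broker
    admin  = config.admin

    # Assumed signature: create_topic(name, partition_count, replication_factor).
    # #wait either returns the CreateTopicReport built by #create_result above
    # or calls #raise_error with the broker's error string.
    handle = admin.create_topic("example-topic", 3, 1)
    report = handle.wait(max_wait_timeout: 15.0)
    puts report.result_name

    admin.close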
data/lib/rdkafka/admin/create_topic_report.rb
@@ -0,0 +1,22 @@
+module Rdkafka
+  class Admin
+    class CreateTopicReport
+      # Any error message generated from the CreateTopic
+      # @return [String]
+      attr_reader :error_string
+
+      # The name of the topic created
+      # @return [String]
+      attr_reader :result_name
+
+      def initialize(error_string, result_name)
+        if error_string != FFI::Pointer::NULL
+          @error_string = error_string.read_string
+        end
+        if result_name != FFI::Pointer::NULL
+          @result_name = result_name.read_string
+        end
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/delete_topic_handle.rb
@@ -0,0 +1,27 @@
+module Rdkafka
+  class Admin
+    class DeleteTopicHandle < AbstractHandle
+      layout :pending, :bool,
+             :response, :int,
+             :error_string, :pointer,
+             :result_name, :pointer
+
+      # @return [String] the name of the operation
+      def operation_name
+        "delete topic"
+      end
+
+      # @return [DeleteTopicReport] the report for this delete topic operation
+      def create_result
+        DeleteTopicReport.new(self[:error_string], self[:result_name])
+      end
+
+      def raise_error
+        raise RdkafkaError.new(
+          self[:response],
+          broker_message: DeleteTopicReport.new(self[:error_string], self[:result_name]).error_string
+        )
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/delete_topic_report.rb
@@ -0,0 +1,22 @@
+module Rdkafka
+  class Admin
+    class DeleteTopicReport
+      # Any error message generated from the DeleteTopic
+      # @return [String]
+      attr_reader :error_string
+
+      # The name of the topic deleted
+      # @return [String]
+      attr_reader :result_name
+
+      def initialize(error_string, result_name)
+        if error_string != FFI::Pointer::NULL
+          @error_string = error_string.read_string
+        end
+        if result_name != FFI::Pointer::NULL
+          @result_name = result_name.read_string
+        end
+      end
+    end
+  end
+end
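Deleting a topic follows the same handle/report pattern. Another hedged sketch, reusing the admin instance from the previous example and assuming the Admin#delete_topic call from admin.rb (not shown in this excerpt):

    handle = admin.delete_topic("example-topic")
    begin
      report = handle.wait(max_wait_timeout: 15.0)
      puts "deleted #{report.result_name}"
    rescue Rdkafka::RdkafkaError => e
      # #raise_error above attaches the broker's error string to the RdkafkaError.
      warn "delete failed: #{e.message}"
    end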
data/lib/rdkafka/bindings.rb
@@ -8,7 +8,7 @@ module Rdkafka
     extend FFI::Library
 
     def self.lib_extension
-      if Gem::Platform.local.os.include?("darwin")
+      if RbConfig::CONFIG['host_os'] =~ /darwin/
        'dylib'
      else
        'so'
@@ -22,6 +22,11 @@ module Rdkafka
     RD_KAFKA_RESP_ERR__NOENT = -156
     RD_KAFKA_RESP_ERR_NO_ERROR = 0
 
+    RD_KAFKA_OFFSET_END = -1
+    RD_KAFKA_OFFSET_BEGINNING = -2
+    RD_KAFKA_OFFSET_STORED = -1000
+    RD_KAFKA_OFFSET_INVALID = -1001
+
     class SizePtr < FFI::Struct
       layout :value, :size_t
     end
@@ -35,6 +40,8 @@ module Rdkafka
 
     attach_function :rd_kafka_memberid, [:pointer], :string
     attach_function :rd_kafka_clusterid, [:pointer], :string
+    attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
+    attach_function :rd_kafka_metadata_destroy, [:pointer], :void
 
     # Message struct
 
@@ -123,7 +130,7 @@ module Rdkafka
      else
        Logger::UNKNOWN
      end
-      Rdkafka::Config.logger.add(severity) { "rdkafka: #{line}" }
+      Rdkafka::Config.log_queue << [severity, "rdkafka: #{line}"]
    end
 
    StatsCallback = FFI::Function.new(
@@ -227,22 +234,61 @@ module Rdkafka
     callback :delivery_cb, [:pointer, :pointer, :pointer], :void
     attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
-    DeliveryCallback = FFI::Function.new(
-      :void, [:pointer, :pointer, :pointer]
-    ) do |client_ptr, message_ptr, opaque_ptr|
-      message = Message.new(message_ptr)
-      delivery_handle_ptr_address = message[:_private].address
-      if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
-        # Update delivery handle
-        delivery_handle[:pending] = false
-        delivery_handle[:response] = message[:err]
-        delivery_handle[:partition] = message[:partition]
-        delivery_handle[:offset] = message[:offset]
-        # Call delivery callback on opaque
-        if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-          opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset]))
-        end
-      end
+    # Partitioner
+    attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+
+    def self.partitioner(str, partition_count)
+      # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
+      return -1 unless partition_count&.nonzero?
+
+      str_ptr = FFI::MemoryPointer.from_string(str)
+      rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
     end
+
+    # Create Topics
+
+    RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
+    RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
+
+    attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
+    attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
+    attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
+    attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
+    attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
+    attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+
+    # Delete Topics
+
+    RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
+    RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
+
+    attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
+    attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
+    attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
+    attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
+    attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+
+    # Background Queue and Callback
+
+    attach_function :rd_kafka_queue_get_background, [:pointer], :pointer
+    attach_function :rd_kafka_conf_set_background_event_cb, [:pointer, :pointer], :void
+    attach_function :rd_kafka_queue_destroy, [:pointer], :void
+
+    # Admin Options
+
+    attach_function :rd_kafka_AdminOptions_new, [:pointer, :int32], :pointer
+    attach_function :rd_kafka_AdminOptions_set_opaque, [:pointer, :pointer], :void
+    attach_function :rd_kafka_AdminOptions_destroy, [:pointer], :void
+
+    # Extracting data from event types
+
+    attach_function :rd_kafka_event_type, [:pointer], :int32
+    attach_function :rd_kafka_event_opaque, [:pointer], :pointer
+
+    # Extracting data from topic results
+
+    attach_function :rd_kafka_topic_result_error, [:pointer], :int32
+    attach_function :rd_kafka_topic_result_error_string, [:pointer], :pointer
+    attach_function :rd_kafka_topic_result_name, [:pointer], :pointer
   end
 end
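The new Bindings.partitioner helper exposes librdkafka's consistent-random partitioner to Ruby code. A small illustration based directly on the hunk above (key and partition count are arbitrary):

    # Stable mapping of a message key to a partition for a given partition count.
    Rdkafka::Bindings.partitioner("user-42", 8)   # => an Integer between 0 and 7

    # With a nil or zero partition count the helper returns -1,
    # i.e. RD_KAFKA_PARTITION_UA (unassigned), as the comment in the code notes.
    Rdkafka::Bindings.partitioner("user-42", nil) # => -1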
data/lib/rdkafka/callbacks.rb
@@ -0,0 +1,106 @@
+module Rdkafka
+  module Callbacks
+
+    # Extracts attributes of a rd_kafka_topic_result_t
+    #
+    # @private
+    class TopicResult
+      attr_reader :result_error, :error_string, :result_name
+
+      def initialize(topic_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_topic_result_error(topic_result_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_topic_result_error_string(topic_result_pointer)
+        @result_name = Rdkafka::Bindings.rd_kafka_topic_result_name(topic_result_pointer)
+      end
+
+      def self.create_topic_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # FFI Function used for Create Topic and Delete Topic callbacks
+    BackgroundEventCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, event_ptr, opaque_ptr|
+      BackgroundEventCallback.call(client_ptr, event_ptr, opaque_ptr)
+    end
+
+    # @private
+    class BackgroundEventCallback
+      def self.call(_, event_ptr, _)
+        event_type = Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
+        if event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
+          process_create_topic(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
+          process_delete_topic(event_ptr)
+        end
+      end
+
+      private
+
+      def self.process_create_topic(event_ptr)
+        create_topics_result = Rdkafka::Bindings.rd_kafka_event_CreateTopics_result(event_ptr)
+
+        # Get the number of create topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_topic_result_array = Rdkafka::Bindings.rd_kafka_CreateTopics_result_topics(create_topics_result, pointer_to_size_t)
+        create_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_topic_result_array)
+        create_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_topic_handle = Rdkafka::Admin::CreateTopicHandle.remove(create_topic_handle_ptr.address)
+          create_topic_handle[:response] = create_topic_results[0].result_error
+          create_topic_handle[:error_string] = create_topic_results[0].error_string
+          create_topic_handle[:result_name] = create_topic_results[0].result_name
+          create_topic_handle[:pending] = false
+        end
+      end
+
+      def self.process_delete_topic(event_ptr)
+        delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
+
+        # Get the number of topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        delete_topic_result_array = Rdkafka::Bindings.rd_kafka_DeleteTopics_result_topics(delete_topics_result, pointer_to_size_t)
+        delete_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, delete_topic_result_array)
+        delete_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if delete_topic_handle = Rdkafka::Admin::DeleteTopicHandle.remove(delete_topic_handle_ptr.address)
+          delete_topic_handle[:response] = delete_topic_results[0].result_error
+          delete_topic_handle[:error_string] = delete_topic_results[0].error_string
+          delete_topic_handle[:result_name] = delete_topic_results[0].result_name
+          delete_topic_handle[:pending] = false
+        end
+      end
+    end
+
+    # FFI Function used for Message Delivery callbacks
+
+    DeliveryCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, message_ptr, opaque_ptr|
+      DeliveryCallback.call(client_ptr, message_ptr, opaque_ptr)
+    end
+
+    # @private
+    class DeliveryCallback
+      def self.call(_, message_ptr, opaque_ptr)
+        message = Rdkafka::Bindings::Message.new(message_ptr)
+        delivery_handle_ptr_address = message[:_private].address
+        if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+          # Update delivery handle
+          delivery_handle[:response] = message[:err]
+          delivery_handle[:partition] = message[:partition]
+          delivery_handle[:offset] = message[:offset]
+          delivery_handle[:pending] = false
+          # Call delivery callback on opaque
+          if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+          end
+        end
+      end
+    end
+  end
+end
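DeliveryCallback now forwards message[:err] into the DeliveryReport as a third constructor argument. A hedged sketch of a producer callback that inspects it, assuming the updated delivery_report.rb (+7 -2, not reproduced in this excerpt) exposes the value as #error:

    config   = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092") # hypothetical broker
    producer = config.producer

    producer.delivery_callback = lambda do |report|
      # Assumption: report.error carries the raw librdkafka error code taken
      # from message[:err] above; 0 means the message was delivered.
      if report.error.nil? || report.error.zero?
        puts "delivered to partition #{report.partition} at offset #{report.offset}"
      else
        warn "delivery failed with error code #{report.error}"
      end
    end

    producer.produce(topic: "example-topic", payload: "hello").wait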
data/lib/rdkafka/config.rb
@@ -11,6 +11,15 @@ module Rdkafka
     @@statistics_callback = nil
     # @private
     @@opaques = {}
+    # @private
+    @@log_queue = Queue.new
+
+    Thread.start do
+      loop do
+        severity, msg = @@log_queue.pop
+        @@logger.add(severity, msg)
+      end
+    end
 
     # Returns the current logger, by default this is a logger to stdout.
     #
@@ -19,6 +28,15 @@ module Rdkafka
       @@logger
     end
 
+    # Returns a queue whose contents will be passed to the configured logger. Each entry
+    # should follow the format [Logger::Severity, String]. The benefit over calling the
+    # logger directly is that this is safe to use from trap contexts.
+    #
+    # @return [Queue]
+    def self.log_queue
+      @@log_queue
+    end
+
     # Set the logger that will be used for all logging output by this library.
     #
     # @param logger [Logger] The logger to be used
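Because the native log callback in bindings.rb now only pushes onto this queue (see the bindings.rb hunk earlier), log lines are written by the background thread started above rather than on librdkafka's thread. A small sketch of pushing to the queue directly, for example from a trap context where calling the Logger itself would not be allowed:

    require "logger"
    require "rdkafka"

    Rdkafka::Config.logger = Logger.new($stdout)

    trap("TERM") do
      # Entries follow the [Logger::Severity, String] format documented above;
      # the background thread drains the queue and forwards to the logger.
      Rdkafka::Config.log_queue << [Logger::INFO, "rdkafka: shutting down"]
    end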
@@ -33,11 +51,11 @@ module Rdkafka
     # You can configure if and how often this happens using `statistics.interval.ms`.
     # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
     #
-    # @param callback [Proc] The callback
+    # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def self.statistics_callback=(callback)
-      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @@statistics_callback = callback
     end
 
@@ -67,7 +85,7 @@ module Rdkafka
 
     # Returns a new config with the provided options which are merged with {DEFAULT_CONFIG}.
     #
-    # @param config_hash [Hash<String,Symbol => String>] The config options for rdkafka
+    # @param config_hash [Hash{String,Symbol => String}] The config options for rdkafka
     #
     # @return [Config]
     def initialize(config_hash = {})
@@ -137,13 +155,26 @@ module Rdkafka
       # Create Kafka config
       config = native_config(opaque)
       # Set callback to receive delivery reports on config
-      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
+      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
       Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
         opaque.producer = producer
       end
     end
 
+    # Create an admin instance with this configuration.
+    #
+    # @raise [ConfigError] When the configuration contains invalid options
+    # @raise [ClientCreationError] When the native client cannot be created
+    #
+    # @return [Admin] The created admin instance
+    def admin
+      opaque = Opaque.new
+      config = native_config(opaque)
+      Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
+      Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+    end
+
     # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
     class ConfigError < RuntimeError; end
 
@@ -155,7 +186,7 @@ module Rdkafka
 
     private
 
-    # This method is only intented to be used to create a client,
+    # This method is only intended to be used to create a client,
     # using it in another way will leak memory.
     def native_config(opaque=nil)
       Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
@@ -212,10 +243,8 @@ module Rdkafka
         Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
       )
 
-      FFI::AutoPointer.new(
-        handle,
-        Rdkafka::Bindings.method(:rd_kafka_destroy)
-      )
+      # Return handle which should be closed using rd_kafka_destroy after usage.
+      handle
     end
   end
 
data/lib/rdkafka/consumer.rb
@@ -5,6 +5,9 @@ module Rdkafka
   #
   # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
   # mandatory to set `:"group.id"` in the configuration.
+  #
+  # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
+  # `each_slice` to consume batches of messages.
   class Consumer
     include Enumerable
 
@@ -17,8 +20,12 @@
     # Close this consumer
     # @return [nil]
     def close
+      return unless @native_kafka
+
       @closing = true
       Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
+      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+      @native_kafka = nil
     end
 
     # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -29,21 +36,22 @@
     #
     # @return [nil]
     def subscribe(*topics)
+      closed_consumer_check(__method__)
+
       # Create topic partition list with topics and no partition set
-      tpl = TopicPartitionList.new_native_tpl(topics.length)
+      tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
       topics.each do |topic|
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
-          tpl,
-          topic,
-          -1
-        )
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
       end
+
       # Subscribe to topic partition list and check this was successful
       response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
       if response != 0
         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
       end
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
     end
 
     # Unsubscribe from all subscribed topics.
@@ -52,6 +60,8 @@
     #
     # @return [nil]
     def unsubscribe
+      closed_consumer_check(__method__)
+
       response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
@@ -66,15 +76,23 @@
     #
     # @return [nil]
     def pause(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
+
       tpl = list.to_native_tpl
-      response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
 
-      if response != 0
-        list = TopicPartitionList.from_native_tpl(tpl)
-        raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+      begin
+        response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+
+        if response != 0
+          list = TopicPartitionList.from_native_tpl(tpl)
+          raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+        end
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
       end
     end
 
@@ -86,13 +104,21 @@
     #
     # @return [nil]
     def resume(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
       end
+
       tpl = list.to_native_tpl
-      response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
-      if response != 0
-        raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+
+      begin
+        response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+        if response != 0
+          raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+        end
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
       end
     end
 
@@ -102,17 +128,21 @@
     #
     # @return [TopicPartitionList]
     def subscription
-      tpl = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
+      closed_consumer_check(__method__)
+
+      ptr = FFI::MemoryPointer.new(:pointer)
+      response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
-      tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+      native = ptr.read_pointer
 
       begin
-        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
       ensure
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
       end
     end
 
@@ -122,13 +152,21 @@
     #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
+
       tpl = list.to_native_tpl
-      response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
-      if response != 0
-        raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+
+      begin
+        response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+        if response != 0
+          raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+        end
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -138,19 +176,25 @@
     #
     # @return [TopicPartitionList]
     def assignment
-      tpl = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, tpl)
+      closed_consumer_check(__method__)
+
+      ptr = FFI::MemoryPointer.new(:pointer)
+      response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
 
-      tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+      tpl = ptr.read_pointer
 
-      begin
-        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
-      ensure
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+      if !tpl.null?
+        begin
+          Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+        ensure
+          Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+        end
       end
+    ensure
+      ptr.free unless ptr.nil?
     end
 
     # Return the current committed offset per partition for this consumer group.
@@ -163,17 +207,25 @@
     #
     # @return [TopicPartitionList]
     def committed(list=nil, timeout_ms=1200)
+      closed_consumer_check(__method__)
+
       if list.nil?
         list = assignment
       elsif !list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be nil or a TopicPartitionList")
       end
+
       tpl = list.to_native_tpl
-      response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
-      if response != 0
-        raise Rdkafka::RdkafkaError.new(response)
+
+      begin
+        response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+        if response != 0
+          raise Rdkafka::RdkafkaError.new(response)
+        end
+        TopicPartitionList.from_native_tpl(tpl)
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
       end
-      TopicPartitionList.from_native_tpl(tpl)
     end
 
     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
@@ -186,6 +238,8 @@
     #
     # @return [Integer] The low and high watermark
     def query_watermark_offsets(topic, partition, timeout_ms=200)
+      closed_consumer_check(__method__)
+
       low = FFI::MemoryPointer.new(:int64, 1)
       high = FFI::MemoryPointer.new(:int64, 1)
 
@@ -195,13 +249,16 @@
         partition,
         low,
         high,
-        timeout_ms
+        timeout_ms,
       )
       if response != 0
         raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
       end
 
-      return low.read_int64, high.read_int64
+      return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
+    ensure
+      low.free unless low.nil?
+      high.free unless high.nil?
     end
 
     # Calculate the consumer lag per partition for the provided topic partition list.
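A short usage sketch for the watermark and lag helpers shown above (broker address, group id, topic name, and the example output are made up):

    # A consumer requires a config that also sets :"group.id".
    consumer = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "example-group"
    ).consumer

    # Low and high watermarks for partition 0 of a topic.
    low, high = consumer.query_watermark_offsets("example-topic", 0)
    puts "partition 0 spans offsets #{low}..#{high}"

    # committed returns a TopicPartitionList for the current assignment;
    # lag compares its offsets against the high watermarks per partition.
    puts consumer.lag(consumer.committed).inspect
    # => e.g. {"example-topic"=>{0=>12, 1=>0}}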
@@ -217,6 +274,7 @@
     # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
     def lag(topic_partition_list, watermark_timeout_ms=100)
       out = {}
+
       topic_partition_list.to_h.each do |topic, partitions|
         # Query high watermarks for this topic's partitions
         # and compare to the offset in the list.
@@ -239,6 +297,7 @@
     #
     # @return [String, nil]
     def cluster_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
     end
 
@@ -248,6 +307,7 @@
     #
     # @return [String, nil]
     def member_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
     end
 
@@ -261,6 +321,8 @@
     #
     # @return [nil]
     def store_offset(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -291,6 +353,8 @@
     #
     # @return [nil]
     def seek(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -313,26 +377,37 @@
       end
     end
 
-    # Commit the current offsets of this consumer
+    # Manually commit the current offsets of this consumer.
+    #
+    # To use this set `enable.auto.commit` to `false` to disable automatic triggering
+    # of commits.
+    #
+    # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
+    # message for every partition is used. If set to `false` you can use {store_offset} to
+    # indicate when a message has been fully processed.
     #
     # @param list [TopicPartitionList,nil] The topic with partitions to commit
     # @param async [Boolean] Whether to commit async or wait for the commit to finish
     #
-    # @raise [RdkafkaError] When comitting fails
+    # @raise [RdkafkaError] When committing fails
     #
     # @return [nil]
     def commit(list=nil, async=false)
+      closed_consumer_check(__method__)
+
       if !list.nil? && !list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be nil or a TopicPartitionList")
       end
-      tpl = if list
-        list.to_native_tpl
-      else
-        nil
-      end
-      response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
-      if response != 0
-        raise Rdkafka::RdkafkaError.new(response)
+
+      tpl = list ? list.to_native_tpl : nil
+
+      begin
+        response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+        if response != 0
+          raise Rdkafka::RdkafkaError.new(response)
+        end
+      ensure
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
       end
     end
 
@@ -344,6 +419,8 @@
     #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
     def poll(timeout_ms)
+      closed_consumer_check(__method__)
+
       message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
       if message_ptr.null?
         nil
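Putting the commit-related changes together: a hedged sketch of manual offset management, with enable.auto.offset.store disabled so that store_offset marks a message as processed and commit is triggered explicitly (configuration values and the process helper are illustrative):

    consumer = Rdkafka::Config.new(
      "bootstrap.servers"        => "localhost:9092",
      "group.id"                 => "example-group",
      "enable.auto.offset.store" => false
    ).consumer

    consumer.subscribe("example-topic")

    loop do
      message = consumer.poll(250)
      next unless message

      process(message)                 # application-specific work (hypothetical helper)
      consumer.store_offset(message)   # mark this message as fully processed
      consumer.commit(nil, true)       # async commit; in real code commit periodically
    end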
@@ -367,16 +444,20 @@
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
+    # If `enable.partition.eof` is turned on in the config this will raise an
+    # error when an eof is reached, so you probably want to disable that when
+    # using this method of iteration.
+    #
     # @raise [RdkafkaError] When polling fails
     #
     # @yieldparam message [Message] Received message
     #
     # @return [nil]
-    def each(&block)
+    def each
       loop do
         message = poll(250)
         if message
-          block.call(message)
+          yield(message)
         else
           if @closing
             break
@@ -386,5 +467,99 @@
         end
       end
     end
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
+    end
+
+    # Poll for new messages and yield them in batches that may contain
+    # messages from more than one partition.
+    #
+    # Rather than yield each message immediately as soon as it is received,
+    # each_batch will attempt to wait for as long as `timeout_ms` in order
+    # to create a batch of up to but no more than `max_items` in size.
+    #
+    # Said differently, if more than `max_items` are available within
+    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
+    # array, but if `timeout_ms` passes by with fewer messages arriving, it
+    # will yield an array of fewer messages, quite possibly zero.
+    #
+    # In order to prevent wrongly auto committing many messages at once across
+    # possibly many partitions, callers must explicitly indicate which messages
+    # have been successfully processed as some consumed messages may not have
+    # been yielded yet. To do this, the caller should set
+    # `enable.auto.offset.store` to false and pass processed messages to
+    # {store_offset}. It is also possible, though more complex, to set
+    # 'enable.auto.commit' to false and then pass a manually assembled
+    # TopicPartitionList to {commit}.
+    #
+    # As with `each`, iteration will end when the consumer is closed.
+    #
+    # Exception behavior is more complicated than with `each`, in that if
+    # :yield_on_error is true, and an exception is raised during the
+    # poll, and messages have already been received, they will be yielded to
+    # the caller before the exception is allowed to propagate.
+    #
+    # If you are setting either auto.commit or auto.offset.store to false in
+    # the consumer configuration, then you should let yield_on_error keep its
+    # default value of false because you are guaranteed to see these messages
+    # again. However, if both auto.commit and auto.offset.store are set to
+    # true, you should set yield_on_error to true so you can process messages
+    # that you may or may not see again.
+    #
+    # @param max_items [Integer] Maximum size of the yielded array of messages
+    #
+    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
+    #
+    # @param timeout_ms [Integer] max time to wait for up to max_items
+    #
+    # @raise [RdkafkaError] When polling fails
+    #
+    # @yield [messages, pending_exception]
+    # @yieldparam messages [Array] An array of received Message
+    # @yieldparam pending_exception [Exception] normally nil, or an exception
+    # which will be propagated after processing of the partial batch is complete.
+    #
+    # @return [nil]
+    def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
+      closed_consumer_check(__method__)
+      slice = []
+      bytes = 0
+      end_time = monotonic_now + timeout_ms / 1000.0
+      loop do
+        break if @closing
+        max_wait = end_time - monotonic_now
+        max_wait_ms = if max_wait <= 0
+                        0 # should not block, but may retrieve a message
+                      else
+                        (max_wait * 1000).floor
+                      end
+        message = nil
+        begin
+          message = poll max_wait_ms
+        rescue Rdkafka::RdkafkaError => error
+          raise unless yield_on_error
+          raise if slice.empty?
+          yield slice.dup, error
+          raise
+        end
+        if message
          slice << message
+          bytes += message.payload.bytesize
+        end
+        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
+          yield slice.dup, nil
+          slice.clear
+          bytes = 0
+          end_time = monotonic_now + timeout_ms / 1000.0
+        end
+      end
+    end
+
+    private
+    def monotonic_now
+      # needed because Time.now can go backwards
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
   end
 end
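Finally, each_batch usage per the documentation added above, with a subscribed consumer as configured in the earlier sketches (handler body and topic name are illustrative):

    consumer.subscribe("example-topic")

    # Yields up to 100 messages, or whatever arrived within 250 ms; with
    # yield_on_error: true a partial batch is yielded together with the
    # pending exception before it is re-raised.
    consumer.each_batch(max_items: 100, timeout_ms: 250, yield_on_error: true) do |messages, error|
      messages.each { |m| puts m.payload }
      warn "batch interrupted: #{error}" if error
    end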