rdkafka 0.5.0 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +23 -0
  3. data/CHANGELOG.md +23 -0
  4. data/README.md +9 -9
  5. data/docker-compose.yml +17 -11
  6. data/ext/README.md +3 -15
  7. data/ext/Rakefile +23 -3
  8. data/lib/rdkafka.rb +8 -0
  9. data/lib/rdkafka/abstract_handle.rb +82 -0
  10. data/lib/rdkafka/admin.rb +144 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  15. data/lib/rdkafka/bindings.rb +63 -17
  16. data/lib/rdkafka/callbacks.rb +106 -0
  17. data/lib/rdkafka/config.rb +18 -7
  18. data/lib/rdkafka/consumer.rb +162 -46
  19. data/lib/rdkafka/consumer/headers.rb +7 -5
  20. data/lib/rdkafka/consumer/partition.rb +1 -1
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +6 -16
  22. data/lib/rdkafka/error.rb +35 -4
  23. data/lib/rdkafka/metadata.rb +92 -0
  24. data/lib/rdkafka/producer.rb +43 -15
  25. data/lib/rdkafka/producer/delivery_handle.rb +7 -49
  26. data/lib/rdkafka/producer/delivery_report.rb +7 -2
  27. data/lib/rdkafka/version.rb +3 -3
  28. data/rdkafka.gemspec +3 -3
  29. data/spec/rdkafka/abstract_handle_spec.rb +114 -0
  30. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  31. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  32. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  33. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  34. data/spec/rdkafka/admin_spec.rb +192 -0
  35. data/spec/rdkafka/bindings_spec.rb +20 -2
  36. data/spec/rdkafka/callbacks_spec.rb +20 -0
  37. data/spec/rdkafka/config_spec.rb +17 -2
  38. data/spec/rdkafka/consumer/message_spec.rb +6 -1
  39. data/spec/rdkafka/consumer_spec.rb +145 -19
  40. data/spec/rdkafka/error_spec.rb +7 -3
  41. data/spec/rdkafka/metadata_spec.rb +78 -0
  42. data/spec/rdkafka/producer/delivery_handle_spec.rb +3 -43
  43. data/spec/rdkafka/producer/delivery_report_spec.rb +5 -1
  44. data/spec/rdkafka/producer_spec.rb +147 -72
  45. data/spec/spec_helper.rb +34 -6
  46. metadata +34 -10
  47. data/.travis.yml +0 -34
data/lib/rdkafka/admin/create_topic_handle.rb
@@ -0,0 +1,27 @@
+ module Rdkafka
+   class Admin
+     class CreateTopicHandle < AbstractHandle
+       layout :pending, :bool,
+              :response, :int,
+              :error_string, :pointer,
+              :result_name, :pointer
+
+       # @return [String] the name of the operation
+       def operation_name
+         "create topic"
+       end
+
+       # @return [CreateTopicReport] the report with the result of the create topic operation
+       def create_result
+         CreateTopicReport.new(self[:error_string], self[:result_name])
+       end
+
+       def raise_error
+         raise RdkafkaError.new(
+           self[:response],
+           broker_message: CreateTopicReport.new(self[:error_string], self[:result_name]).error_string
+         )
+       end
+     end
+   end
+ end
data/lib/rdkafka/admin/create_topic_report.rb
@@ -0,0 +1,22 @@
+ module Rdkafka
+   class Admin
+     class CreateTopicReport
+       # Any error message generated from the CreateTopic
+       # @return [String]
+       attr_reader :error_string
+
+       # The name of the topic created
+       # @return [String]
+       attr_reader :result_name
+
+       def initialize(error_string, result_name)
+         if error_string != FFI::Pointer::NULL
+           @error_string = error_string.read_string
+         end
+         if result_name != FFI::Pointer::NULL
+           @result_name = result_name.read_string
+         end
+       end
+     end
+   end
+ end
data/lib/rdkafka/admin/delete_topic_handle.rb
@@ -0,0 +1,27 @@
+ module Rdkafka
+   class Admin
+     class DeleteTopicHandle < AbstractHandle
+       layout :pending, :bool,
+              :response, :int,
+              :error_string, :pointer,
+              :result_name, :pointer
+
+       # @return [String] the name of the operation
+       def operation_name
+         "delete topic"
+       end
+
+       # @return [DeleteTopicReport] the report with the result of the delete topic operation
+       def create_result
+         DeleteTopicReport.new(self[:error_string], self[:result_name])
+       end
+
+       def raise_error
+         raise RdkafkaError.new(
+           self[:response],
+           broker_message: DeleteTopicReport.new(self[:error_string], self[:result_name]).error_string
+         )
+       end
+     end
+   end
+ end
data/lib/rdkafka/admin/delete_topic_report.rb
@@ -0,0 +1,22 @@
+ module Rdkafka
+   class Admin
+     class DeleteTopicReport
+       # Any error message generated from the DeleteTopic
+       # @return [String]
+       attr_reader :error_string
+
+       # The name of the topic deleted
+       # @return [String]
+       attr_reader :result_name
+
+       def initialize(error_string, result_name)
+         if error_string != FFI::Pointer::NULL
+           @error_string = error_string.read_string
+         end
+         if result_name != FFI::Pointer::NULL
+           @result_name = result_name.read_string
+         end
+       end
+     end
+   end
+ end
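Both report classes read their fields from FFI pointers filled in by the background event callback (see data/lib/rdkafka/callbacks.rb below). End to end the flow looks roughly like this — a hypothetical sketch, where the `create_topic` arguments and `wait`'s `max_wait_timeout:` keyword are assumptions based on the `admin.rb` and `abstract_handle.rb` files added in this release:

    # Hypothetical sketch of the new handle/report flow.
    # `admin.create_topic(name, partitions, replication_factor)` and
    # `wait(max_wait_timeout:)` are assumed from data/lib/rdkafka/admin.rb
    # and data/lib/rdkafka/abstract_handle.rb, both added in this diff.
    handle = admin.create_topic("events", 3, 1)

    begin
      report = handle.wait(max_wait_timeout: 15.0) # blocks until :pending is false
      puts "created topic #{report.result_name}"
    rescue Rdkafka::RdkafkaError => e
      puts "create failed: #{e.message}" # raise_error attaches the broker's error_string
    end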
data/lib/rdkafka/bindings.rb
@@ -8,7 +8,7 @@ module Rdkafka
    extend FFI::Library
 
    def self.lib_extension
-     if Gem::Platform.local.os.include?("darwin")
+     if RbConfig::CONFIG['host_os'] =~ /darwin/
        'dylib'
      else
        'so'
@@ -22,6 +22,11 @@ module Rdkafka
    RD_KAFKA_RESP_ERR__NOENT = -156
    RD_KAFKA_RESP_ERR_NO_ERROR = 0
 
+   RD_KAFKA_OFFSET_END = -1
+   RD_KAFKA_OFFSET_BEGINNING = -2
+   RD_KAFKA_OFFSET_STORED = -1000
+   RD_KAFKA_OFFSET_INVALID = -1001
+
    class SizePtr < FFI::Struct
      layout :value, :size_t
    end
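These mirror librdkafka's logical `RD_KAFKA_OFFSET_*` values and can be used wherever a concrete offset is expected, for example when building an assignment by hand. A sketch, assuming the existing `TopicPartitionList#add_topic_and_partitions_with_offsets` helper:

    # Sketch: start partitions 0 and 1 of "events" from the oldest message,
    # using the new logical offset constant (assumes the existing
    # TopicPartitionList#add_topic_and_partitions_with_offsets helper).
    list = Rdkafka::Consumer::TopicPartitionList.new
    list.add_topic_and_partitions_with_offsets(
      "events",
      0 => Rdkafka::Bindings::RD_KAFKA_OFFSET_BEGINNING,
      1 => Rdkafka::Bindings::RD_KAFKA_OFFSET_BEGINNING
    )
    consumer.assign(list)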
@@ -35,6 +40,8 @@ module Rdkafka
 
    attach_function :rd_kafka_memberid, [:pointer], :string
    attach_function :rd_kafka_clusterid, [:pointer], :string
+   attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
+   attach_function :rd_kafka_metadata_destroy, [:pointer], :void
 
    # Message struct
 
@@ -164,6 +171,7 @@ module Rdkafka
    attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
    attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
    attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
+   attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
 
    # Headers
    attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -226,22 +234,60 @@ module Rdkafka
    callback :delivery_cb, [:pointer, :pointer, :pointer], :void
    attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
-   DeliveryCallback = FFI::Function.new(
-     :void, [:pointer, :pointer, :pointer]
-   ) do |client_ptr, message_ptr, opaque_ptr|
-     message = Message.new(message_ptr)
-     delivery_handle_ptr_address = message[:_private].address
-     if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
-       # Update delivery handle
-       delivery_handle[:pending] = false
-       delivery_handle[:response] = message[:err]
-       delivery_handle[:partition] = message[:partition]
-       delivery_handle[:offset] = message[:offset]
-       # Call delivery callback on opaque
-       if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-         opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset]))
-       end
-     end
+   # Partitioner
+   attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+
+   def self.partitioner(str, partition_count)
+     # Return RD_KAFKA_PARTITION_UA (unassigned partition) when partition count is nil/zero.
+     return -1 unless partition_count&.nonzero?
+
+     str_ptr = FFI::MemoryPointer.from_string(str)
+     rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
    end
+
+   # Create Topics
+
+   RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
+   RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
+
+   attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
+   attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
+   attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
+   attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
+   attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+
+   # Delete Topics
+
+   RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
+   RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
+
+   attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
+   attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
+   attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
+   attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
+   attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+
+   # Background Queue and Callback
+
+   attach_function :rd_kafka_queue_get_background, [:pointer], :pointer
+   attach_function :rd_kafka_conf_set_background_event_cb, [:pointer, :pointer], :void
+   attach_function :rd_kafka_queue_destroy, [:pointer], :void
+
+   # Admin Options
+
+   attach_function :rd_kafka_AdminOptions_new, [:pointer, :int32], :pointer
+   attach_function :rd_kafka_AdminOptions_set_opaque, [:pointer, :pointer], :void
+   attach_function :rd_kafka_AdminOptions_destroy, [:pointer], :void
+
+   # Extracting data from event types
+
+   attach_function :rd_kafka_event_type, [:pointer], :int32
+   attach_function :rd_kafka_event_opaque, [:pointer], :pointer
+
+   # Extracting data from topic results
+
+   attach_function :rd_kafka_topic_result_error, [:pointer], :int32
+   attach_function :rd_kafka_topic_result_error_string, [:pointer], :pointer
+   attach_function :rd_kafka_topic_result_name, [:pointer], :pointer
  end
end
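The `partitioner` helper wraps librdkafka's consistent-random partitioner, so callers can compute which partition a key hashes to without producing a message:

    # Compute the partition for a partition key, given the topic's
    # partition count, the same way librdkafka would.
    partition_count = 8
    partition = Rdkafka::Bindings.partitioner("user-42", partition_count)
    # => 0..7, or -1 (unassigned) when partition_count is nil or zero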
data/lib/rdkafka/callbacks.rb
@@ -0,0 +1,106 @@
+ module Rdkafka
+   module Callbacks
+
+     # Extracts attributes of a rd_kafka_topic_result_t
+     #
+     # @private
+     class TopicResult
+       attr_reader :result_error, :error_string, :result_name
+
+       def initialize(topic_result_pointer)
+         @result_error = Rdkafka::Bindings.rd_kafka_topic_result_error(topic_result_pointer)
+         @error_string = Rdkafka::Bindings.rd_kafka_topic_result_error_string(topic_result_pointer)
+         @result_name = Rdkafka::Bindings.rd_kafka_topic_result_name(topic_result_pointer)
+       end
+
+       def self.create_topic_results_from_array(count, array_pointer)
+         (1..count).map do |index|
+           result_pointer = (array_pointer + (index - 1)).read_pointer
+           new(result_pointer)
+         end
+       end
+     end
+
+     # FFI Function used for Create Topic and Delete Topic callbacks
+     BackgroundEventCallbackFunction = FFI::Function.new(
+       :void, [:pointer, :pointer, :pointer]
+     ) do |client_ptr, event_ptr, opaque_ptr|
+       BackgroundEventCallback.call(client_ptr, event_ptr, opaque_ptr)
+     end
+
+     # @private
+     class BackgroundEventCallback
+       def self.call(_, event_ptr, _)
+         event_type = Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
+         if event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
+           process_create_topic(event_ptr)
+         elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
+           process_delete_topic(event_ptr)
+         end
+       end
+
+       private
+
+       def self.process_create_topic(event_ptr)
+         create_topics_result = Rdkafka::Bindings.rd_kafka_event_CreateTopics_result(event_ptr)
+
+         # Get the number of create topic results
+         pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+         create_topic_result_array = Rdkafka::Bindings.rd_kafka_CreateTopics_result_topics(create_topics_result, pointer_to_size_t)
+         create_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_topic_result_array)
+         create_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+         if create_topic_handle = Rdkafka::Admin::CreateTopicHandle.remove(create_topic_handle_ptr.address)
+           create_topic_handle[:response] = create_topic_results[0].result_error
+           create_topic_handle[:error_string] = create_topic_results[0].error_string
+           create_topic_handle[:result_name] = create_topic_results[0].result_name
+           create_topic_handle[:pending] = false
+         end
+       end
+
+       def self.process_delete_topic(event_ptr)
+         delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
+
+         # Get the number of topic results
+         pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+         delete_topic_result_array = Rdkafka::Bindings.rd_kafka_DeleteTopics_result_topics(delete_topics_result, pointer_to_size_t)
+         delete_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, delete_topic_result_array)
+         delete_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+         if delete_topic_handle = Rdkafka::Admin::DeleteTopicHandle.remove(delete_topic_handle_ptr.address)
+           delete_topic_handle[:response] = delete_topic_results[0].result_error
+           delete_topic_handle[:error_string] = delete_topic_results[0].error_string
+           delete_topic_handle[:result_name] = delete_topic_results[0].result_name
+           delete_topic_handle[:pending] = false
+         end
+       end
+     end
+
+     # FFI Function used for Message Delivery callbacks
+
+     DeliveryCallbackFunction = FFI::Function.new(
+       :void, [:pointer, :pointer, :pointer]
+     ) do |client_ptr, message_ptr, opaque_ptr|
+       DeliveryCallback.call(client_ptr, message_ptr, opaque_ptr)
+     end
+
+     # @private
+     class DeliveryCallback
+       def self.call(_, message_ptr, opaque_ptr)
+         message = Rdkafka::Bindings::Message.new(message_ptr)
+         delivery_handle_ptr_address = message[:_private].address
+         if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+           # Update delivery handle
+           delivery_handle[:response] = message[:err]
+           delivery_handle[:partition] = message[:partition]
+           delivery_handle[:offset] = message[:offset]
+           delivery_handle[:pending] = false
+           # Call delivery callback on opaque
+           if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+             opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/rdkafka/config.rb
@@ -67,7 +67,7 @@ module Rdkafka
 
    # Returns a new config with the provided options which are merged with {DEFAULT_CONFIG}.
    #
-   # @param config_hash [Hash<String,Symbol => String>] The config options for rdkafka
+   # @param config_hash [Hash{String,Symbol => String}] The config options for rdkafka
    #
    # @return [Config]
    def initialize(config_hash = {})
@@ -137,13 +137,26 @@ module Rdkafka
      # Create Kafka config
      config = native_config(opaque)
      # Set callback to receive delivery reports on config
-     Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
+     Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
      # Return producer with Kafka client
      Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
        opaque.producer = producer
      end
    end
 
+   # Create an admin instance with this configuration.
+   #
+   # @raise [ConfigError] When the configuration contains invalid options
+   # @raise [ClientCreationError] When the native client cannot be created
+   #
+   # @return [Admin] The created admin instance
+   def admin
+     opaque = Opaque.new
+     config = native_config(opaque)
+     Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
+     Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+   end
+
    # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
    class ConfigError < RuntimeError; end
 
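Putting it together: the admin client is built on a producer-type native client with the background event callback attached, which is what completes the create/delete topic handles. A usage sketch — the `create_topic`/`delete_topic` signatures are assumptions based on `data/lib/rdkafka/admin.rb` above:

    # Sketch: create and delete a topic through the new admin API.
    # create_topic(name, partitions, replication_factor) and
    # delete_topic(name) are assumed from data/lib/rdkafka/admin.rb.
    config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
    admin = config.admin

    admin.create_topic("events", 3, 1).wait
    admin.delete_topic("events").wait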
@@ -155,7 +168,7 @@ module Rdkafka
 
    private
 
-   # This method is only intented to be used to create a client,
+   # This method is only intended to be used to create a client,
    # using it in another way will leak memory.
    def native_config(opaque=nil)
      Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
@@ -212,10 +225,8 @@ module Rdkafka
        Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
      )
 
-     FFI::AutoPointer.new(
-       handle,
-       Rdkafka::Bindings.method(:rd_kafka_destroy)
-     )
+     # Return handle which should be closed using rd_kafka_destroy after usage.
+     handle
    end
  end
 
data/lib/rdkafka/consumer.rb
@@ -5,6 +5,9 @@ module Rdkafka
  #
  # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
  # mandatory to set `:"group.id"` in the configuration.
+ #
+ # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
+ # `each_slice` to consume batches of messages.
  class Consumer
    include Enumerable
 
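Because `Consumer` includes `Enumerable`, batch consumption needs no extra API, for example:

    # Consume in batches of 100 via Enumerable#each_slice; `handle_batch`
    # stands in for application code.
    consumer.each_slice(100) do |messages|
      handle_batch(messages)
      consumer.commit
    end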
@@ -17,8 +20,12 @@ module Rdkafka
    # Close this consumer
    # @return [nil]
    def close
+     return unless @native_kafka
+
      @closing = true
      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
+     Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+     @native_kafka = nil
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -29,21 +36,22 @@ module Rdkafka
    #
    # @return [nil]
    def subscribe(*topics)
+     closed_consumer_check(__method__)
+
      # Create topic partition list with topics and no partition set
-     tpl = TopicPartitionList.new_native_tpl(topics.length)
+     tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
      topics.each do |topic|
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
-         tpl,
-         topic,
-         -1
-       )
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
      end
+
      # Subscribe to topic partition list and check this was successful
      response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
+   ensure
+     Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
    end
 
    # Unsubscribe from all subscribed topics.
@@ -52,6 +60,8 @@ module Rdkafka
    #
    # @return [nil]
    def unsubscribe
+     closed_consumer_check(__method__)
+
      response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
66
76
  #
67
77
  # @return [nil]
68
78
  def pause(list)
79
+ closed_consumer_check(__method__)
80
+
69
81
  unless list.is_a?(TopicPartitionList)
70
82
  raise TypeError.new("list has to be a TopicPartitionList")
71
83
  end
84
+
72
85
  tpl = list.to_native_tpl
73
- response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
74
86
 
75
- if response != 0
76
- list = TopicPartitionList.from_native_tpl(tpl)
77
- raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
87
+ begin
88
+ response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
89
+
90
+ if response != 0
91
+ list = TopicPartitionList.from_native_tpl(tpl)
92
+ raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
93
+ end
94
+ ensure
95
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
78
96
  end
79
97
  end
80
98
 
@@ -86,13 +104,21 @@ module Rdkafka
    #
    # @return [nil]
    def resume(list)
+     closed_consumer_check(__method__)
+
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -102,17 +128,21 @@ module Rdkafka
    #
    # @return [TopicPartitionList]
    def subscription
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
+     closed_consumer_check(__method__)
+
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+     native = ptr.read_pointer
 
      begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
      ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
      end
    end
@@ -122,13 +152,21 @@ module Rdkafka
    #
    # @raise [RdkafkaError] When assigning fails
    def assign(list)
+     closed_consumer_check(__method__)
+
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
@@ -138,19 +176,25 @@ module Rdkafka
    #
    # @return [TopicPartitionList]
    def assignment
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, tpl)
+     closed_consumer_check(__method__)
+
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
 
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+     tpl = ptr.read_pointer
 
-     begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
-     ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+     if !tpl.null?
+       begin
+         Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+       end
      end
+   ensure
+     ptr.free unless ptr.nil?
    end
 
    # Return the current committed offset per partition for this consumer group.
@@ -163,17 +207,25 @@ module Rdkafka
    #
    # @return [TopicPartitionList]
    def committed(list=nil, timeout_ms=1200)
+     closed_consumer_check(__method__)
+
      if list.nil?
        list = assignment
      elsif !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+       TopicPartitionList.from_native_tpl(tpl)
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
-     TopicPartitionList.from_native_tpl(tpl)
    end
 
@@ -186,6 +238,8 @@ module Rdkafka
    #
    # @return [Integer] The low and high watermark
    def query_watermark_offsets(topic, partition, timeout_ms=200)
+     closed_consumer_check(__method__)
+
      low = FFI::MemoryPointer.new(:int64, 1)
      high = FFI::MemoryPointer.new(:int64, 1)
 
@@ -195,13 +249,16 @@ module Rdkafka
        partition,
        low,
        high,
-       timeout_ms
+       timeout_ms,
      )
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
 
-     return low.read_int64, high.read_int64
+     return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
+   ensure
+     low.free unless low.nil?
+     high.free unless high.nil?
    end
  end
@@ -217,6 +274,7 @@ module Rdkafka
    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
    def lag(topic_partition_list, watermark_timeout_ms=100)
      out = {}
+
      topic_partition_list.to_h.each do |topic, partitions|
        # Query high watermarks for this topic's partitions
        # and compare to the offset in the list.
@@ -239,6 +297,7 @@ module Rdkafka
    #
    # @return [String, nil]
    def cluster_id
+     closed_consumer_check(__method__)
      Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
    end
 
@@ -248,6 +307,7 @@ module Rdkafka
    #
    # @return [String, nil]
    def member_id
+     closed_consumer_check(__method__)
      Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
    end
 
@@ -261,6 +321,8 @@ module Rdkafka
    #
    # @return [nil]
    def store_offset(message)
+     closed_consumer_check(__method__)
+
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -282,26 +344,70 @@ module Rdkafka
      end
    end
 
-   # Commit the current offsets of this consumer
+   # Seek to a particular message. The next poll on the topic/partition will return the
+   # message at the given offset.
+   #
+   # @param message [Rdkafka::Consumer::Message] The message to which to seek
+   #
+   # @raise [RdkafkaError] When seeking fails
+   #
+   # @return [nil]
+   def seek(message)
+     closed_consumer_check(__method__)
+
+     # rd_kafka_seek is one of the few calls that does not support
+     # a string as the topic, so create a native topic for it.
+     native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
+       @native_kafka,
+       message.topic,
+       nil
+     )
+     response = Rdkafka::Bindings.rd_kafka_seek(
+       native_topic,
+       message.partition,
+       message.offset,
+       0 # timeout
+     )
+     if response != 0
+       raise Rdkafka::RdkafkaError.new(response)
+     end
+   ensure
+     if native_topic && !native_topic.null?
+       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
+     end
+   end
+
+   # Manually commit the current offsets of this consumer.
+   #
+   # To use this set `enable.auto.commit` to `false` to disable automatic triggering
+   # of commits.
+   #
+   # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
+   # message for every partition is used. If set to `false` you can use {store_offset} to
+   # indicate when a message has been fully processed.
    #
    # @param list [TopicPartitionList,nil] The topic with partitions to commit
    # @param async [Boolean] Whether to commit async or wait for the commit to finish
    #
-   # @raise [RdkafkaError] When comitting fails
+   # @raise [RdkafkaError] When committing fails
    #
    # @return [nil]
    def commit(list=nil, async=false)
+     closed_consumer_check(__method__)
+
      if !list.nil? && !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
-     tpl = if list
-       list.to_native_tpl
-     else
-       nil
-     end
-     response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     tpl = list ? list.to_native_tpl : nil
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
      end
    end
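Together with `store_offset`, the new `seek` and the documented commit behaviour allow fully manual, at-least-once offset management. A sketch, assuming `enable.auto.commit` and `enable.auto.offset.store` are both `false` in the consumer config:

    # Sketch: manual offset management. `process` stands in for application
    # code; assumes enable.auto.commit=false and enable.auto.offset.store=false.
    consumer.each do |message|
      process(message)
      consumer.store_offset(message) # mark as fully processed
      consumer.commit(nil, false)    # synchronous commit of stored offsets
    end

    # A message can be re-read later by seeking back to it:
    # consumer.seek(some_earlier_message) # next poll returns that offset again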
@@ -313,6 +419,8 @@ module Rdkafka
    #
    # @return [Message, nil] A message or nil if there was no new message within the timeout
    def poll(timeout_ms)
+     closed_consumer_check(__method__)
+
      message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
      if message_ptr.null?
        nil
@@ -336,16 +444,20 @@ module Rdkafka
    # Poll for new messages and yield for each received one. Iteration
    # will end when the consumer is closed.
    #
+   # If `enable.partition.eof` is turned on in the config this will raise an
+   # error when an eof is reached, so you probably want to disable that when
+   # using this method of iteration.
+   #
    # @raise [RdkafkaError] When polling fails
    #
    # @yieldparam message [Message] Received message
    #
    # @return [nil]
-   def each(&block)
+   def each
      loop do
        message = poll(250)
        if message
-         block.call(message)
+         yield(message)
        else
          if @closing
@@ -355,5 +467,9 @@ module Rdkafka
        end
      end
    end
+
+   def closed_consumer_check(method)
+     raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
+   end
  end
end