rdkafka 0.14.0 → 0.15.0

Files changed (40)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/CHANGELOG.md +11 -0
  5. data/README.md +32 -22
  6. data/docker-compose.yml +2 -0
  7. data/lib/rdkafka/admin/acl_binding_result.rb +37 -0
  8. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  9. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  10. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  11. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  12. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  13. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  14. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  15. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  16. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  17. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  18. data/lib/rdkafka/admin.rb +443 -0
  19. data/lib/rdkafka/bindings.rb +119 -0
  20. data/lib/rdkafka/callbacks.rb +187 -0
  21. data/lib/rdkafka/config.rb +24 -3
  22. data/lib/rdkafka/consumer/headers.rb +1 -1
  23. data/lib/rdkafka/consumer/topic_partition_list.rb +8 -7
  24. data/lib/rdkafka/consumer.rb +46 -10
  25. data/lib/rdkafka/producer.rb +2 -2
  26. data/lib/rdkafka/version.rb +3 -3
  27. data/lib/rdkafka.rb +11 -0
  28. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  29. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  30. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  31. data/spec/rdkafka/admin/delete_acl_report_spec.rb +71 -0
  32. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  33. data/spec/rdkafka/admin/describe_acl_report_spec.rb +72 -0
  34. data/spec/rdkafka/admin_spec.rb +204 -0
  35. data/spec/rdkafka/config_spec.rb +8 -0
  36. data/spec/rdkafka/consumer_spec.rb +69 -0
  37. data/spec/spec_helper.rb +3 -1
  38. data.tar.gz.sig +0 -0
  39. metadata +26 -2
  40. metadata.gz.sig +0 -0
data/lib/rdkafka/callbacks.rb CHANGED
@@ -23,6 +23,96 @@ module Rdkafka
       end
     end
 
+    class GroupResult
+      attr_reader :result_error, :error_string, :result_name
+      def initialize(group_result_pointer)
+        native_error = Rdkafka::Bindings.rd_kafka_group_result_error(group_result_pointer)
+
+        if native_error.null?
+          @result_error = 0
+          @error_string = FFI::Pointer::NULL
+        else
+          @result_error = native_error[:code]
+          @error_string = native_error[:errstr]
+        end
+
+        @result_name = Rdkafka::Bindings.rd_kafka_group_result_name(group_result_pointer)
+      end
+      def self.create_group_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_acl_result_t
+    #
+    # @private
+    class CreateAclResult
+      attr_reader :result_error, :error_string
+
+      def initialize(acl_result_pointer)
+        rd_kafka_error_pointer = Bindings.rd_kafka_acl_result_error(acl_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
+      end
+
+      def self.create_acl_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_DeleteAcls_result_response_t
+    #
+    # @private
+    class DeleteAclResult
+      attr_reader :result_error, :error_string, :matching_acls, :matching_acls_count
+
+      def initialize(acl_result_pointer)
+        @matching_acls = []
+        rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_error(acl_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
+        if @result_error == 0
+          # Get the number of matching acls
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @matching_acls = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_matching_acls(acl_result_pointer, pointer_to_size_t)
+          @matching_acls_count = pointer_to_size_t.read_int
+        end
+      end
+
+      def self.delete_acl_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # Extracts attributes of rd_kafka_DescribeAcls_result_t
+    #
+    # @private
+    class DescribeAclResult
+      attr_reader :result_error, :error_string, :matching_acls, :matching_acls_count
+
+      def initialize(event_ptr)
+        @matching_acls = []
+        @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
+        @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
+        if @result_error == 0
+          acl_describe_result = Rdkafka::Bindings.rd_kafka_event_DescribeAcls_result(event_ptr)
+          # Get the number of matching acls
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @matching_acls = Rdkafka::Bindings.rd_kafka_DescribeAcls_result_acls(acl_describe_result, pointer_to_size_t)
+          @matching_acls_count = pointer_to_size_t.read_int
+        end
+      end
+    end
+
     # FFI Function used for Create Topic and Delete Topic callbacks
     BackgroundEventCallbackFunction = FFI::Function.new(
       :void, [:pointer, :pointer, :pointer]
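All of these result classes share one pattern: librdkafka returns a C array of result pointers plus a count, and the `*_from_array` helpers walk that array. A minimal, self-contained sketch of the idea (only the `ffi` gem is needed; the string elements stand in for the `rd_kafka_..._result_t*` entries a real call would return):

```ruby
require "ffi"

# Stand-ins for the result pointers librdkafka would hand back:
elements = [FFI::MemoryPointer.from_string("first"), FFI::MemoryPointer.from_string("second")]

# A contiguous pointer array, like the one rd_kafka_CreateAcls_result_acls returns:
array_ptr = FFI::MemoryPointer.new(:pointer, elements.size)
array_ptr.write_array_of_pointer(elements)

# read_array_of_pointer advances by element size, so it walks the array
# safely for any count:
array_ptr.read_array_of_pointer(elements.size).map(&:read_string)
# => ["first", "second"]
```

Reading the whole array in one call also sidesteps manual pointer arithmetic, which is easy to get wrong because `FFI::Pointer#+` offsets in bytes, not elements.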
@@ -38,6 +128,16 @@ module Rdkafka
           process_create_topic(event_ptr)
         elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
           process_delete_topic(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS_RESULT
+          process_create_partitions(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATEACLS_RESULT
+          process_create_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEACLS_RESULT
+          process_delete_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+          process_describe_acl(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+          process_delete_groups(event_ptr)
         end
       end
 
@@ -60,6 +160,23 @@ module Rdkafka
         end
       end
 
+      def self.process_delete_groups(event_ptr)
+        delete_groups_result = Rdkafka::Bindings.rd_kafka_event_DeleteGroups_result(event_ptr)
+
+        # Get the number of delete group results
+        pointer_to_size_t = FFI::MemoryPointer.new(:size_t)
+        delete_group_result_array = Rdkafka::Bindings.rd_kafka_DeleteGroups_result_groups(delete_groups_result, pointer_to_size_t)
+        delete_group_results = GroupResult.create_group_results_from_array(pointer_to_size_t.read_int, delete_group_result_array) # TODO fix this
+        delete_group_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if (delete_group_handle = Rdkafka::Admin::DeleteGroupsHandle.remove(delete_group_handle_ptr.address))
+          delete_group_handle[:response] = delete_group_results[0].result_error
+          delete_group_handle[:error_string] = delete_group_results[0].error_string
+          delete_group_handle[:result_name] = delete_group_results[0].result_name
+          delete_group_handle[:pending] = false
+        end
+      end
+
       def self.process_delete_topic(event_ptr)
         delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
 
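Each `process_*` callback completes a waiting admin handle by filling its fields and flipping `:pending` so that the caller's blocked `wait` can return. A simplified sketch of that handshake (`SketchHandle` is hypothetical, loosely modeled on `Rdkafka::AbstractHandle#wait`, not the gem's actual implementation):

```ruby
# Hypothetical stand-in for the FFI::Struct-backed handles used above.
class SketchHandle
  attr_accessor :pending, :response, :error_string

  def initialize
    @pending = true # the background callback flips this to false
  end

  # Poll until the callback thread marks the operation complete,
  # mirroring the role AbstractHandle#wait plays for the real handles.
  def wait(max_wait_timeout: 60, wait_interval: 0.1)
    deadline = Time.now + max_wait_timeout
    sleep(wait_interval) while pending && Time.now < deadline
    raise "wait timeout" if pending
    response
  end
end
```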
@@ -76,6 +193,75 @@ module Rdkafka
           delete_topic_handle[:pending] = false
         end
       end
+
+      def self.process_create_partitions(event_ptr)
+        create_partitions_result = Rdkafka::Bindings.rd_kafka_event_CreatePartitions_result(event_ptr)
+
+        # Get the number of create partitions results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_partitions_result_array = Rdkafka::Bindings.rd_kafka_CreatePartitions_result_topics(create_partitions_result, pointer_to_size_t)
+        create_partitions_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_partitions_result_array)
+        create_partitions_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_partitions_handle = Rdkafka::Admin::CreatePartitionsHandle.remove(create_partitions_handle_ptr.address)
+          create_partitions_handle[:response] = create_partitions_results[0].result_error
+          create_partitions_handle[:error_string] = create_partitions_results[0].error_string
+          create_partitions_handle[:result_name] = create_partitions_results[0].result_name
+          create_partitions_handle[:pending] = false
+        end
+      end
+
+      def self.process_create_acl(event_ptr)
+        create_acls_result = Rdkafka::Bindings.rd_kafka_event_CreateAcls_result(event_ptr)
+
+        # Get the number of acl results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_acl_result_array = Rdkafka::Bindings.rd_kafka_CreateAcls_result_acls(create_acls_result, pointer_to_size_t)
+        create_acl_results = CreateAclResult.create_acl_results_from_array(pointer_to_size_t.read_int, create_acl_result_array)
+        create_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_acl_handle = Rdkafka::Admin::CreateAclHandle.remove(create_acl_handle_ptr.address)
+          create_acl_handle[:response] = create_acl_results[0].result_error
+          create_acl_handle[:response_string] = create_acl_results[0].error_string
+          create_acl_handle[:pending] = false
+        end
+      end
+
+      def self.process_delete_acl(event_ptr)
+        delete_acls_result = Rdkafka::Bindings.rd_kafka_event_DeleteAcls_result(event_ptr)
+
+        # Get the number of acl results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        delete_acl_result_responses = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_responses(delete_acls_result, pointer_to_size_t)
+        delete_acl_results = DeleteAclResult.delete_acl_results_from_array(pointer_to_size_t.read_int, delete_acl_result_responses)
+        delete_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if delete_acl_handle = Rdkafka::Admin::DeleteAclHandle.remove(delete_acl_handle_ptr.address)
+          delete_acl_handle[:response] = delete_acl_results[0].result_error
+          delete_acl_handle[:response_string] = delete_acl_results[0].error_string
+          delete_acl_handle[:pending] = false
+          if delete_acl_results[0].result_error == 0
+            delete_acl_handle[:matching_acls] = delete_acl_results[0].matching_acls
+            delete_acl_handle[:matching_acls_count] = delete_acl_results[0].matching_acls_count
+          end
+        end
+      end
+
+      def self.process_describe_acl(event_ptr)
+        describe_acl = DescribeAclResult.new(event_ptr)
+        describe_acl_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if describe_acl_handle = Rdkafka::Admin::DescribeAclHandle.remove(describe_acl_handle_ptr.address)
+          describe_acl_handle[:response] = describe_acl.result_error
+          describe_acl_handle[:response_string] = describe_acl.error_string
+          describe_acl_handle[:pending] = false
+          if describe_acl.result_error == 0
+            describe_acl_handle[:acls] = describe_acl.matching_acls
+            describe_acl_handle[:acls_count] = describe_acl.matching_acls_count
+          end
+        end
+      end
+
     end
 
     # FFI Function used for Message Delivery callbacks
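All of these handlers read the result count through an out-parameter. One detail worth noting: the delete-groups handler above allocates that out-parameter as `:size_t`, while the ACL and partitions handlers allocate `:int32`, even though librdkafka declares these count parameters as `size_t *`; the `:size_t` allocation is the one that matches the C signature. A standalone illustration of the out-parameter mechanics (the value 3 is written by hand here; in a real call librdkafka would write it):

```ruby
require "ffi"

# librdkafka writes the count through a size_t*:
pointer_to_size_t = FFI::MemoryPointer.new(:size_t)

# e.g. rd_kafka_DeleteAcls_result_responses(result, pointer_to_size_t) would
# populate it; we simulate the write from the C side:
pointer_to_size_t.write(:size_t, 3)

pointer_to_size_t.read(:size_t) # => 3
```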
@@ -108,5 +294,6 @@ module Rdkafka
         end
       end
     end
+
   end
 end
data/lib/rdkafka/config.rb CHANGED
@@ -112,6 +112,7 @@ module Rdkafka
     def initialize(config_hash = {})
       @config_hash = DEFAULT_CONFIG.merge(config_hash)
       @consumer_rebalance_listener = nil
+      @consumer_poll_set = true
     end
 
     # Set a config option.
@@ -140,6 +141,22 @@ module Rdkafka
       @consumer_rebalance_listener = listener
     end
 
+    # Should we use a single queue for the underlying consumer and events.
+    #
+    # This is an advanced API that allows for more granular control of the polling process.
+    # When this value is set to `false` (`true` by default), there will be two queues that need
+    # to be polled:
+    # - main librdkafka queue for events
+    # - consumer queue with messages and rebalances
+    #
+    # It is recommended to use the defaults and only set it to `false` in advanced multi-threaded
+    # and complex cases where granular event handling control is needed.
+    #
+    # @param poll_set [Boolean]
+    def consumer_poll_set=(poll_set)
+      @consumer_poll_set = poll_set
+    end
+
     # Creates a consumer with this configuration.
     #
     # @return [Consumer] The created consumer
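A usage sketch of the new setting (the broker address and group id are placeholders; a running Kafka is assumed):

```ruby
config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id" => "example-group"
)

# Advanced mode: keep the main event queue separate from the consumer queue.
# The application must then serve both (see Consumer#events_poll below).
config.consumer_poll_set = false
consumer = config.consumer
```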
@@ -158,8 +175,8 @@ module Rdkafka
       # Create native client
       kafka = native_kafka(config, :rd_kafka_consumer)
 
-      # Redirect the main queue to the consumer
-      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
+      # Redirect the main queue to the consumer queue
+      Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka) if @consumer_poll_set
 
       # Return consumer with Kafka client
       Rdkafka::Consumer.new(
@@ -187,7 +204,11 @@ module Rdkafka
       # Return producer with Kafka client
       partitioner_name = self[:partitioner] || self["partitioner"]
       Rdkafka::Producer.new(
-        Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true, opaque: opaque),
+        Rdkafka::NativeKafka.new(
+          native_kafka(config, :rd_kafka_producer),
+          run_polling_thread: true,
+          opaque: opaque
+        ),
         partitioner_name
       ).tap do |producer|
         opaque.producer = producer
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -20,7 +20,7 @@ module Rdkafka
       #
       # @private
       #
-      # @param [librdkakfa message] native_message
+      # @param [Rdkafka::Bindings::Message] native_message
       # @return [Hash<String, String>] headers Hash for the native_message
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       def self.from_native(native_message)
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -36,6 +36,11 @@ module Rdkafka
     # Add a topic with optionally partitions to the list.
     # Calling this method multiple times for the same topic will overwrite the previous configuraton.
     #
+    # @param topic [String] The topic's name
+    # @param partitions [Array<Integer>, Range<Integer>, Integer] The topic's partitions or partition count
+    #
+    # @return [nil]
+    #
     # @example Add a topic with unassigned partitions
     #   tpl.add_topic("topic")
     #
@@ -45,10 +50,6 @@ module Rdkafka
     # @example Add a topic with all topics up to a count
     #   tpl.add_topic("topic", 9)
     #
-    # @param topic [String] The topic's name
-    # @param partitions [Array<Integer>, Range<Integer>, Integer] The topic's partitions or partition count
-    #
-    # @return [nil]
     def add_topic(topic, partitions=nil)
       if partitions.nil?
         @data[topic.to_s] = nil
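The three documented forms side by side (a quick sketch; the topic names are placeholders):

```ruby
tpl = Rdkafka::Consumer::TopicPartitionList.new

tpl.add_topic("topic-a")             # unassigned partitions
tpl.add_topic("topic-b", [0, 1, 2])  # explicit partition list
tpl.add_topic("topic-c", 9)          # partition count: adds partitions 0..8
```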
@@ -90,11 +91,11 @@ module Rdkafka
 
     # Create a new topic partition list based of a native one.
     #
+    # @private
+    #
     # @param pointer [FFI::Pointer] Optional pointer to an existing native list. Its contents will be copied.
     #
     # @return [TopicPartitionList]
-    #
-    # @private
     def self.from_native_tpl(pointer)
       # Data to be moved into the tpl
       data = {}
@@ -127,8 +128,8 @@ module Rdkafka
     #
     # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
     #
-    # @return [FFI::Pointer]
     # @private
+    # @return [FFI::Pointer]
     def to_native_tpl
       tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
 
data/lib/rdkafka/consumer.rb CHANGED
@@ -221,6 +221,15 @@ module Rdkafka
       ptr.free unless ptr.nil?
     end
 
+    # @return [Boolean] true if our current assignment has been lost involuntarily.
+    def assignment_lost?
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+      end
+    end
+
     # Return the current committed offset per partition for this consumer group.
     # The offset field of each requested partition will either be set to stored offset or to -1001
     # in case there was no stored offset for that partition.
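A hedged sketch of where `#assignment_lost?` helps: deciding whether committing is still safe after a rebalance-related error (`consumer` is assumed to be a subscribed `Rdkafka::Consumer`):

```ruby
begin
  consumer.commit
rescue Rdkafka::RdkafkaError => e
  if consumer.assignment_lost?
    # The group evicted us (e.g. max.poll.interval.ms exceeded);
    # offsets for the lost partitions can no longer be committed.
    warn "assignment lost involuntarily: #{e.message}"
  else
    raise
  end
end
```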
@@ -259,9 +268,9 @@ module Rdkafka
     #
     # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
     #
-    # @raise [RdkafkaError] When getting the positions fails.
-    #
     # @return [TopicPartitionList]
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
     def position(list=nil)
       if list.nil?
         list = assignment
@@ -289,7 +298,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for querying the broker
     # @return [Integer] The low and high watermark
     # @raise [RdkafkaError] When querying the broker fails.
-    def query_watermark_offsets(topic, partition, timeout_ms=200)
+    def query_watermark_offsets(topic, partition, timeout_ms=1000)
       closed_consumer_check(__method__)
 
       low = FFI::MemoryPointer.new(:int64, 1)
@@ -325,7 +334,7 @@ module Rdkafka
     # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
     #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    def lag(topic_partition_list, watermark_timeout_ms=100)
+    def lag(topic_partition_list, watermark_timeout_ms=1000)
       out = {}
 
       topic_partition_list.to_h.each do |topic, partitions|
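Both defaults above move from 200ms/100ms to a more forgiving 1000ms. A usage sketch (a reachable broker and an existing topic are assumed; the result shape is an illustration):

```ruby
# Low/high watermarks for one partition:
low, high = consumer.query_watermark_offsets("example-topic", 0)
puts "partition 0 spans offsets #{low}..#{high}"

# Lag per partition, relative to the committed offsets:
lags = consumer.lag(consumer.committed)
# => { "example-topic" => { 0 => 42, 1 => 7 } }
```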
@@ -438,9 +447,9 @@ module Rdkafka
     #
     # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
     #
-    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
-    #
     # @return [TopicPartitionList]
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
     def offsets_for_times(list, timeout_ms = 1000)
       closed_consumer_check(__method__)
 
@@ -531,15 +540,41 @@ module Rdkafka
       end
     end
 
+    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
+    # was set to `true`.
+    #
+    # Events will cause application-provided callbacks to be called.
+    #
+    # Events (in the context of the consumer):
+    # - error callbacks
+    # - stats callbacks
+    # - any other callbacks supported by librdkafka that are not part of the consumer_poll and
+    #   that have a callback configured and activated.
+    #
+    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
+    # be called. When in use, it does **NOT** replace `#poll` but needs to run complementary with it.
+    #
+    # @param timeout_ms [Integer] poll timeout. If set to 0 it will run async; when set to -1 it
+    #   will block until any event is available.
+    #
+    # @note This method technically should be called `#poll` and the current `#poll` should be
+    #   called `#consumer_poll`, though we keep the current naming convention to make it backward
+    #   compatible.
+    def events_poll(timeout_ms = 0)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
+      end
+    end
+
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
     # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
     # reached, so you probably want to disable that when using this method of iteration.
     #
-    # @raise [RdkafkaError] When polling fails
     # @yieldparam message [Message] Received message
     # @return [nil]
+    # @raise [RdkafkaError] When polling fails
     def each
       loop do
         message = poll(250)
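How the two polls cooperate when `consumer_poll_set` was set to `false` (a hedged sketch; the topic name and the surrounding setup are placeholders):

```ruby
consumer.subscribe("example-topic")

loop do
  # Serve error/stats/other event callbacks from the main queue (non-blocking):
  consumer.events_poll(0)

  # Serve messages and rebalances from the consumer queue:
  message = consumer.poll(100)
  puts message.payload if message
end
```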
@@ -594,14 +629,15 @@ module Rdkafka
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
-    # @raise [RdkafkaError] When polling fails
-    #
-    # @yield [messages, pending_exception]
     # @yieldparam messages [Array] An array of received Message
     # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #
+    # @yield [messages, pending_exception]
     #   which will be propagated after processing of the partial batch is complete.
     #
     # @return [nil]
+    #
+    # @raise [RdkafkaError] When polling fails
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       closed_consumer_check(__method__)
       slice = []
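A usage sketch of `#each_batch` under the documented contract (`handle_all` is a hypothetical application method):

```ruby
consumer.each_batch(max_items: 50, timeout_ms: 500, yield_on_error: true) do |messages, pending_exception|
  # pending_exception is normally nil; with yield_on_error: true a failing poll
  # still yields the partial batch first, then the error is propagated.
  handle_all(messages) unless messages.empty?
end
```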
data/lib/rdkafka/producer.rb CHANGED
@@ -165,9 +165,9 @@ module Rdkafka
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
     #
-    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
-    #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
+    #
+    # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
     def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
       closed_producer_check(__method__)
 
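The produce flow the reordered tags describe, end to end (topic, payload, and headers are placeholders; a reachable broker is assumed):

```ruby
handle = producer.produce(
  topic: "example-topic",
  payload: "hello",
  key: "key-1",
  headers: { "source" => "example" }
)

# Block until the broker acks (or raise RdkafkaError on failure):
delivery_report = handle.wait(max_wait_timeout: 10)
puts "delivered to partition #{delivery_report.partition} at offset #{delivery_report.offset}"
```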
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.14.0"
-  LIBRDKAFKA_VERSION = "2.2.0"
-  LIBRDKAFKA_SOURCE_SHA256 = "af9a820cbecbc64115629471df7c7cecd40403b6c34bfdbb9223152677a47226"
+  VERSION = "0.15.0"
+  LIBRDKAFKA_VERSION = "2.3.0"
+  LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
data/lib/rdkafka.rb CHANGED
@@ -11,8 +11,19 @@ require "rdkafka/abstract_handle"
 require "rdkafka/admin"
 require "rdkafka/admin/create_topic_handle"
 require "rdkafka/admin/create_topic_report"
+require "rdkafka/admin/delete_groups_handle"
+require "rdkafka/admin/delete_groups_report"
 require "rdkafka/admin/delete_topic_handle"
 require "rdkafka/admin/delete_topic_report"
+require "rdkafka/admin/create_partitions_handle"
+require "rdkafka/admin/create_partitions_report"
+require "rdkafka/admin/create_acl_handle"
+require "rdkafka/admin/create_acl_report"
+require "rdkafka/admin/delete_acl_handle"
+require "rdkafka/admin/delete_acl_report"
+require "rdkafka/admin/describe_acl_handle"
+require "rdkafka/admin/describe_acl_report"
+require "rdkafka/admin/acl_binding_result"
 require "rdkafka/bindings"
 require "rdkafka/callbacks"
 require "rdkafka/config"
data/spec/rdkafka/admin/create_acl_handle_spec.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::CreateAclHandle do
+  # If create acl was successful there is no error object
+  # and the error code is set to RD_KAFKA_RESP_ERR_NO_ERROR
+  # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L169
+  let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR }
+
+  subject do
+    Rdkafka::Admin::CreateAclHandle.new.tap do |handle|
+      handle[:pending] = pending_handle
+      handle[:response] = response
+      # If create acl was successful there is no error object and the error_string is set to ""
+      # https://github.com/confluentinc/librdkafka/blob/1f9f245ac409f50f724695c628c7a0d54a763b9a/src/rdkafka_error.c#L178
+      handle[:response_string] = FFI::MemoryPointer.from_string("")
+    end
+  end
+
+  describe "#wait" do
+    let(:pending_handle) { true }
+
+    it "should wait until the timeout and then raise an error" do
+      expect {
+        subject.wait(max_wait_timeout: 0.1)
+      }.to raise_error Rdkafka::Admin::CreateAclHandle::WaitTimeoutError, /create acl/
+    end
+
+    context "when not pending anymore and no error" do
+      let(:pending_handle) { false }
+
+      it "should return a create acl report" do
+        report = subject.wait
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+
+      it "should wait without a timeout" do
+        report = subject.wait(max_wait_timeout: nil)
+
+        expect(report.rdkafka_response_string).to eq("")
+      end
+    end
+  end
+
+  describe "#raise_error" do
+    let(:pending_handle) { false }
+
+    it "should raise the appropriate error" do
+      expect {
+        subject.raise_error
+      }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+    end
+  end
+end
data/spec/rdkafka/admin/create_acl_report_spec.rb ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::CreateAclReport do
+  subject { Rdkafka::Admin::CreateAclReport.new(
+    rdkafka_response: Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR,
+    rdkafka_response_string: FFI::MemoryPointer.from_string("")
+  )}
+
+  it "should get RD_KAFKA_RESP_ERR_NO_ERROR" do
+    expect(subject.rdkafka_response).to eq(0)
+  end
+
+  it "should get empty string" do
+    expect(subject.rdkafka_response_string).to eq("")
+  end
+end
data/spec/rdkafka/admin/delete_acl_handle_spec.rb ADDED
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+describe Rdkafka::Admin::DeleteAclHandle do
+  let(:response) { Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR }
+  let(:resource_name) { "acl-test-topic" }
+  let(:resource_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC }
+  let(:resource_pattern_type) { Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL }
+  let(:principal) { "User:anonymous" }
+  let(:host) { "*" }
+  let(:operation) { Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ }
+  let(:permission_type) { Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW }
+  let(:delete_acl_ptr) { FFI::Pointer::NULL }
+
+  subject do
+    error_buffer = FFI::MemoryPointer.from_string(" " * 256)
+    delete_acl_ptr = Rdkafka::Bindings.rd_kafka_AclBinding_new(
+      resource_type,
+      FFI::MemoryPointer.from_string(resource_name),
+      resource_pattern_type,
+      FFI::MemoryPointer.from_string(principal),
+      FFI::MemoryPointer.from_string(host),
+      operation,
+      permission_type,
+      error_buffer,
+      256
+    )
+    if delete_acl_ptr.null?
+      raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
+    end
+    pointer_array = [delete_acl_ptr]
+    delete_acls_array_ptr = FFI::MemoryPointer.new(:pointer)
+    delete_acls_array_ptr.write_array_of_pointer(pointer_array)
+    Rdkafka::Admin::DeleteAclHandle.new.tap do |handle|
+      handle[:pending] = pending_handle
+      handle[:response] = response
+      handle[:response_string] = FFI::MemoryPointer.from_string("")
+      handle[:matching_acls] = delete_acls_array_ptr
+      handle[:matching_acls_count] = 1
+    end
+  end
+
+  after do
+    if delete_acl_ptr != FFI::Pointer::NULL
+      Rdkafka::Bindings.rd_kafka_AclBinding_destroy(delete_acl_ptr)
+    end
+  end
+
+  describe "#wait" do
+    let(:pending_handle) { true }
+
+    it "should wait until the timeout and then raise an error" do
+      expect {
+        subject.wait(max_wait_timeout: 0.1)
+      }.to raise_error Rdkafka::Admin::DeleteAclHandle::WaitTimeoutError, /delete acl/
+    end
+
+    context "when not pending anymore and no error" do
+      let(:pending_handle) { false }
+
+      it "should return a delete acl report" do
+        report = subject.wait
+
+        expect(report.deleted_acls.length).to eq(1)
+      end
+
+      it "should wait without a timeout" do
+        report = subject.wait(max_wait_timeout: nil)
+
+        expect(report.deleted_acls[0].matching_acl_resource_name).to eq(resource_name)
+      end
+    end
+  end
+
+  describe "#raise_error" do
+    let(:pending_handle) { false }
+
+    it "should raise the appropriate error" do
+      expect {
+        subject.raise_error
+      }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+    end
+  end
+end