rdkafka 0.8.0 → 0.11.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (45)
  1. checksums.yaml +4 -4
  2. data/.rspec +1 -0
  3. data/.semaphore/semaphore.yml +23 -0
  4. data/CHANGELOG.md +24 -1
  5. data/Guardfile +19 -0
  6. data/README.md +8 -3
  7. data/bin/console +11 -0
  8. data/docker-compose.yml +5 -3
  9. data/ext/README.md +8 -1
  10. data/ext/Rakefile +5 -20
  11. data/lib/rdkafka/abstract_handle.rb +82 -0
  12. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  13. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  14. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  15. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  16. data/lib/rdkafka/admin.rb +155 -0
  17. data/lib/rdkafka/bindings.rb +57 -18
  18. data/lib/rdkafka/callbacks.rb +106 -0
  19. data/lib/rdkafka/config.rb +59 -3
  20. data/lib/rdkafka/consumer.rb +125 -5
  21. data/lib/rdkafka/error.rb +29 -3
  22. data/lib/rdkafka/metadata.rb +6 -5
  23. data/lib/rdkafka/producer/delivery_handle.rb +7 -53
  24. data/lib/rdkafka/producer/delivery_report.rb +1 -1
  25. data/lib/rdkafka/producer.rb +27 -12
  26. data/lib/rdkafka/version.rb +3 -3
  27. data/lib/rdkafka.rb +7 -0
  28. data/rdkafka.gemspec +9 -7
  29. data/spec/rdkafka/abstract_handle_spec.rb +113 -0
  30. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  31. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  32. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  33. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  34. data/spec/rdkafka/admin_spec.rb +203 -0
  35. data/spec/rdkafka/bindings_spec.rb +32 -8
  36. data/spec/rdkafka/callbacks_spec.rb +20 -0
  37. data/spec/rdkafka/config_spec.rb +78 -9
  38. data/spec/rdkafka/consumer_spec.rb +326 -42
  39. data/spec/rdkafka/error_spec.rb +4 -0
  40. data/spec/rdkafka/metadata_spec.rb +78 -0
  41. data/spec/rdkafka/producer/delivery_handle_spec.rb +1 -41
  42. data/spec/rdkafka/producer_spec.rb +102 -34
  43. data/spec/spec_helper.rb +78 -20
  44. metadata +84 -29
  45. data/.travis.yml +0 -48
data/lib/rdkafka/callbacks.rb ADDED
@@ -0,0 +1,106 @@
+module Rdkafka
+  module Callbacks
+
+    # Extracts attributes of a rd_kafka_topic_result_t
+    #
+    # @private
+    class TopicResult
+      attr_reader :result_error, :error_string, :result_name
+
+      def initialize(topic_result_pointer)
+        @result_error = Rdkafka::Bindings.rd_kafka_topic_result_error(topic_result_pointer)
+        @error_string = Rdkafka::Bindings.rd_kafka_topic_result_error_string(topic_result_pointer)
+        @result_name = Rdkafka::Bindings.rd_kafka_topic_result_name(topic_result_pointer)
+      end
+
+      def self.create_topic_results_from_array(count, array_pointer)
+        (1..count).map do |index|
+          result_pointer = (array_pointer + (index - 1)).read_pointer
+          new(result_pointer)
+        end
+      end
+    end
+
+    # FFI Function used for Create Topic and Delete Topic callbacks
+    BackgroundEventCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, event_ptr, opaque_ptr|
+      BackgroundEventCallback.call(client_ptr, event_ptr, opaque_ptr)
+    end
+
+    # @private
+    class BackgroundEventCallback
+      def self.call(_, event_ptr, _)
+        event_type = Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
+        if event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
+          process_create_topic(event_ptr)
+        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
+          process_delete_topic(event_ptr)
+        end
+      end
+
+      private
+
+      def self.process_create_topic(event_ptr)
+        create_topics_result = Rdkafka::Bindings.rd_kafka_event_CreateTopics_result(event_ptr)
+
+        # Get the number of create topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        create_topic_result_array = Rdkafka::Bindings.rd_kafka_CreateTopics_result_topics(create_topics_result, pointer_to_size_t)
+        create_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, create_topic_result_array)
+        create_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if create_topic_handle = Rdkafka::Admin::CreateTopicHandle.remove(create_topic_handle_ptr.address)
+          create_topic_handle[:response] = create_topic_results[0].result_error
+          create_topic_handle[:error_string] = create_topic_results[0].error_string
+          create_topic_handle[:result_name] = create_topic_results[0].result_name
+          create_topic_handle[:pending] = false
+        end
+      end
+
+      def self.process_delete_topic(event_ptr)
+        delete_topics_result = Rdkafka::Bindings.rd_kafka_event_DeleteTopics_result(event_ptr)
+
+        # Get the number of topic results
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        delete_topic_result_array = Rdkafka::Bindings.rd_kafka_DeleteTopics_result_topics(delete_topics_result, pointer_to_size_t)
+        delete_topic_results = TopicResult.create_topic_results_from_array(pointer_to_size_t.read_int, delete_topic_result_array)
+        delete_topic_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+        if delete_topic_handle = Rdkafka::Admin::DeleteTopicHandle.remove(delete_topic_handle_ptr.address)
+          delete_topic_handle[:response] = delete_topic_results[0].result_error
+          delete_topic_handle[:error_string] = delete_topic_results[0].error_string
+          delete_topic_handle[:result_name] = delete_topic_results[0].result_name
+          delete_topic_handle[:pending] = false
+        end
+      end
+    end
+
+    # FFI Function used for Message Delivery callbacks
+
+    DeliveryCallbackFunction = FFI::Function.new(
+      :void, [:pointer, :pointer, :pointer]
+    ) do |client_ptr, message_ptr, opaque_ptr|
+      DeliveryCallback.call(client_ptr, message_ptr, opaque_ptr)
+    end
+
+    # @private
+    class DeliveryCallback
+      def self.call(_, message_ptr, opaque_ptr)
+        message = Rdkafka::Bindings::Message.new(message_ptr)
+        delivery_handle_ptr_address = message[:_private].address
+        if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+          # Update delivery handle
+          delivery_handle[:response] = message[:err]
+          delivery_handle[:partition] = message[:partition]
+          delivery_handle[:offset] = message[:offset]
+          delivery_handle[:pending] = false
+          # Call delivery callback on opaque
+          if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+          end
+        end
+      end
+    end
+  end
+end
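For context, a minimal sketch of the producer path that DeliveryCallback completes; broker address, topic name, and payload are illustrative values:

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
    handle = producer.produce(topic: "example_topic", payload: "hello")
    # DeliveryCallback fills in partition/offset on the handle and flips :pending
    report = handle.wait(max_wait_timeout: 5)
    puts "Delivered to partition #{report.partition} at offset #{report.offset}"
    producer.close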
data/lib/rdkafka/config.rb CHANGED
@@ -10,7 +10,18 @@ module Rdkafka
     # @private
     @@statistics_callback = nil
     # @private
+    @@error_callback = nil
+    # @private
     @@opaques = {}
+    # @private
+    @@log_queue = Queue.new
+
+    Thread.start do
+      loop do
+        severity, msg = @@log_queue.pop
+        @@logger.add(severity, msg)
+      end
+    end
 
     # Returns the current logger, by default this is a logger to stdout.
     #
@@ -19,6 +30,16 @@ module Rdkafka
       @@logger
     end
 
+
+    # Returns a queue whose contents will be passed to the configured logger. Each entry
+    # should follow the format [Logger::Severity, String]. The benefit over calling the
+    # logger directly is that this is safe to use from trap contexts.
+    #
+    # @return [Queue]
+    def self.log_queue
+      @@log_queue
+    end
+
     # Set the logger that will be used for all logging output by this library.
     #
     # @param logger [Logger] The logger to be used
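A minimal sketch of what the new log queue enables: logging from a Signal.trap handler, where calling a Logger directly would raise because mutexes are not allowed in trap contexts. The background thread above drains the queue and forwards each entry to the configured logger.

    Signal.trap("TERM") do
      Rdkafka::Config.log_queue << [Logger::INFO, "TERM received, shutting down"]
    end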
@@ -33,11 +54,11 @@ module Rdkafka
     # You can configure if and how often this happens using `statistics.interval.ms`.
     # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
     #
-    # @param callback [Proc] The callback
+    # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def self.statistics_callback=(callback)
-      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @@statistics_callback = callback
     end
 
@@ -48,6 +69,25 @@ module Rdkafka
       @@statistics_callback
     end
 
+    # Set a callback that will be called every time the underlying client emits an error.
+    # If this callback is not set, global errors such as brokers becoming unavailable will only be sent to the logger, as defined by librdkafka.
+    # The callback is called with an instance of RdKafka::Error.
+    #
+    # @param callback [Proc, #call] The callback
+    #
+    # @return [nil]
+    def self.error_callback=(callback)
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+      @@error_callback = callback
+    end
+
+    # Returns the current error callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    def self.error_callback
+      @@error_callback
+    end
+
     # @private
     def self.opaques
       @@opaques
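A minimal sketch of wiring up the new error callback; the handler here only prints the error code (for instance :all_brokers_down when the cluster is unreachable):

    Rdkafka::Config.error_callback = lambda do |error|
      # error is an Rdkafka::RdkafkaError delivered via librdkafka's error callback
      puts "librdkafka error: #{error.code}"
    end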
@@ -137,13 +177,26 @@ module Rdkafka
       # Create Kafka config
       config = native_config(opaque)
       # Set callback to receive delivery reports on config
-      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
+      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
       Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
         opaque.producer = producer
       end
     end
 
+    # Create an admin instance with this configuration.
+    #
+    # @raise [ConfigError] When the configuration contains invalid options
+    # @raise [ClientCreationError] When the native client cannot be created
+    #
+    # @return [Admin] The created admin instance
+    def admin
+      opaque = Opaque.new
+      config = native_config(opaque)
+      Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
+      Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+    end
+
     # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
     class ConfigError < RuntimeError; end
 
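A minimal sketch of the admin API this method exposes, assuming the create_topic signature from the new Admin class (topic name, partition count, replication factor); the values are illustrative:

    admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin
    create_handle = admin.create_topic("example_topic", 3, 1)
    # Completed by BackgroundEventCallback once the broker responds
    create_handle.wait(max_wait_timeout: 15)
    admin.close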
@@ -190,6 +243,9 @@ module Rdkafka
 
       # Set stats callback
       Rdkafka::Bindings.rd_kafka_conf_set_stats_cb(config, Rdkafka::Bindings::StatsCallback)
+
+      # Set error callback
+      Rdkafka::Bindings.rd_kafka_conf_set_error_cb(config, Rdkafka::Bindings::ErrorCallback)
     end
   end
 
data/lib/rdkafka/consumer.rb CHANGED
@@ -36,6 +36,8 @@ module Rdkafka
     #
     # @return [nil]
     def subscribe(*topics)
+      closed_consumer_check(__method__)
+
       # Create topic partition list with topics and no partition set
       tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
@@ -49,7 +51,7 @@
         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
       end
     ensure
-      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
     end
 
     # Unsubscribe from all subscribed topics.
@@ -58,6 +60,8 @@
     #
     # @return [nil]
     def unsubscribe
+      closed_consumer_check(__method__)
+
       response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
@@ -72,6 +76,8 @@
     #
     # @return [nil]
     def pause(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -98,6 +104,8 @@
     #
     # @return [nil]
     def resume(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -120,6 +128,8 @@
     #
     # @return [TopicPartitionList]
     def subscription
+      closed_consumer_check(__method__)
+
       ptr = FFI::MemoryPointer.new(:pointer)
       response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
 
@@ -142,6 +152,8 @@
     #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
+      closed_consumer_check(__method__)
+
       unless list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be a TopicPartitionList")
       end
@@ -164,6 +176,8 @@
     #
     # @return [TopicPartitionList]
     def assignment
+      closed_consumer_check(__method__)
+
       ptr = FFI::MemoryPointer.new(:pointer)
       response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
       if response != 0
@@ -180,7 +194,7 @@
         end
       end
     ensure
-      ptr.free
+      ptr.free unless ptr.nil?
     end
 
     # Return the current committed offset per partition for this consumer group.
@@ -193,6 +207,8 @@
     #
     # @return [TopicPartitionList]
     def committed(list=nil, timeout_ms=1200)
+      closed_consumer_check(__method__)
+
       if list.nil?
         list = assignment
       elsif !list.is_a?(TopicPartitionList)
@@ -222,6 +238,8 @@
     #
     # @return [Integer] The low and high watermark
     def query_watermark_offsets(topic, partition, timeout_ms=200)
+      closed_consumer_check(__method__)
+
       low = FFI::MemoryPointer.new(:int64, 1)
       high = FFI::MemoryPointer.new(:int64, 1)
 
@@ -239,8 +257,8 @@
 
       return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
     ensure
-      low.free
-      high.free
+      low.free unless low.nil?
+      high.free unless high.nil?
     end
 
     # Calculate the consumer lag per partition for the provided topic partition list.
@@ -279,6 +297,7 @@
     #
     # @return [String, nil]
     def cluster_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
     end
 
@@ -288,6 +307,7 @@
     #
     # @return [String, nil]
     def member_id
+      closed_consumer_check(__method__)
       Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
     end
 
@@ -301,6 +321,8 @@
     #
     # @return [nil]
     def store_offset(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -331,6 +353,8 @@
     #
     # @return [nil]
     def seek(message)
+      closed_consumer_check(__method__)
+
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@@ -369,6 +393,8 @@
     #
     # @return [nil]
     def commit(list=nil, async=false)
+      closed_consumer_check(__method__)
+
       if !list.nil? && !list.is_a?(TopicPartitionList)
         raise TypeError.new("list has to be nil or a TopicPartitionList")
       end
@@ -393,7 +419,7 @@
     #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
     def poll(timeout_ms)
-      return unless @native_kafka
+      closed_consumer_check(__method__)
 
       message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
       if message_ptr.null?
@@ -441,5 +467,99 @@
         end
       end
     end
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
+    end
+
+    # Poll for new messages and yield them in batches that may contain
+    # messages from more than one partition.
+    #
+    # Rather than yield each message immediately as soon as it is received,
+    # each_batch will attempt to wait for as long as `timeout_ms` in order
+    # to create a batch of up to but no more than `max_items` in size.
+    #
+    # Said differently, if more than `max_items` are available within
+    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
+    # array, but if `timeout_ms` passes by with fewer messages arriving, it
+    # will yield an array of fewer messages, quite possibly zero.
+    #
+    # In order to prevent wrongly auto committing many messages at once across
+    # possibly many partitions, callers must explicitly indicate which messages
+    # have been successfully processed as some consumed messages may not have
+    # been yielded yet. To do this, the caller should set
+    # `enable.auto.offset.store` to false and pass processed messages to
+    # {store_offset}. It is also possible, though more complex, to set
+    # 'enable.auto.commit' to false and then pass a manually assembled
+    # TopicPartitionList to {commit}.
+    #
+    # As with `each`, iteration will end when the consumer is closed.
+    #
+    # Exception behavior is more complicated than with `each`, in that if
+    # :yield_on_error is true, and an exception is raised during the
+    # poll, and messages have already been received, they will be yielded to
+    # the caller before the exception is allowed to propagate.
+    #
+    # If you are setting either auto.commit or auto.offset.store to false in
+    # the consumer configuration, then you should let yield_on_error keep its
+    # default value of false because you are guaranteed to see these messages
+    # again. However, if both auto.commit and auto.offset.store are set to
+    # true, you should set yield_on_error to true so you can process messages
+    # that you may or may not see again.
+    #
+    # @param max_items [Integer] Maximum size of the yielded array of messages
+    #
+    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
+    #
+    # @param timeout_ms [Integer] max time to wait for up to max_items
+    #
+    # @raise [RdkafkaError] When polling fails
+    #
+    # @yield [messages, pending_exception]
+    # @yieldparam messages [Array] An array of received Message
+    # @yieldparam pending_exception [Exception] normally nil, or an exception
+    # which will be propagated after processing of the partial batch is complete.
+    #
+    # @return [nil]
+    def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
+      closed_consumer_check(__method__)
+      slice = []
+      bytes = 0
+      end_time = monotonic_now + timeout_ms / 1000.0
+      loop do
+        break if @closing
+        max_wait = end_time - monotonic_now
+        max_wait_ms = if max_wait <= 0
+                        0 # should not block, but may retrieve a message
+                      else
+                        (max_wait * 1000).floor
+                      end
+        message = nil
+        begin
+          message = poll max_wait_ms
+        rescue Rdkafka::RdkafkaError => error
+          raise unless yield_on_error
+          raise if slice.empty?
+          yield slice.dup, error
+          raise
+        end
+        if message
+          slice << message
+          bytes += message.payload.bytesize
+        end
+        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
+          yield slice.dup, nil
+          slice.clear
+          bytes = 0
+          end_time = monotonic_now + timeout_ms / 1000.0
+        end
+      end
+    end
+
+    private
+    def monotonic_now
+      # needed because Time.now can go backwards
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
   end
 end
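A minimal sketch of each_batch with manual offset storage, following the guidance in its docs ("enable.auto.offset.store" set to false, processed messages passed to store_offset); broker address, group id, and topic name are illustrative:

    consumer = Rdkafka::Config.new(
      "bootstrap.servers"        => "localhost:9092",
      "group.id"                 => "example-group",
      "enable.auto.offset.store" => false
    ).consumer
    consumer.subscribe("example_topic")
    consumer.each_batch(max_items: 50, timeout_ms: 500) do |messages, _pending_exception|
      messages.each do |message|
        # process the message, then mark it as done
        consumer.store_offset(message)
      end
    end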
data/lib/rdkafka/error.rb CHANGED
@@ -1,15 +1,27 @@
 module Rdkafka
+  # Base error class.
+  class BaseError < RuntimeError; end
+
   # Error returned by the underlying rdkafka library.
-  class RdkafkaError < RuntimeError
+  class RdkafkaError < BaseError
     # The underlying raw error response
     # @return [Integer]
-    attr_reader :rdkafka_response, :message_prefix
+    attr_reader :rdkafka_response
+
+    # Prefix to be used for human readable representation
+    # @return [String]
+    attr_reader :message_prefix
+
+    # Error message sent by the broker
+    # @return [String]
+    attr_reader :broker_message
 
     # @private
-    def initialize(response, message_prefix=nil)
+    def initialize(response, message_prefix=nil, broker_message: nil)
       raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
       @rdkafka_response = response
       @message_prefix = message_prefix
+      @broker_message = broker_message
     end
 
     # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
@@ -57,4 +69,18 @@ module Rdkafka
       @topic_partition_list = topic_partition_list
     end
   end
+
+  # Error class for public consumer method calls on a closed consumer.
+  class ClosedConsumerError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed consumer")
+    end
+  end
+
+  # Error class for public producer method calls on a closed producer.
+  class ClosedProducerError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed producer")
+    end
+  end
 end
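A minimal sketch of the new guard in action, relying on the closed_consumer_check calls added to the consumer above; connection settings are illustrative:

    consumer = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "example-group"
    ).consumer
    consumer.close
    begin
      consumer.poll(100)
    rescue Rdkafka::ClosedConsumerError => e
      puts e.message # => "Illegal call to poll on a closed consumer"
    end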
data/lib/rdkafka/metadata.rb CHANGED
@@ -9,14 +9,15 @@ module Rdkafka
 
     ptr = FFI::MemoryPointer.new(:pointer)
 
-    # Retrieve metadata flag is 0/1 for single/multiple topics.
-    topic_flag = topic_name ? 1 : 0
+    # If topic_flag is 1, we request info about *all* topics in the cluster. If topic_flag is 0,
+    # we only request info about locally known topics (or a single topic if one is passed in).
+    topic_flag = topic_name.nil? ? 1 : 0
 
     # Retrieve the Metadata
     result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
 
     # Error Handling
-    Rdkafka::Error.new(result) unless result.zero?
+    raise Rdkafka::RdkafkaError.new(result) unless result.zero?
 
     metadata_from_native(ptr.read_pointer)
   ensure
@@ -34,11 +35,11 @@
 
     @topics = Array.new(metadata[:topics_count]) do |i|
       topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
-      Rdkafka::Error.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
+      raise Rdkafka::RdkafkaError.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
 
       partitions = Array.new(topic[:partition_count]) do |j|
         partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
-        Rdkafka::Error.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
+        raise Rdkafka::RdkafkaError.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
         partition.to_h
       end
       topic.to_h.merge!(partitions: partitions)
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -2,67 +2,21 @@ module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
     # producing a message.
-    class DeliveryHandle < FFI::Struct
+    class DeliveryHandle < Rdkafka::AbstractHandle
       layout :pending, :bool,
              :response, :int,
              :partition, :int,
              :offset, :int64
 
-      REGISTRY = {}
-
-      CURRENT_TIME = -> { Process.clock_gettime(Process::CLOCK_MONOTONIC) }.freeze
-
-      private_constant :CURRENT_TIME
-
-      def self.register(address, handle)
-        REGISTRY[address] = handle
-      end
-
-      def self.remove(address)
-        REGISTRY.delete(address)
-      end
-
-      # Whether the delivery handle is still pending.
-      #
-      # @return [Boolean]
-      def pending?
-        self[:pending]
+      # @return [String] the name of the operation (e.g. "delivery")
+      def operation_name
+        "delivery"
       end
 
-      # Wait for the delivery report or raise an error if this takes longer than the timeout.
-      # If there is a timeout this does not mean the message is not delivered, rdkafka might still be working on delivering the message.
-      # In this case it is possible to call wait again.
-      #
-      # @param max_wait_timeout [Numeric, nil] Amount of time to wait before timing out. If this is nil it does not time out.
-      # @param wait_timeout [Numeric] Amount of time we should wait before we recheck if there is a delivery report available
-      #
-      # @raise [RdkafkaError] When delivering the message failed
-      # @raise [WaitTimeoutError] When the timeout has been reached and the handle is still pending
-      #
-      # @return [DeliveryReport]
-      def wait(max_wait_timeout: 60, wait_timeout: 0.1)
-        timeout = if max_wait_timeout
-                    CURRENT_TIME.call + max_wait_timeout
-                  else
-                    nil
-                  end
-        loop do
-          if pending?
-            if timeout && timeout <= CURRENT_TIME.call
-              raise WaitTimeoutError.new("Waiting for delivery timed out after #{max_wait_timeout} seconds")
-            end
-            sleep wait_timeout
-          elsif self[:response] != 0
-            raise RdkafkaError.new(self[:response])
-          else
-            return DeliveryReport.new(self[:partition], self[:offset])
-          end
-        end
+      # @return [DeliveryReport] a report on the delivery of the message
+      def create_result
+        DeliveryReport.new(self[:partition], self[:offset])
       end
-
-      # Error that is raised when waiting for a delivery handle to complete
-      # takes longer than the specified timeout.
-      class WaitTimeoutError < RuntimeError; end
     end
   end
 end
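The slimmed-down class works because the registry and the polling wait loop moved into the new Rdkafka::AbstractHandle (added in this release, source not shown in this diff): a subclass supplies an FFI layout with :pending and :response fields plus the two hooks shown above. A hypothetical subclass, for illustration only:

    class ExampleHandle < Rdkafka::AbstractHandle
      layout :pending, :bool,   # set to false by a callback when the operation finishes
             :response, :int    # a non-zero response makes wait raise an RdkafkaError

      # Used in the timeout message raised by the shared wait loop
      def operation_name
        "example operation"
      end

      # Built by the shared wait loop once the handle is no longer pending
      def create_result
        nil
      end
    end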
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -11,7 +11,7 @@ module Rdkafka
     attr_reader :offset
 
     # Error in case happen during produce.
-    # @return [string]
+    # @return [String]
     attr_reader :error
 
     private