karafka-rdkafka 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +7 -0
  2. checksums.yaml.gz.sig +2 -0
  3. data/.gitignore +8 -0
  4. data/.rspec +1 -0
  5. data/.semaphore/semaphore.yml +23 -0
  6. data/.yardopts +2 -0
  7. data/CHANGELOG.md +104 -0
  8. data/Gemfile +3 -0
  9. data/Guardfile +19 -0
  10. data/LICENSE +21 -0
  11. data/README.md +114 -0
  12. data/Rakefile +96 -0
  13. data/bin/console +11 -0
  14. data/docker-compose.yml +24 -0
  15. data/ext/README.md +18 -0
  16. data/ext/Rakefile +62 -0
  17. data/lib/rdkafka/abstract_handle.rb +82 -0
  18. data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
  19. data/lib/rdkafka/admin/create_topic_report.rb +22 -0
  20. data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
  21. data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
  22. data/lib/rdkafka/admin.rb +155 -0
  23. data/lib/rdkafka/bindings.rb +312 -0
  24. data/lib/rdkafka/callbacks.rb +106 -0
  25. data/lib/rdkafka/config.rb +299 -0
  26. data/lib/rdkafka/consumer/headers.rb +63 -0
  27. data/lib/rdkafka/consumer/message.rb +84 -0
  28. data/lib/rdkafka/consumer/partition.rb +49 -0
  29. data/lib/rdkafka/consumer/topic_partition_list.rb +164 -0
  30. data/lib/rdkafka/consumer.rb +565 -0
  31. data/lib/rdkafka/error.rb +86 -0
  32. data/lib/rdkafka/metadata.rb +92 -0
  33. data/lib/rdkafka/producer/client.rb +47 -0
  34. data/lib/rdkafka/producer/delivery_handle.rb +22 -0
  35. data/lib/rdkafka/producer/delivery_report.rb +26 -0
  36. data/lib/rdkafka/producer.rb +178 -0
  37. data/lib/rdkafka/version.rb +5 -0
  38. data/lib/rdkafka.rb +22 -0
  39. data/rdkafka.gemspec +36 -0
  40. data/spec/rdkafka/abstract_handle_spec.rb +113 -0
  41. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  42. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  43. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  44. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  45. data/spec/rdkafka/admin_spec.rb +203 -0
  46. data/spec/rdkafka/bindings_spec.rb +134 -0
  47. data/spec/rdkafka/callbacks_spec.rb +20 -0
  48. data/spec/rdkafka/config_spec.rb +182 -0
  49. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  50. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  51. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +223 -0
  52. data/spec/rdkafka/consumer_spec.rb +1008 -0
  53. data/spec/rdkafka/error_spec.rb +89 -0
  54. data/spec/rdkafka/metadata_spec.rb +78 -0
  55. data/spec/rdkafka/producer/client_spec.rb +145 -0
  56. data/spec/rdkafka/producer/delivery_handle_spec.rb +42 -0
  57. data/spec/rdkafka/producer/delivery_report_spec.rb +17 -0
  58. data/spec/rdkafka/producer_spec.rb +525 -0
  59. data/spec/spec_helper.rb +139 -0
  60. data.tar.gz.sig +0 -0
  61. metadata +277 -0
  62. metadata.gz.sig +0 -0
data/lib/rdkafka/consumer.rb
@@ -0,0 +1,565 @@
+ module Rdkafka
+   # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
+   # brokers automatically assign partitions and load balance partitions over consumers that
+   # have the same `:"group.id"` set in their configuration.
+   #
+   # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
+   # mandatory to set `:"group.id"` in the configuration.
+   #
+   # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
+   # `each_slice` to consume batches of messages.
+   class Consumer
+     include Enumerable
+
+     # @private
+     def initialize(native_kafka)
+       @native_kafka = native_kafka
+       @closing = false
+     end
+
+     # Close this consumer
+     # @return [nil]
+     def close
+       return unless @native_kafka
+
+       @closing = true
+       Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
+       Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+       @native_kafka = nil
+     end
+
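As the class comment above says, a consumer is obtained from a {Config}. A minimal lifecycle sketch; the broker address and group id are illustrative placeholders, not part of the gem:

```ruby
require "rdkafka"

config = Rdkafka::Config.new(
  :"bootstrap.servers" => "localhost:9092", # placeholder broker address
  :"group.id" => "example-group"            # mandatory for consumers
)

consumer = config.consumer
# ... subscribe and consume messages ...
consumer.close # frees the native handle; further calls raise ClosedConsumerError
```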
+     # Subscribe to one or more topics, letting Kafka handle partition assignments.
+     #
+     # @param topics [Array<String>] One or more topic names
+     #
+     # @raise [RdkafkaError] When subscribing fails
+     #
+     # @return [nil]
+     def subscribe(*topics)
+       closed_consumer_check(__method__)
+
+       # Create topic partition list with topics and no partition set
+       tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
+
+       topics.each do |topic|
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
+       end
+
+       # Subscribe to topic partition list and check this was successful
+       response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
+     end
+
+     # Unsubscribe from all subscribed topics.
+     #
+     # @raise [RdkafkaError] When unsubscribing fails
+     #
+     # @return [nil]
+     def unsubscribe
+       closed_consumer_check(__method__)
+
+       response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     end
+
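For illustration, subscribing and iterating could look like this (topic names are placeholders; `each` is defined further down in this file):

```ruby
consumer.subscribe("events", "audit") # one or more topic names
consumer.each do |message|
  puts "#{message.topic}/#{message.partition}@#{message.offset}: #{message.payload}"
end
```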
+     # Pause consumption for the provided list of partitions
+     #
+     # @param list [TopicPartitionList] The topics with partitions to pause
+     #
+     # @raise [RdkafkaTopicPartitionListError] When pausing fails.
+     #
+     # @return [nil]
+     def pause(list)
+       closed_consumer_check(__method__)
+
+       unless list.is_a?(TopicPartitionList)
+         raise TypeError.new("list has to be a TopicPartitionList")
+       end
+
+       tpl = list.to_native_tpl
+
+       begin
+         response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+
+         if response != 0
+           list = TopicPartitionList.from_native_tpl(tpl)
+           raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+         end
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       end
+     end
+
+     # Resume consumption for the provided list of partitions
+     #
+     # @param list [TopicPartitionList] The topics with partitions to resume
+     #
+     # @raise [RdkafkaError] When resuming fails.
+     #
+     # @return [nil]
+     def resume(list)
+       closed_consumer_check(__method__)
+
+       unless list.is_a?(TopicPartitionList)
+         raise TypeError.new("list has to be a TopicPartitionList")
+       end
+
+       tpl = list.to_native_tpl
+
+       begin
+         response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+         if response != 0
+           raise Rdkafka::RdkafkaError.new(response, "Error resuming '#{list.to_h}'")
+         end
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       end
+     end
+
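A sketch of pausing and resuming one partition through a {TopicPartitionList} (topic and partition are placeholders):

```ruby
list = Rdkafka::Consumer::TopicPartitionList.new
list.add_topic("events", [0]) # only partition 0 of the placeholder topic

consumer.pause(list)  # fetching from events/0 stops; other assignments continue
# ... work through a backlog ...
consumer.resume(list) # fetching picks up where it was paused
```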
+     # Return the current subscription to topics and partitions
+     #
+     # @raise [RdkafkaError] When getting the subscription fails.
+     #
+     # @return [TopicPartitionList]
+     def subscription
+       closed_consumer_check(__method__)
+
+       ptr = FFI::MemoryPointer.new(:pointer)
+       response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+
+       native = ptr.read_pointer
+
+       begin
+         Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
+       end
+     end
+
+     # Atomic assignment of partitions to consume
+     #
+     # @param list [TopicPartitionList] The topics with partitions to assign
+     #
+     # @raise [RdkafkaError] When assigning fails
+     def assign(list)
+       closed_consumer_check(__method__)
+
+       unless list.is_a?(TopicPartitionList)
+         raise TypeError.new("list has to be a TopicPartitionList")
+       end
+
+       tpl = list.to_native_tpl
+
+       begin
+         response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+         if response != 0
+           raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+         end
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       end
+     end
+
+     # Returns the current partition assignment.
+     #
+     # @raise [RdkafkaError] When getting the assignment fails.
+     #
+     # @return [TopicPartitionList]
+     def assignment
+       closed_consumer_check(__method__)
+
+       ptr = FFI::MemoryPointer.new(:pointer)
+       response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+
+       tpl = ptr.read_pointer
+
+       if !tpl.null?
+         begin
+           Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+         ensure
+           Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+         end
+       end
+     ensure
+       ptr.free unless ptr.nil?
+     end
+
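For manual partition management, a hypothetical assignment could be (topic and partition numbers are placeholders):

```ruby
list = Rdkafka::Consumer::TopicPartitionList.new
list.add_topic("events", [0, 1]) # explicit partitions, bypassing group rebalancing

consumer.assign(list)
consumer.assignment.to_h # => { "events" => [...] } with the assigned partitions
```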
+     # Return the current committed offset per partition for this consumer group.
+     # The offset field of each requested partition will either be set to the stored offset or to -1001 in case there was no stored offset for that partition.
+     #
+     # @param list [TopicPartitionList, nil] The topics with partitions to get the offsets for, or nil to use the current assignment.
+     # @param timeout_ms [Integer] The timeout for fetching this information.
+     #
+     # @raise [RdkafkaError] When getting the committed positions fails.
+     #
+     # @return [TopicPartitionList]
+     def committed(list=nil, timeout_ms=1200)
+       closed_consumer_check(__method__)
+
+       if list.nil?
+         list = assignment
+       elsif !list.is_a?(TopicPartitionList)
+         raise TypeError.new("list has to be nil or a TopicPartitionList")
+       end
+
+       tpl = list.to_native_tpl
+
+       begin
+         response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+         if response != 0
+           raise Rdkafka::RdkafkaError.new(response)
+         end
+         TopicPartitionList.from_native_tpl(tpl)
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       end
+     end
+
+     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
+     #
+     # @param topic [String] The topic to query
+     # @param partition [Integer] The partition to query
+     # @param timeout_ms [Integer] The timeout for querying the broker
+     #
+     # @raise [RdkafkaError] When querying the broker fails.
+     #
+     # @return [Array<Integer>] The low and high watermark
+     def query_watermark_offsets(topic, partition, timeout_ms=200)
+       closed_consumer_check(__method__)
+
+       low = FFI::MemoryPointer.new(:int64, 1)
+       high = FFI::MemoryPointer.new(:int64, 1)
+
+       response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
+         @native_kafka,
+         topic,
+         partition,
+         low,
+         high,
+         timeout_ms,
+       )
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
+       end
+
+       return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
+     ensure
+       low.free unless low.nil?
+       high.free unless high.nil?
+     end
+
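A small usage sketch of the watermark query (topic, partition, and timeout are placeholders):

```ruby
low, high = consumer.query_watermark_offsets("events", 0, 500)
puts "events/0 spans offsets #{low} (oldest) to #{high} (high watermark)"
```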
+     # Calculate the consumer lag per partition for the provided topic partition list.
+     # You can get a suitable list by calling {committed} or {position} (TODO). It is also
+     # possible to create one yourself, in which case you have to provide a list that
+     # already contains all the partitions you need the lag for.
+     #
+     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
+     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
+     #
+     # @raise [RdkafkaError] When querying the broker fails.
+     #
+     # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
+     def lag(topic_partition_list, watermark_timeout_ms=100)
+       out = {}
+
+       topic_partition_list.to_h.each do |topic, partitions|
+         # Query high watermarks for this topic's partitions
+         # and compare to the offset in the list.
+         topic_out = {}
+         partitions.each do |p|
+           next if p.offset.nil?
+           low, high = query_watermark_offsets(
+             topic,
+             p.partition,
+             watermark_timeout_ms
+           )
+           topic_out[p.partition] = high - p.offset
+         end
+         out[topic] = topic_out
+       end
+       out
+     end
+
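Combined with {committed}, this yields the usual lag report; the concrete numbers below are illustrative only:

```ruby
consumer.lag(consumer.committed)
# => { "events" => { 0 => 12, 1 => 0 } }
# topic => { partition => messages behind the high watermark }
```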
+     # Returns the ClusterId as reported in broker metadata.
+     #
+     # @return [String, nil]
+     def cluster_id
+       closed_consumer_check(__method__)
+       Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+     end
+
+     # Returns this client's broker-assigned group member id.
+     #
+     # This currently requires the high-level KafkaConsumer.
+     #
+     # @return [String, nil]
+     def member_id
+       closed_consumer_check(__method__)
+       Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+     end
+
+     # Store the offset of a message to be used in the next commit of this consumer.
+     #
+     # When using this, `enable.auto.offset.store` should be set to `false` in the config.
+     #
+     # @param message [Rdkafka::Consumer::Message] The message whose offset will be stored
+     #
+     # @raise [RdkafkaError] When storing the offset fails
+     #
+     # @return [nil]
+     def store_offset(message)
+       closed_consumer_check(__method__)
+
+       # rd_kafka_offset_store is one of the few calls that does not support
+       # a string as the topic, so create a native topic for it.
+       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
+         @native_kafka,
+         message.topic,
+         nil
+       )
+       response = Rdkafka::Bindings.rd_kafka_offset_store(
+         native_topic,
+         message.partition,
+         message.offset
+       )
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     ensure
+       if native_topic && !native_topic.null?
+         Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
+       end
+     end
+
+     # Seek to a particular message. The next poll on the topic/partition will return the
+     # message at the given offset.
+     #
+     # @param message [Rdkafka::Consumer::Message] The message to which to seek
+     #
+     # @raise [RdkafkaError] When seeking fails
+     #
+     # @return [nil]
+     def seek(message)
+       closed_consumer_check(__method__)
+
+       # rd_kafka_seek is one of the few calls that does not support
+       # a string as the topic, so create a native topic for it.
+       native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
+         @native_kafka,
+         message.topic,
+         nil
+       )
+       response = Rdkafka::Bindings.rd_kafka_seek(
+         native_topic,
+         message.partition,
+         message.offset,
+         0 # timeout
+       )
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     ensure
+       if native_topic && !native_topic.null?
+         Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
+       end
+     end
+
+     # Manually commit the current offsets of this consumer.
+     #
+     # To use this, set `enable.auto.commit` to `false` to disable automatic triggering
+     # of commits.
+     #
+     # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
+     # message for every partition is used. If set to `false` you can use {store_offset} to
+     # indicate when a message has been fully processed.
+     #
+     # @param list [TopicPartitionList,nil] The topics with partitions to commit
+     # @param async [Boolean] Whether to commit async or wait for the commit to finish
+     #
+     # @raise [RdkafkaError] When committing fails
+     #
+     # @return [nil]
+     def commit(list=nil, async=false)
+       closed_consumer_check(__method__)
+
+       if !list.nil? && !list.is_a?(TopicPartitionList)
+         raise TypeError.new("list has to be nil or a TopicPartitionList")
+       end
+
+       tpl = list ? list.to_native_tpl : nil
+
+       begin
+         response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+         if response != 0
+           raise Rdkafka::RdkafkaError.new(response)
+         end
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+       end
+     end
+
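A sketch of the manual-commit workflow described above, assuming `enable.auto.offset.store` and `enable.auto.commit` are both set to `false`; `handle` is a hypothetical application method:

```ruby
consumer.each do |message|
  handle(message)                # process first ...
  consumer.store_offset(message) # ... then record progress
  consumer.commit(nil, true)     # async commit of everything stored so far
end
```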
+     # Poll for the next message on one of the subscribed topics
+     #
+     # @param timeout_ms [Integer] Timeout of this poll
+     #
+     # @raise [RdkafkaError] When polling fails
+     #
+     # @return [Message, nil] A message or nil if there was no new message within the timeout
+     def poll(timeout_ms)
+       closed_consumer_check(__method__)
+
+       message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+       if message_ptr.null?
+         nil
+       else
+         # Create struct wrapper
+         native_message = Rdkafka::Bindings::Message.new(message_ptr)
+         # Raise error if needed
+         if native_message[:err] != 0
+           raise Rdkafka::RdkafkaError.new(native_message[:err])
+         end
+         # Create a message to pass out
+         Rdkafka::Consumer::Message.new(native_message)
+       end
+     ensure
+       # Clean up rdkafka message if there is one
+       if !message_ptr.nil? && !message_ptr.null?
+         Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
+       end
+     end
+
+     # Poll for new messages and yield for each received one. Iteration
+     # will end when the consumer is closed.
+     #
+     # If `enable.partition.eof` is turned on in the config this will raise an
+     # error when an EOF is reached, so you probably want to disable that when
+     # using this method of iteration.
+     #
+     # @raise [RdkafkaError] When polling fails
+     #
+     # @yieldparam message [Message] Received message
+     #
+     # @return [nil]
+     def each
+       loop do
+         message = poll(250)
+         if message
+           yield(message)
+         else
+           if @closing
+             break
+           else
+             next
+           end
+         end
+       end
+     end
+
+     # Raises an exception if this consumer has already been closed
+     def closed_consumer_check(method)
+       raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
+     end
+
+     # Poll for new messages and yield them in batches that may contain
+     # messages from more than one partition.
+     #
+     # Rather than yield each message immediately as soon as it is received,
+     # each_batch will attempt to wait for as long as `timeout_ms` in order
+     # to create a batch of up to but no more than `max_items` in size.
+     #
+     # Said differently, if more than `max_items` are available within
+     # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
+     # array, but if `timeout_ms` passes by with fewer messages arriving, it
+     # will yield an array of fewer messages, quite possibly zero.
+     #
+     # In order to prevent wrongly auto committing many messages at once across
+     # possibly many partitions, callers must explicitly indicate which messages
+     # have been successfully processed as some consumed messages may not have
+     # been yielded yet. To do this, the caller should set
+     # `enable.auto.offset.store` to false and pass processed messages to
+     # {store_offset}. It is also possible, though more complex, to set
+     # `enable.auto.commit` to false and then pass a manually assembled
+     # TopicPartitionList to {commit}.
+     #
+     # As with `each`, iteration will end when the consumer is closed.
+     #
+     # Exception behavior is more complicated than with `each`, in that if
+     # `yield_on_error` is true, and an exception is raised during the
+     # poll, and messages have already been received, they will be yielded to
+     # the caller before the exception is allowed to propagate.
+     #
+     # If you are setting either auto.commit or auto.offset.store to false in
+     # the consumer configuration, then you should let yield_on_error keep its
+     # default value of false because you are guaranteed to see these messages
+     # again. However, if both auto.commit and auto.offset.store are set to
+     # true, you should set yield_on_error to true so you can process messages
+     # that you may or may not see again.
+     #
+     # @param max_items [Integer] Maximum size of the yielded array of messages
+     #
+     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
+     #
+     # @param timeout_ms [Integer] Maximum time to wait for up to max_items
+     #
+     # @param yield_on_error [Boolean] Whether to yield already-received messages
+     #   when an exception is raised during the poll, before the exception propagates
+     #
+     # @raise [RdkafkaError] When polling fails
+     #
+     # @yield [messages, pending_exception]
+     # @yieldparam messages [Array] An array of received Message
+     # @yieldparam pending_exception [Exception] normally nil, or an exception
+     #   which will be propagated after processing of the partial batch is complete.
+     #
+     # @return [nil]
+     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
+       closed_consumer_check(__method__)
+       slice = []
+       bytes = 0
+       end_time = monotonic_now + timeout_ms / 1000.0
+       loop do
+         break if @closing
+         max_wait = end_time - monotonic_now
+         max_wait_ms = if max_wait <= 0
+                         0 # should not block, but may retrieve a message
+                       else
+                         (max_wait * 1000).floor
+                       end
+         message = nil
+         begin
+           message = poll max_wait_ms
+         rescue Rdkafka::RdkafkaError => error
+           raise unless yield_on_error
+           raise if slice.empty?
+           yield slice.dup, error
+           raise
+         end
+         if message
+           slice << message
+           bytes += message.payload.bytesize
+         end
+         if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
+           yield slice.dup, nil
+           slice.clear
+           bytes = 0
+           end_time = monotonic_now + timeout_ms / 1000.0
+         end
+       end
+     end
+
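A usage sketch; `bulk_insert` is a hypothetical application sink:

```ruby
consumer.each_batch(max_items: 500, timeout_ms: 1_000) do |messages, pending_exception|
  # `messages` may be empty if nothing arrived within timeout_ms
  bulk_insert(messages) unless messages.empty?
  # pending_exception stays nil unless yield_on_error: true was requested
end
```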
+     private
+     def monotonic_now
+       # needed because Time.now can go backwards
+       Process.clock_gettime(Process::CLOCK_MONOTONIC)
+     end
+   end
+ end
data/lib/rdkafka/error.rb
@@ -0,0 +1,86 @@
+ module Rdkafka
+   # Base error class.
+   class BaseError < RuntimeError; end
+
+   # Error returned by the underlying rdkafka library.
+   class RdkafkaError < BaseError
+     # The underlying raw error response
+     # @return [Integer]
+     attr_reader :rdkafka_response
+
+     # Prefix to be used for human readable representation
+     # @return [String]
+     attr_reader :message_prefix
+
+     # Error message sent by the broker
+     # @return [String]
+     attr_reader :broker_message
+
+     # @private
+     def initialize(response, message_prefix=nil, broker_message: nil)
+       raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
+       @rdkafka_response = response
+       @message_prefix = message_prefix
+       @broker_message = broker_message
+     end
+
+     # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
+     # @return [Symbol]
+     def code
+       code = Rdkafka::Bindings.rd_kafka_err2name(@rdkafka_response).downcase
+       if code[0] == "_"
+         code[1..-1].to_sym
+       else
+         code.to_sym
+       end
+     end
+
+     # Human readable representation of this error.
+     # @return [String]
+     def to_s
+       message_prefix_part = if message_prefix
+                               "#{message_prefix} - "
+                             else
+                               ''
+                             end
+       "#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})"
+     end
+
+     # Whether this error indicates the partition is EOF.
+     # @return [Boolean]
+     def is_partition_eof?
+       code == :partition_eof
+     end
+
+     # Error comparison
+     def ==(another_error)
+       another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
+     end
+   end
+
+   # Error with topic partition list returned by the underlying rdkafka library.
+   class RdkafkaTopicPartitionListError < RdkafkaError
+     # @return [TopicPartitionList]
+     attr_reader :topic_partition_list
+
+     # @private
+     def initialize(response, topic_partition_list, message_prefix=nil)
+       super(response, message_prefix)
+       @topic_partition_list = topic_partition_list
+     end
+   end
+
+   # Error class for public consumer method calls on a closed consumer.
+   class ClosedConsumerError < BaseError
+     def initialize(method)
+       super("Illegal call to #{method.to_s} on a closed consumer")
+     end
+   end
+
+   # Error class for public producer method calls on a closed producer.
+   class ClosedProducerError < BaseError
+     def initialize(method)
+       super("Illegal call to #{method.to_s} on a closed producer")
+     end
+   end
+ end
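Typical handling of these error classes on the consuming side; a sketch:

```ruby
begin
  consumer.poll(250)
rescue Rdkafka::RdkafkaError => e
  if e.is_partition_eof?
    # benign when `enable.partition.eof` is enabled in the config
  else
    warn "rdkafka failure #{e.code}: #{e}" # code is a Symbol, e.g. :msg_size_too_large
    raise
  end
end
```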