rdkafka 0.12.0 → 0.15.0

Files changed (86)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +2 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +58 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +141 -93
  10. data/Gemfile +2 -0
  11. data/{LICENSE → MIT-LICENSE} +2 -1
  12. data/README.md +64 -29
  13. data/Rakefile +2 -0
  14. data/certs/cert_chain.pem +26 -0
  15. data/docker-compose.yml +18 -15
  16. data/ext/README.md +1 -1
  17. data/ext/Rakefile +3 -1
  18. data/lib/rdkafka/abstract_handle.rb +41 -25
  19. data/lib/rdkafka/admin/acl_binding_result.rb +37 -0
  20. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  21. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  22. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  23. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  24. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  25. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  26. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  29. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  30. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  31. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  32. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  33. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  34. data/lib/rdkafka/admin.rb +494 -35
  35. data/lib/rdkafka/bindings.rb +175 -40
  36. data/lib/rdkafka/callbacks.rb +194 -1
  37. data/lib/rdkafka/config.rb +62 -25
  38. data/lib/rdkafka/consumer/headers.rb +24 -9
  39. data/lib/rdkafka/consumer/message.rb +3 -1
  40. data/lib/rdkafka/consumer/partition.rb +2 -0
  41. data/lib/rdkafka/consumer/topic_partition_list.rb +13 -8
  42. data/lib/rdkafka/consumer.rb +219 -102
  43. data/lib/rdkafka/error.rb +15 -0
  44. data/lib/rdkafka/helpers/time.rb +14 -0
  45. data/lib/rdkafka/metadata.rb +25 -2
  46. data/lib/rdkafka/native_kafka.rb +120 -0
  47. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  48. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  49. data/lib/rdkafka/producer.rb +117 -17
  50. data/lib/rdkafka/version.rb +5 -3
  51. data/lib/rdkafka.rb +24 -2
  52. data/rdkafka.gemspec +19 -3
  53. data/renovate.json +6 -0
  54. data/spec/rdkafka/abstract_handle_spec.rb +1 -1
  55. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  56. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  57. data/spec/rdkafka/admin/create_topic_handle_spec.rb +1 -1
  58. data/spec/rdkafka/admin/create_topic_report_spec.rb +1 -1
  59. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  60. data/spec/rdkafka/admin/delete_acl_report_spec.rb +71 -0
  61. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +1 -1
  62. data/spec/rdkafka/admin/delete_topic_report_spec.rb +1 -1
  63. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  64. data/spec/rdkafka/admin/describe_acl_report_spec.rb +72 -0
  65. data/spec/rdkafka/admin_spec.rb +209 -5
  66. data/spec/rdkafka/bindings_spec.rb +2 -1
  67. data/spec/rdkafka/callbacks_spec.rb +1 -1
  68. data/spec/rdkafka/config_spec.rb +24 -3
  69. data/spec/rdkafka/consumer/headers_spec.rb +60 -0
  70. data/spec/rdkafka/consumer/message_spec.rb +1 -1
  71. data/spec/rdkafka/consumer/partition_spec.rb +1 -1
  72. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +20 -1
  73. data/spec/rdkafka/consumer_spec.rb +332 -61
  74. data/spec/rdkafka/error_spec.rb +1 -1
  75. data/spec/rdkafka/metadata_spec.rb +4 -3
  76. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -35
  77. data/spec/rdkafka/producer/delivery_handle_spec.rb +4 -1
  78. data/spec/rdkafka/producer/delivery_report_spec.rb +7 -3
  79. data/spec/rdkafka/producer_spec.rb +208 -20
  80. data/spec/spec_helper.rb +20 -2
  81. data.tar.gz.sig +3 -0
  82. metadata +79 -16
  83. metadata.gz.sig +3 -0
  84. data/.semaphore/semaphore.yml +0 -23
  85. data/bin/console +0 -11
  86. data/lib/rdkafka/producer/client.rb +0 -47
data/lib/rdkafka/consumer.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
   # brokers automatically assign partitions and load balance partitions over consumers that
@@ -10,31 +12,47 @@ module Rdkafka
   # `each_slice` to consume batches of messages.
   class Consumer
     include Enumerable
+    include Helpers::Time
 
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      @closing = false
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    # @return [String] consumer name
+    def name
+      @name ||= @native_kafka.with_inner do |inner|
+        ::Rdkafka::Bindings.rd_kafka_name(inner)
+      end
     end
 
     # Close this consumer
     # @return [nil]
     def close
-      return unless @native_kafka
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+
+      @native_kafka.synchronize do |inner|
+        Rdkafka::Bindings.rd_kafka_consumer_close(inner)
+      end
+
+      @native_kafka.close
+    end
 
-      @closing = true
-      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+    # Whether this consumer has closed
+    def closed?
+      @native_kafka.closed?
     end
 
-    # Subscribe to one or more topics letting Kafka handle partition assignments.
+    # Subscribes to one or more topics letting Kafka handle partition assignments.
     #
     # @param topics [Array<String>] One or more topic names
-    #
-    # @raise [RdkafkaError] When subscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When subscribing fails
     def subscribe(*topics)
       closed_consumer_check(__method__)
 
@@ -46,7 +64,9 @@ module Rdkafka
       end
 
       # Subscribe to topic partition list and check this was successful
-      response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
+      end
       if response != 0
         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
       end
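
For orientation, a minimal lifecycle sketch against the reworked API (broker address, group id, and topic name are placeholders, not from the diff):

    require "rdkafka"

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092", # placeholder broker
      "group.id"          => "example-group"   # placeholder group
    )

    consumer = config.consumer
    consumer.subscribe("example-topic")
    puts consumer.name      # librdkafka-assigned name, e.g. "rdkafka#consumer-1"

    consumer.close
    puts consumer.closed?   # => true; most calls now raise Rdkafka::ClosedConsumerError
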
@@ -56,13 +76,14 @@ module Rdkafka
 
     # Unsubscribe from all subscribed topics.
     #
-    # @raise [RdkafkaError] When unsubscribing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When unsubscribing fails
     def unsubscribe
       closed_consumer_check(__method__)
 
-      response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
+      end
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
@@ -71,10 +92,8 @@ module Rdkafka
     # Pause producing or consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
     def pause(list)
       closed_consumer_check(__method__)
 
@@ -85,7 +104,9 @@ module Rdkafka
       tpl = list.to_native_tpl
 
       begin
-        response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
+        end
 
         if response != 0
           list = TopicPartitionList.from_native_tpl(tpl)
@@ -96,13 +117,11 @@ module Rdkafka
       end
     end
 
-    # Resume producing consumption for the provided list of partitions
+    # Resumes producing consumption for the provided list of partitions
     #
     # @param list [TopicPartitionList] The topic with partitions to pause
-    #
-    # @raise [RdkafkaError] When resume subscription fails.
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When resume subscription fails.
     def resume(list)
       closed_consumer_check(__method__)
 
@@ -113,7 +132,9 @@ module Rdkafka
       tpl = list.to_native_tpl
 
       begin
-        response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
+        end
         if response != 0
           raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
         end
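
A hedged usage sketch for pause/resume (assumes the consumer already holds an assignment):

    # Pause everything currently assigned, drain in-flight work, then resume.
    assigned = consumer.assignment   # TopicPartitionList
    consumer.pause(assigned)

    # ... process or flush outstanding messages ...

    consumer.resume(assigned)
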
@@ -122,16 +143,17 @@ module Rdkafka
       end
     end
 
-    # Return the current subscription to topics and partitions
-    #
-    # @raise [RdkafkaError] When getting the subscription fails.
+    # Returns the current subscription to topics and partitions
     #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the subscription fails.
     def subscription
       closed_consumer_check(__method__)
 
       ptr = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
+      end
 
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
@@ -149,7 +171,6 @@ module Rdkafka
     # Atomic assignment of partitions to consume
     #
     # @param list [TopicPartitionList] The topic with partitions to assign
-    #
     # @raise [RdkafkaError] When assigning fails
     def assign(list)
       closed_consumer_check(__method__)
@@ -161,7 +182,9 @@ module Rdkafka
       tpl = list.to_native_tpl
 
       begin
-        response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
+        end
         if response != 0
           raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
         end
@@ -172,14 +195,15 @@ module Rdkafka
 
     # Returns the current partition assignment.
     #
-    # @raise [RdkafkaError] When getting the assignment fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the assignment fails.
     def assignment
       closed_consumer_check(__method__)
 
       ptr = FFI::MemoryPointer.new(:pointer)
-      response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
+      end
       if response != 0
         raise Rdkafka::RdkafkaError.new(response)
       end
@@ -197,15 +221,24 @@ module Rdkafka
       ptr.free unless ptr.nil?
     end
 
+    # @return [Boolean] true if our current assignment has been lost involuntarily.
+    def assignment_lost?
+      closed_consumer_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
+      end
+    end
+
     # Return the current committed offset per partition for this consumer group.
-    # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
+    # The offset field of each requested partition will either be set to stored offset or to -1001
+    # in case there was no stored offset for that partition.
     #
-    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
+    # to use the current subscription.
     # @param timeout_ms [Integer] The timeout for fetching this information.
-    #
-    # @raise [RdkafkaError] When getting the committed positions fails.
-    #
     # @return [TopicPartitionList]
+    # @raise [RdkafkaError] When getting the committed positions fails.
     def committed(list=nil, timeout_ms=1200)
       closed_consumer_check(__method__)
 
@@ -218,7 +251,9 @@ module Rdkafka
       tpl = list.to_native_tpl
 
       begin
-        response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
+        end
         if response != 0
           raise Rdkafka::RdkafkaError.new(response)
         end
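
A sketch of how the new `assignment_lost?` flag might be used when deciding whether to commit during a rebalance (the surrounding application logic is illustrative):

    # Committing makes sense only while the group still considers
    # these partitions ours.
    if consumer.assignment_lost?
      warn "assignment lost involuntarily, skipping commit"
    else
      consumer.commit(nil, false)  # synchronous commit of current offsets
    end
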
@@ -228,29 +263,57 @@ module Rdkafka
       end
     end
 
+    # Return the current positions (offsets) for topics and partitions.
+    # The offset field of each requested partition will be set to the offset of the last consumed message + 1, or nil in case there was no previous message.
+    #
+    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdkafkaError] When getting the positions fails.
+    def position(list=nil)
+      if list.nil?
+        list = assignment
+      elsif !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be nil or a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_position(inner, tpl)
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    end
+
     # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
     #
     # @param topic [String] The topic to query
     # @param partition [Integer] The partition to query
     # @param timeout_ms [Integer] The timeout for querying the broker
-    #
-    # @raise [RdkafkaError] When querying the broker fails.
-    #
     # @return [Integer] The low and high watermark
-    def query_watermark_offsets(topic, partition, timeout_ms=200)
+    # @raise [RdkafkaError] When querying the broker fails.
+    def query_watermark_offsets(topic, partition, timeout_ms=1000)
       closed_consumer_check(__method__)
 
       low = FFI::MemoryPointer.new(:int64, 1)
       high = FFI::MemoryPointer.new(:int64, 1)
 
-      response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
-        @native_kafka,
-        topic,
-        partition,
-        low,
-        high,
-        timeout_ms,
-      )
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
+          inner,
+          topic,
+          partition,
+          low,
+          high,
+          timeout_ms,
+        )
+      end
       if response != 0
         raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
       end
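
A hedged sketch combining the new `position` with a watermark query (topic name is a placeholder):

    # Broker-side bounds for partition 0 of a topic:
    low, high = consumer.query_watermark_offsets("example-topic", 0, 5_000)
    puts "partition 0 spans offsets #{low}..#{high}"

    # Client-side next-fetch positions for the current assignment:
    consumer.position.to_h.each do |topic, partitions|
      partitions.each do |p|
        puts "#{topic}/#{p.partition} position=#{p.offset.inspect}"
      end
    end
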
@@ -268,11 +331,10 @@ module Rdkafka
     #
     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
-    #
+    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
+    # per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    #
-    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
-    def lag(topic_partition_list, watermark_timeout_ms=100)
+    def lag(topic_partition_list, watermark_timeout_ms=1000)
       out = {}
 
       topic_partition_list.to_h.each do |topic, partitions|
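
For example, lag can be computed from the committed offsets (a sketch; the output shape follows the @return tag above, values are illustrative):

    committed = consumer.committed   # TopicPartitionList with stored offsets
    lags = consumer.lag(committed)
    # => e.g. { "example-topic" => { 0 => 3, 1 => 0 } }
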
@@ -298,7 +360,9 @@ module Rdkafka
     # @return [String, nil]
     def cluster_id
       closed_consumer_check(__method__)
-      Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_clusterid(inner)
+      end
     end
 
     # Returns this client's broker-assigned group member id
@@ -308,7 +372,9 @@ module Rdkafka
     # @return [String, nil]
     def member_id
       closed_consumer_check(__method__)
-      Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_memberid(inner)
+      end
     end
 
     # Store offset of a message to be used in the next commit of this consumer
@@ -316,20 +382,20 @@ module Rdkafka
     # When using this `enable.auto.offset.store` should be set to `false` in the config.
     #
     # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
-    #
-    # @raise [RdkafkaError] When storing the offset fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When storing the offset fails
     def store_offset(message)
      closed_consumer_check(__method__)
 
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
-      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-        @native_kafka,
-        message.topic,
-        nil
-      )
+      native_topic = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_topic_new(
+          inner,
+          message.topic,
+          nil
+        )
+      end
       response = Rdkafka::Bindings.rd_kafka_offset_store(
         native_topic,
         message.partition,
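
A usage sketch for manual offset storage; `process` is a hypothetical application method, and `enable.auto.offset.store` must be `false` in the config:

    consumer.each do |message|
      process(message)               # your processing logic (hypothetical)
      consumer.store_offset(message) # mark it for the next commit
    end
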
@@ -348,20 +414,20 @@ module Rdkafka
     # message at the given offset.
     #
     # @param message [Rdkafka::Consumer::Message] The message to which to seek
-    #
-    # @raise [RdkafkaError] When seeking fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When seeking fails
     def seek(message)
       closed_consumer_check(__method__)
 
       # rd_kafka_offset_store is one of the few calls that does not support
       # a string as the topic, so create a native topic for it.
-      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-        @native_kafka,
-        message.topic,
-        nil
-      )
+      native_topic = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_topic_new(
+          inner,
+          message.topic,
+          nil
+        )
+      end
       response = Rdkafka::Bindings.rd_kafka_seek(
         native_topic,
         message.partition,
@@ -377,6 +443,39 @@ module Rdkafka
       end
     end
 
+    # Lookup offset for the given partitions by timestamp.
+    #
+    # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
+    #
+    # @return [TopicPartitionList]
+    #
+    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
+    def offsets_for_times(list, timeout_ms = 1000)
+      closed_consumer_check(__method__)
+
+      if !list.is_a?(TopicPartitionList)
+        raise TypeError.new("list has to be a TopicPartitionList")
+      end
+
+      tpl = list.to_native_tpl
+
+      response = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_offsets_for_times(
+          inner,
+          tpl,
+          timeout_ms # timeout
+        )
+      end
+
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+
+      TopicPartitionList.from_native_tpl(tpl)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+    end
+
     # Manually commit the current offsets of this consumer.
     #
     # To use this set `enable.auto.commit`to `false` to disable automatic triggering
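
A hedged sketch of a timestamp lookup with the new `offsets_for_times` (topic and time window are placeholders; timestamps are epoch milliseconds):

    one_hour_ago = ((Time.now - 3600).to_f * 1000).to_i

    list = Rdkafka::Consumer::TopicPartitionList.new
    list.add_topic_and_partitions_with_offsets("example-topic", 0 => one_hour_ago)

    # The returned list carries the first offsets at/after the given timestamps.
    result = consumer.offsets_for_times(list, 2_000)
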
@@ -388,10 +487,8 @@ module Rdkafka
     #
     # @param list [TopicPartitionList,nil] The topic with partitions to commit
     # @param async [Boolean] Whether to commit async or wait for the commit to finish
-    #
-    # @raise [RdkafkaError] When committing fails
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When committing fails
     def commit(list=nil, async=false)
       closed_consumer_check(__method__)
 
@@ -402,7 +499,9 @@ module Rdkafka
       tpl = list ? list.to_native_tpl : nil
 
       begin
-        response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+        response = @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
+        end
         if response != 0
           raise Rdkafka::RdkafkaError.new(response)
         end
@@ -414,14 +513,14 @@ module Rdkafka
     # Poll for the next message on one of the subscribed topics
     #
     # @param timeout_ms [Integer] Timeout of this poll
-    #
-    # @raise [RdkafkaError] When polling fails
-    #
     # @return [Message, nil] A message or nil if there was no new message within the timeout
+    # @raise [RdkafkaError] When polling fails
     def poll(timeout_ms)
       closed_consumer_check(__method__)
 
-      message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+      message_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
+      end
       if message_ptr.null?
         nil
       else
@@ -436,30 +535,53 @@ module Rdkafka
       end
     ensure
       # Clean up rdkafka message if there is one
-      if !message_ptr.nil? && !message_ptr.null?
+      if message_ptr && !message_ptr.null?
         Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
       end
     end
 
+    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
+    # was set to `true`.
+    #
+    # Events will cause application-provided callbacks to be called.
+    #
+    # Events (in the context of the consumer):
+    # - error callbacks
+    # - stats callbacks
+    # - any other callbacks supported by librdkafka that are not part of the consumer_poll, that
+    # would have a callback configured and activated.
+    #
+    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
+    # be called. When in use, does **NOT** replace `#poll` but needs to run complementary with it.
+    #
+    # @param timeout_ms [Integer] poll timeout. If set to 0 will run async, when set to -1 will
+    # block until any events available.
+    #
+    # @note This method technically should be called `#poll` and the current `#poll` should be
+    # called `#consumer_poll` though we keep the current naming convention to make it backward
+    # compatible.
+    def events_poll(timeout_ms = 0)
+      @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
+      end
+    end
+
     # Poll for new messages and yield for each received one. Iteration
     # will end when the consumer is closed.
     #
-    # If `enable.partition.eof` is turned on in the config this will raise an
-    # error when an eof is reached, so you probably want to disable that when
-    # using this method of iteration.
-    #
-    # @raise [RdkafkaError] When polling fails
+    # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
+    # reached, so you probably want to disable that when using this method of iteration.
     #
     # @yieldparam message [Message] Received message
-    #
     # @return [nil]
+    # @raise [RdkafkaError] When polling fails
     def each
       loop do
         message = poll(250)
         if message
           yield(message)
         else
-          if @closing
+          if closed?
             break
           else
             next
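
One plausible pattern for the new `events_poll`: serve callback events from a side thread while `#each` handles messages (a sketch; the 100ms interval is arbitrary and shutdown coordination between the threads is elided):

    # Serve error/stats callbacks queued on the main rdkafka queue.
    Thread.new do
      consumer.events_poll(100) until consumer.closed?  # block up to 100ms per call
    end

    consumer.each do |message|
      # regular message handling
    end
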
@@ -468,10 +590,6 @@ module Rdkafka
       end
     end
 
-    def closed_consumer_check(method)
-      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
-    end
-
     # Poll for new messages and yield them in batches that may contain
     # messages from more than one partition.
     #
@@ -508,26 +626,25 @@
     # that you may or may not see again.
     #
     # @param max_items [Integer] Maximum size of the yielded array of messages
-    #
     # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
-    #
     # @param timeout_ms [Integer] max time to wait for up to max_items
     #
-    # @raise [RdkafkaError] When polling fails
-    #
-    # @yield [messages, pending_exception]
     # @yieldparam messages [Array] An array of received Message
     # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #
+    # @yield [messages, pending_exception]
     # which will be propagated after processing of the partial batch is complete.
     #
     # @return [nil]
+    #
+    # @raise [RdkafkaError] When polling fails
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       closed_consumer_check(__method__)
       slice = []
       bytes = 0
       end_time = monotonic_now + timeout_ms / 1000.0
       loop do
-        break if @closing
+        break if closed?
         max_wait = end_time - monotonic_now
         max_wait_ms = if max_wait <= 0
           0 # should not block, but may retrieve a message
@@ -545,7 +662,7 @@
         end
         if message
           slice << message
-          bytes += message.payload.bytesize
+          bytes += message.payload.bytesize if message.payload
         end
         if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
           yield slice.dup, nil
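
A hedged sketch of batch consumption; note the new guard means nil (tombstone) payloads no longer break the byte accounting:

    consumer.each_batch(max_items: 500, timeout_ms: 1_000) do |messages, pending_exception|
      messages.each { |m| puts "#{m.topic}/#{m.partition}@#{m.offset}" }
      raise pending_exception if pending_exception  # only set with yield_on_error: true
    end
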
@@ -557,9 +674,9 @@
     end
 
     private
-    def monotonic_now
-      # needed because Time.now can go backwards
-      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if closed?
     end
   end
 end
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # Base error class.
   class BaseError < RuntimeError; end
@@ -83,4 +85,17 @@
       super("Illegal call to #{method.to_s} on a closed producer")
     end
   end
+
+  # Error class for public consumer method calls on a closed admin.
+  class ClosedAdminError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed admin")
+    end
+  end
+
+  class ClosedInnerError < BaseError
+    def initialize
+      super("Illegal call to a closed inner librdkafka instance")
+    end
+  end
 end
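
The new classes let callers rescue lifecycle misuse specifically; a sketch (admin setup elided, topic settings illustrative):

    begin
      admin.create_topic("example-topic", 3, 1)
    rescue Rdkafka::ClosedAdminError => e
      warn e.message  # "Illegal call to create_topic on a closed admin"
    end
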
data/lib/rdkafka/helpers/time.rb ADDED
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # Namespace for some small utilities used in multiple components
+  module Helpers
+    # Time related methods used across Karafka
+    module Time
+      # @return [Float] current monotonic time in seconds with microsecond precision
+      def monotonic_now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
+  end
+end
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,8 +1,21 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Metadata
     attr_reader :brokers, :topics
 
-    def initialize(native_client, topic_name = nil)
+    # Errors upon which we retry the metadata fetch
+    RETRIED_ERRORS = %i[
+      timed_out
+      leader_not_available
+    ].freeze
+
+    private_constant :RETRIED_ERRORS
+
+    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+      attempt ||= 0
+      attempt += 1
+
       native_topic = if topic_name
         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
       end
@@ -14,12 +27,22 @@
       topic_flag = topic_name.nil? ? 1 : 0
 
       # Retrieve the Metadata
-      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)
 
       # Error Handling
       raise Rdkafka::RdkafkaError.new(result) unless result.zero?
 
       metadata_from_native(ptr.read_pointer)
+    rescue ::Rdkafka::RdkafkaError => e
+      raise unless RETRIED_ERRORS.include?(e.code)
+      raise if attempt > 10
+
+      backoff_factor = 2**attempt
+      timeout = backoff_factor * 0.1
+
+      sleep(timeout)
+
+      retry
     ensure
       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
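
The retry schedule implied by the code above grows exponentially (0.1 s times 2**attempt) and is capped at 10 attempts:

    (1..10).map { |attempt| (2**attempt) * 0.1 }
    # => [0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 102.4]
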