rdkafka 0.12.0 → 0.15.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (86) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +57 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +155 -93
  10. data/Gemfile +2 -0
  11. data/{LICENSE → MIT-LICENSE} +2 -1
  12. data/README.md +76 -29
  13. data/Rakefile +2 -0
  14. data/certs/cert_chain.pem +26 -0
  15. data/docker-compose.yml +18 -15
  16. data/ext/README.md +1 -1
  17. data/ext/Rakefile +46 -27
  18. data/lib/rdkafka/abstract_handle.rb +41 -25
  19. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  20. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  21. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  22. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  23. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  24. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  25. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  26. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  29. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  30. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  31. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  32. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  33. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  34. data/lib/rdkafka/admin.rb +494 -35
  35. data/lib/rdkafka/bindings.rb +180 -41
  36. data/lib/rdkafka/callbacks.rb +202 -1
  37. data/lib/rdkafka/config.rb +62 -25
  38. data/lib/rdkafka/consumer/headers.rb +24 -9
  39. data/lib/rdkafka/consumer/message.rb +3 -1
  40. data/lib/rdkafka/consumer/partition.rb +2 -0
  41. data/lib/rdkafka/consumer/topic_partition_list.rb +13 -8
  42. data/lib/rdkafka/consumer.rb +243 -111
  43. data/lib/rdkafka/error.rb +15 -0
  44. data/lib/rdkafka/helpers/time.rb +14 -0
  45. data/lib/rdkafka/metadata.rb +25 -2
  46. data/lib/rdkafka/native_kafka.rb +120 -0
  47. data/lib/rdkafka/producer/delivery_handle.rb +16 -2
  48. data/lib/rdkafka/producer/delivery_report.rb +22 -2
  49. data/lib/rdkafka/producer.rb +151 -21
  50. data/lib/rdkafka/version.rb +5 -3
  51. data/lib/rdkafka.rb +24 -2
  52. data/rdkafka.gemspec +21 -5
  53. data/renovate.json +6 -0
  54. data/spec/rdkafka/abstract_handle_spec.rb +1 -1
  55. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  56. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  57. data/spec/rdkafka/admin/create_topic_handle_spec.rb +1 -1
  58. data/spec/rdkafka/admin/create_topic_report_spec.rb +1 -1
  59. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  60. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  61. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +1 -1
  62. data/spec/rdkafka/admin/delete_topic_report_spec.rb +1 -1
  63. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  64. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  65. data/spec/rdkafka/admin_spec.rb +209 -5
  66. data/spec/rdkafka/bindings_spec.rb +2 -1
  67. data/spec/rdkafka/callbacks_spec.rb +1 -1
  68. data/spec/rdkafka/config_spec.rb +24 -3
  69. data/spec/rdkafka/consumer/headers_spec.rb +60 -0
  70. data/spec/rdkafka/consumer/message_spec.rb +1 -1
  71. data/spec/rdkafka/consumer/partition_spec.rb +1 -1
  72. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +20 -1
  73. data/spec/rdkafka/consumer_spec.rb +352 -61
  74. data/spec/rdkafka/error_spec.rb +1 -1
  75. data/spec/rdkafka/metadata_spec.rb +4 -3
  76. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -35
  77. data/spec/rdkafka/producer/delivery_handle_spec.rb +4 -1
  78. data/spec/rdkafka/producer/delivery_report_spec.rb +11 -3
  79. data/spec/rdkafka/producer_spec.rb +234 -22
  80. data/spec/spec_helper.rb +20 -2
  81. data.tar.gz.sig +0 -0
  82. metadata +81 -17
  83. metadata.gz.sig +0 -0
  84. data/.semaphore/semaphore.yml +0 -23
  85. data/bin/console +0 -11
  86. data/lib/rdkafka/producer/client.rb +0 -47
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
3
5
  # brokers automatically assign partitions and load balance partitions over consumers that
@@ -10,31 +12,47 @@ module Rdkafka
10
12
  # `each_slice` to consume batches of messages.
11
13
  class Consumer
12
14
  include Enumerable
15
+ include Helpers::Time
13
16
 
14
17
  # @private
15
18
  def initialize(native_kafka)
16
19
  @native_kafka = native_kafka
17
- @closing = false
20
+ end
21
+
22
+ # @return [String] consumer name
23
+ def name
24
+ @name ||= @native_kafka.with_inner do |inner|
25
+ ::Rdkafka::Bindings.rd_kafka_name(inner)
26
+ end
27
+ end
28
+
29
+ def finalizer
30
+ ->(_) { close }
18
31
  end
19
32
 
20
33
  # Close this consumer
21
34
  # @return [nil]
22
35
  def close
23
- return unless @native_kafka
36
+ return if closed?
37
+ ObjectSpace.undefine_finalizer(self)
24
38
 
25
- @closing = true
26
- Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
27
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
28
- @native_kafka = nil
39
+ @native_kafka.synchronize do |inner|
40
+ Rdkafka::Bindings.rd_kafka_consumer_close(inner)
41
+ end
42
+
43
+ @native_kafka.close
29
44
  end
30
45
 
31
- # Subscribe to one or more topics letting Kafka handle partition assignments.
46
+ # Whether this consumer has closed
47
+ def closed?
48
+ @native_kafka.closed?
49
+ end
50
+
51
+ # Subscribes to one or more topics letting Kafka handle partition assignments.
32
52
  #
33
53
  # @param topics [Array<String>] One or more topic names
34
- #
35
- # @raise [RdkafkaError] When subscribing fails
36
- #
37
54
  # @return [nil]
55
+ # @raise [RdkafkaError] When subscribing fails
38
56
  def subscribe(*topics)
39
57
  closed_consumer_check(__method__)
40
58
 
@@ -46,7 +64,9 @@ module Rdkafka
46
64
  end
47
65
 
48
66
  # Subscribe to topic partition list and check this was successful
49
- response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
67
+ response = @native_kafka.with_inner do |inner|
68
+ Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
69
+ end
50
70
  if response != 0
51
71
  raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
52
72
  end
@@ -56,13 +76,14 @@ module Rdkafka
56
76
 
57
77
  # Unsubscribe from all subscribed topics.
58
78
  #
59
- # @raise [RdkafkaError] When unsubscribing fails
60
- #
61
79
  # @return [nil]
80
+ # @raise [RdkafkaError] When unsubscribing fails
62
81
  def unsubscribe
63
82
  closed_consumer_check(__method__)
64
83
 
65
- response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
84
+ response = @native_kafka.with_inner do |inner|
85
+ Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
86
+ end
66
87
  if response != 0
67
88
  raise Rdkafka::RdkafkaError.new(response)
68
89
  end
@@ -71,10 +92,8 @@ module Rdkafka
71
92
  # Pause producing or consumption for the provided list of partitions
72
93
  #
73
94
  # @param list [TopicPartitionList] The topic with partitions to pause
74
- #
75
- # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
76
- #
77
95
  # @return [nil]
96
+ # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
78
97
  def pause(list)
79
98
  closed_consumer_check(__method__)
80
99
 
@@ -85,7 +104,9 @@ module Rdkafka
85
104
  tpl = list.to_native_tpl
86
105
 
87
106
  begin
88
- response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
107
+ response = @native_kafka.with_inner do |inner|
108
+ Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
109
+ end
89
110
 
90
111
  if response != 0
91
112
  list = TopicPartitionList.from_native_tpl(tpl)
@@ -96,13 +117,11 @@ module Rdkafka
96
117
  end
97
118
  end
98
119
 
99
- # Resume producing consumption for the provided list of partitions
120
 + # Resumes producing and consumption for the provided list of partitions
100
121
  #
101
122
  # @param list [TopicPartitionList] The topic with partitions to resume
102
- #
103
- # @raise [RdkafkaError] When resume subscription fails.
104
- #
105
123
  # @return [nil]
124
+ # @raise [RdkafkaError] When resume subscription fails.
106
125
  def resume(list)
107
126
  closed_consumer_check(__method__)
108
127
 
@@ -113,7 +132,9 @@ module Rdkafka
113
132
  tpl = list.to_native_tpl
114
133
 
115
134
  begin
116
- response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
135
+ response = @native_kafka.with_inner do |inner|
136
+ Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
137
+ end
117
138
  if response != 0
118
139
  raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
119
140
  end
@@ -122,16 +143,17 @@ module Rdkafka
122
143
  end
123
144
  end
124
145
 
125
- # Return the current subscription to topics and partitions
126
- #
127
- # @raise [RdkafkaError] When getting the subscription fails.
146
+ # Returns the current subscription to topics and partitions
128
147
  #
129
148
  # @return [TopicPartitionList]
149
+ # @raise [RdkafkaError] When getting the subscription fails.
130
150
  def subscription
131
151
  closed_consumer_check(__method__)
132
152
 
133
153
  ptr = FFI::MemoryPointer.new(:pointer)
134
- response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
154
+ response = @native_kafka.with_inner do |inner|
155
+ Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
156
+ end
135
157
 
136
158
  if response != 0
137
159
  raise Rdkafka::RdkafkaError.new(response)
@@ -149,7 +171,6 @@ module Rdkafka
149
171
  # Atomic assignment of partitions to consume
150
172
  #
151
173
  # @param list [TopicPartitionList] The topic with partitions to assign
152
- #
153
174
  # @raise [RdkafkaError] When assigning fails
154
175
  def assign(list)
155
176
  closed_consumer_check(__method__)
@@ -161,7 +182,9 @@ module Rdkafka
161
182
  tpl = list.to_native_tpl
162
183
 
163
184
  begin
164
- response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
185
+ response = @native_kafka.with_inner do |inner|
186
+ Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
187
+ end
165
188
  if response != 0
166
189
  raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
167
190
  end
@@ -172,14 +195,15 @@ module Rdkafka
172
195
 
173
196
  # Returns the current partition assignment.
174
197
  #
175
- # @raise [RdkafkaError] When getting the assignment fails.
176
- #
177
198
  # @return [TopicPartitionList]
199
+ # @raise [RdkafkaError] When getting the assignment fails.
178
200
  def assignment
179
201
  closed_consumer_check(__method__)
180
202
 
181
203
  ptr = FFI::MemoryPointer.new(:pointer)
182
- response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
204
+ response = @native_kafka.with_inner do |inner|
205
+ Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
206
+ end
183
207
  if response != 0
184
208
  raise Rdkafka::RdkafkaError.new(response)
185
209
  end
@@ -197,16 +221,25 @@ module Rdkafka
197
221
  ptr.free unless ptr.nil?
198
222
  end
199
223
 
224
+ # @return [Boolean] true if our current assignment has been lost involuntarily.
225
+ def assignment_lost?
226
+ closed_consumer_check(__method__)
227
+
228
+ @native_kafka.with_inner do |inner|
229
+ !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
230
+ end
231
+ end
232
+
200
233
  # Return the current committed offset per partition for this consumer group.
201
- # The offset field of each requested partition will either be set to stored offset or to -1001 in case there was no stored offset for that partition.
234
+ # The offset field of each requested partition will either be set to stored offset or to -1001
235
+ # in case there was no stored offset for that partition.
202
236
  #
203
- # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
237
+ # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
238
+ # to use the current subscription.
204
239
  # @param timeout_ms [Integer] The timeout for fetching this information.
205
- #
206
- # @raise [RdkafkaError] When getting the committed positions fails.
207
- #
208
240
  # @return [TopicPartitionList]
209
- def committed(list=nil, timeout_ms=1200)
241
+ # @raise [RdkafkaError] When getting the committed positions fails.
242
+ def committed(list=nil, timeout_ms=2000)
210
243
  closed_consumer_check(__method__)
211
244
 
212
245
  if list.nil?
@@ -218,7 +251,9 @@ module Rdkafka
218
251
  tpl = list.to_native_tpl
219
252
 
220
253
  begin
221
- response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
254
+ response = @native_kafka.with_inner do |inner|
255
+ Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
256
+ end
222
257
  if response != 0
223
258
  raise Rdkafka::RdkafkaError.new(response)
224
259
  end
@@ -228,29 +263,57 @@ module Rdkafka
228
263
  end
229
264
  end
230
265
 
266
+ # Return the current positions (offsets) for topics and partitions.
267
+ # The offset field of each requested partition will be set to the offset of the last consumed message + 1, or nil in case there was no previous message.
268
+ #
269
+ # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil to use the current subscription.
270
+ #
271
+ # @return [TopicPartitionList]
272
+ #
273
+ # @raise [RdkafkaError] When getting the positions fails.
274
+ def position(list=nil)
275
+ if list.nil?
276
+ list = assignment
277
+ elsif !list.is_a?(TopicPartitionList)
278
+ raise TypeError.new("list has to be nil or a TopicPartitionList")
279
+ end
280
+
281
+ tpl = list.to_native_tpl
282
+
283
+ response = @native_kafka.with_inner do |inner|
284
+ Rdkafka::Bindings.rd_kafka_position(inner, tpl)
285
+ end
286
+
287
+ if response != 0
288
+ raise Rdkafka::RdkafkaError.new(response)
289
+ end
290
+
291
+ TopicPartitionList.from_native_tpl(tpl)
292
+ end
293
+
231
294
  # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
232
295
  #
233
296
  # @param topic [String] The topic to query
234
297
  # @param partition [Integer] The partition to query
235
298
  # @param timeout_ms [Integer] The timeout for querying the broker
236
- #
237
- # @raise [RdkafkaError] When querying the broker fails.
238
- #
239
299
  # @return [Integer] The low and high watermark
240
- def query_watermark_offsets(topic, partition, timeout_ms=200)
300
+ # @raise [RdkafkaError] When querying the broker fails.
301
+ def query_watermark_offsets(topic, partition, timeout_ms=1000)
241
302
  closed_consumer_check(__method__)
242
303
 
243
304
  low = FFI::MemoryPointer.new(:int64, 1)
244
305
  high = FFI::MemoryPointer.new(:int64, 1)
245
306
 
246
- response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
247
- @native_kafka,
248
- topic,
249
- partition,
250
- low,
251
- high,
252
- timeout_ms,
253
- )
307
+ response = @native_kafka.with_inner do |inner|
308
+ Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
309
+ inner,
310
+ topic,
311
+ partition,
312
+ low,
313
+ high,
314
+ timeout_ms,
315
+ )
316
+ end
254
317
  if response != 0
255
318
  raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
256
319
  end
@@ -268,11 +331,10 @@ module Rdkafka
268
331
  #
269
332
  # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
270
333
  # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
271
- #
334
+ # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
335
+ # per partition
272
336
  # @raise [RdkafkaError] When querying the broker fails.
273
- #
274
- # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
275
- def lag(topic_partition_list, watermark_timeout_ms=100)
337
+ def lag(topic_partition_list, watermark_timeout_ms=1000)
276
338
  out = {}
277
339
 
278
340
  topic_partition_list.to_h.each do |topic, partitions|
@@ -298,7 +360,9 @@ module Rdkafka
298
360
  # @return [String, nil]
299
361
  def cluster_id
300
362
  closed_consumer_check(__method__)
301
- Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
363
+ @native_kafka.with_inner do |inner|
364
+ Rdkafka::Bindings.rd_kafka_clusterid(inner)
365
+ end
302
366
  end
303
367
 
304
368
  # Returns this client's broker-assigned group member id
@@ -308,7 +372,9 @@ module Rdkafka
308
372
  # @return [String, nil]
309
373
  def member_id
310
374
  closed_consumer_check(__method__)
311
- Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
375
+ @native_kafka.with_inner do |inner|
376
+ Rdkafka::Bindings.rd_kafka_memberid(inner)
377
+ end
312
378
  end
313
379
 
314
380
  # Store offset of a message to be used in the next commit of this consumer
@@ -316,52 +382,51 @@ module Rdkafka
316
382
  # When using this `enable.auto.offset.store` should be set to `false` in the config.
317
383
  #
318
384
  # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
319
- #
320
- # @raise [RdkafkaError] When storing the offset fails
321
- #
322
385
  # @return [nil]
386
+ # @raise [RdkafkaError] When storing the offset fails
323
387
  def store_offset(message)
324
388
  closed_consumer_check(__method__)
325
389
 
326
- # rd_kafka_offset_store is one of the few calls that does not support
327
- # a string as the topic, so create a native topic for it.
328
- native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
329
- @native_kafka,
390
+ list = TopicPartitionList.new
391
+ list.add_topic_and_partitions_with_offsets(
330
392
  message.topic,
331
- nil
332
- )
333
- response = Rdkafka::Bindings.rd_kafka_offset_store(
334
- native_topic,
335
- message.partition,
336
- message.offset
393
+ message.partition => message.offset + 1
337
394
  )
395
+
396
+ tpl = list.to_native_tpl
397
+
398
+ response = @native_kafka.with_inner do |inner|
399
+ Rdkafka::Bindings.rd_kafka_offsets_store(
400
+ inner,
401
+ tpl
402
+ )
403
+ end
404
+
338
405
  if response != 0
339
406
  raise Rdkafka::RdkafkaError.new(response)
340
407
  end
341
408
  ensure
342
- if native_topic && !native_topic.null?
343
- Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
344
- end
409
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
345
410
  end
346
411
 
347
412
  # Seek to a particular message. The next poll on the topic/partition will return the
348
413
  # message at the given offset.
349
414
  #
350
415
  # @param message [Rdkafka::Consumer::Message] The message to which to seek
351
- #
352
- # @raise [RdkafkaError] When seeking fails
353
- #
354
416
  # @return [nil]
417
+ # @raise [RdkafkaError] When seeking fails
355
418
  def seek(message)
356
419
  closed_consumer_check(__method__)
357
420
 
358
421
  # rd_kafka_offset_store is one of the few calls that does not support
359
422
  # a string as the topic, so create a native topic for it.
360
- native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
361
- @native_kafka,
362
- message.topic,
363
- nil
364
- )
423
+ native_topic = @native_kafka.with_inner do |inner|
424
+ Rdkafka::Bindings.rd_kafka_topic_new(
425
+ inner,
426
+ message.topic,
427
+ nil
428
+ )
429
+ end
365
430
  response = Rdkafka::Bindings.rd_kafka_seek(
366
431
  native_topic,
367
432
  message.partition,
@@ -377,6 +442,39 @@ module Rdkafka
377
442
  end
378
443
  end
379
444
 
445
+ # Lookup offset for the given partitions by timestamp.
446
+ #
447
+ # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
448
+ #
449
+ # @return [TopicPartitionList]
450
+ #
451
 + # @raise [RdkafkaError] When the OffsetForTimes lookup fails
452
+ def offsets_for_times(list, timeout_ms = 1000)
453
+ closed_consumer_check(__method__)
454
+
455
+ if !list.is_a?(TopicPartitionList)
456
+ raise TypeError.new("list has to be a TopicPartitionList")
457
+ end
458
+
459
+ tpl = list.to_native_tpl
460
+
461
+ response = @native_kafka.with_inner do |inner|
462
+ Rdkafka::Bindings.rd_kafka_offsets_for_times(
463
+ inner,
464
+ tpl,
465
+ timeout_ms # timeout
466
+ )
467
+ end
468
+
469
+ if response != 0
470
+ raise Rdkafka::RdkafkaError.new(response)
471
+ end
472
+
473
+ TopicPartitionList.from_native_tpl(tpl)
474
+ ensure
475
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
476
+ end
477
+
380
478
  # Manually commit the current offsets of this consumer.
381
479
  #
382
480
  # To use this set `enable.auto.commit`to `false` to disable automatic triggering
@@ -388,10 +486,8 @@ module Rdkafka
388
486
  #
389
487
  # @param list [TopicPartitionList,nil] The topic with partitions to commit
390
488
  # @param async [Boolean] Whether to commit async or wait for the commit to finish
391
- #
392
- # @raise [RdkafkaError] When committing fails
393
- #
394
489
  # @return [nil]
490
+ # @raise [RdkafkaError] When committing fails
395
491
  def commit(list=nil, async=false)
396
492
  closed_consumer_check(__method__)
397
493
 
@@ -402,7 +498,9 @@ module Rdkafka
402
498
  tpl = list ? list.to_native_tpl : nil
403
499
 
404
500
  begin
405
- response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
501
+ response = @native_kafka.with_inner do |inner|
502
+ Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
503
+ end
406
504
  if response != 0
407
505
  raise Rdkafka::RdkafkaError.new(response)
408
506
  end
@@ -414,14 +512,14 @@ module Rdkafka
414
512
  # Poll for the next message on one of the subscribed topics
415
513
  #
416
514
  # @param timeout_ms [Integer] Timeout of this poll
417
- #
418
- # @raise [RdkafkaError] When polling fails
419
- #
420
515
  # @return [Message, nil] A message or nil if there was no new message within the timeout
516
+ # @raise [RdkafkaError] When polling fails
421
517
  def poll(timeout_ms)
422
518
  closed_consumer_check(__method__)
423
519
 
424
- message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
520
+ message_ptr = @native_kafka.with_inner do |inner|
521
+ Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
522
+ end
425
523
  if message_ptr.null?
426
524
  nil
427
525
  else
@@ -436,30 +534,53 @@ module Rdkafka
436
534
  end
437
535
  ensure
438
536
  # Clean up rdkafka message if there is one
439
- if !message_ptr.nil? && !message_ptr.null?
537
+ if message_ptr && !message_ptr.null?
440
538
  Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
441
539
  end
442
540
  end
443
541
 
542
+ # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
543
+ # was set to `true`.
544
+ #
545
+ # Events will cause application-provided callbacks to be called.
546
+ #
547
+ # Events (in the context of the consumer):
548
+ # - error callbacks
549
+ # - stats callbacks
550
+ # - any other callbacks supported by librdkafka that are not part of the consumer_poll, that
551
+ # would have a callback configured and activated.
552
+ #
553
+ # This method needs to be called at regular intervals to serve any queued callbacks waiting to
554
+ # be called. When in use, does **NOT** replace `#poll` but needs to run complementary with it.
555
+ #
556
+ # @param timeout_ms [Integer] poll timeout. If set to 0 will run async, when set to -1 will
557
+ # block until any events available.
558
+ #
559
+ # @note This method technically should be called `#poll` and the current `#poll` should be
560
+ # called `#consumer_poll` though we keep the current naming convention to make it backward
561
+ # compatible.
562
+ def events_poll(timeout_ms = 0)
563
+ @native_kafka.with_inner do |inner|
564
+ Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
565
+ end
566
+ end
567
+
444
568
  # Poll for new messages and yield for each received one. Iteration
445
569
  # will end when the consumer is closed.
446
570
  #
447
- # If `enable.partition.eof` is turned on in the config this will raise an
448
- # error when an eof is reached, so you probably want to disable that when
449
- # using this method of iteration.
450
- #
451
- # @raise [RdkafkaError] When polling fails
571
+ # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
572
+ # reached, so you probably want to disable that when using this method of iteration.
452
573
  #
453
574
  # @yieldparam message [Message] Received message
454
- #
455
575
  # @return [nil]
576
+ # @raise [RdkafkaError] When polling fails
456
577
  def each
457
578
  loop do
458
579
  message = poll(250)
459
580
  if message
460
581
  yield(message)
461
582
  else
462
- if @closing
583
+ if closed?
463
584
  break
464
585
  else
465
586
  next
@@ -468,10 +589,6 @@ module Rdkafka
468
589
  end
469
590
  end
470
591
 
471
- def closed_consumer_check(method)
472
- raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
473
- end
474
-
475
592
  # Poll for new messages and yield them in batches that may contain
476
593
  # messages from more than one partition.
477
594
  #
@@ -508,26 +625,25 @@ module Rdkafka
508
625
  # that you may or may not see again.
509
626
  #
510
627
  # @param max_items [Integer] Maximum size of the yielded array of messages
511
- #
512
628
  # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
513
- #
514
629
  # @param timeout_ms [Integer] max time to wait for up to max_items
515
630
  #
516
- # @raise [RdkafkaError] When polling fails
517
- #
518
- # @yield [messages, pending_exception]
519
631
  # @yieldparam messages [Array] An array of received Message
520
632
  # @yieldparam pending_exception [Exception] normally nil, or an exception
633
+ #
634
+ # @yield [messages, pending_exception]
521
635
  # which will be propagated after processing of the partial batch is complete.
522
636
  #
523
637
  # @return [nil]
638
+ #
639
+ # @raise [RdkafkaError] When polling fails
524
640
  def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
525
641
  closed_consumer_check(__method__)
526
642
  slice = []
527
643
  bytes = 0
528
644
  end_time = monotonic_now + timeout_ms / 1000.0
529
645
  loop do
530
- break if @closing
646
+ break if closed?
531
647
  max_wait = end_time - monotonic_now
532
648
  max_wait_ms = if max_wait <= 0
533
649
  0 # should not block, but may retrieve a message
@@ -545,7 +661,7 @@ module Rdkafka
545
661
  end
546
662
  if message
547
663
  slice << message
548
- bytes += message.payload.bytesize
664
+ bytes += message.payload.bytesize if message.payload
549
665
  end
550
666
  if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
551
667
  yield slice.dup, nil
@@ -556,10 +672,26 @@ module Rdkafka
556
672
  end
557
673
  end
558
674
 
675
+ # Returns pointer to the consumer group metadata. It is used only in the context of
676
+ # exactly-once-semantics in transactions, this is why it is never remapped to Ruby
677
+ #
678
+ # This API is **not** usable by itself from Ruby
679
+ #
680
+ # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`
681
+ #
682
+ # @private
683
+ def consumer_group_metadata_pointer
684
+ closed_consumer_check(__method__)
685
+
686
+ @native_kafka.with_inner do |inner|
687
+ Bindings.rd_kafka_consumer_group_metadata(inner)
688
+ end
689
+ end
690
+
559
691
  private
560
- def monotonic_now
561
- # needed because Time.now can go backwards
562
- Process.clock_gettime(Process::CLOCK_MONOTONIC)
692
+
693
+ def closed_consumer_check(method)
694
+ raise Rdkafka::ClosedConsumerError.new(method) if closed?
563
695
  end
564
696
  end
565
697
  end
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  # Base error class.
3
5
  class BaseError < RuntimeError; end
@@ -83,4 +85,17 @@ module Rdkafka
83
85
  super("Illegal call to #{method.to_s} on a closed producer")
84
86
  end
85
87
  end
88
+
89
 + # Error class for public admin method calls on a closed admin.
90
+ class ClosedAdminError < BaseError
91
+ def initialize(method)
92
+ super("Illegal call to #{method.to_s} on a closed admin")
93
+ end
94
+ end
95
+
96
+ class ClosedInnerError < BaseError
97
+ def initialize
98
+ super("Illegal call to a closed inner librdkafka instance")
99
+ end
100
+ end
86
101
  end
@@ -0,0 +1,14 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rdkafka
4
+ # Namespace for some small utilities used in multiple components
5
+ module Helpers
6
+ # Time related methods used across Karafka
7
+ module Time
8
+ # @return [Float] current monotonic time in seconds with microsecond precision
9
+ def monotonic_now
10
+ ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
11
+ end
12
+ end
13
+ end
14
+ end