rdkafka 0.22.0.beta1-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102) hide show
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
  5. data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
  6. data/.github/workflows/ci_macos_arm64.yml +306 -0
  7. data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
  8. data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
  9. data/.github/workflows/push_macos_arm64.yml +54 -0
  10. data/.github/workflows/push_ruby.yml +37 -0
  11. data/.github/workflows/verify-action-pins.yml +16 -0
  12. data/.gitignore +14 -0
  13. data/.rspec +2 -0
  14. data/.ruby-gemset +1 -0
  15. data/.ruby-version +1 -0
  16. data/.yardopts +2 -0
  17. data/CHANGELOG.md +247 -0
  18. data/Gemfile +5 -0
  19. data/MIT-LICENSE +22 -0
  20. data/README.md +178 -0
  21. data/Rakefile +96 -0
  22. data/docker-compose.yml +25 -0
  23. data/ext/README.md +19 -0
  24. data/ext/Rakefile +131 -0
  25. data/ext/build_common.sh +361 -0
  26. data/ext/build_linux_x86_64_gnu.sh +306 -0
  27. data/ext/build_linux_x86_64_musl.sh +763 -0
  28. data/ext/build_macos_arm64.sh +550 -0
  29. data/ext/librdkafka.so +0 -0
  30. data/lib/rdkafka/abstract_handle.rb +116 -0
  31. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  32. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  33. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  34. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  35. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  36. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  37. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  38. data/lib/rdkafka/admin/create_topic_handle.rb +29 -0
  39. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  40. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  41. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  42. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  43. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  44. data/lib/rdkafka/admin/delete_topic_handle.rb +29 -0
  45. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  46. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  47. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  48. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  49. data/lib/rdkafka/admin/describe_configs_report.rb +54 -0
  50. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  51. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +54 -0
  52. data/lib/rdkafka/admin.rb +833 -0
  53. data/lib/rdkafka/bindings.rb +566 -0
  54. data/lib/rdkafka/callbacks.rb +415 -0
  55. data/lib/rdkafka/config.rb +398 -0
  56. data/lib/rdkafka/consumer/headers.rb +79 -0
  57. data/lib/rdkafka/consumer/message.rb +86 -0
  58. data/lib/rdkafka/consumer/partition.rb +51 -0
  59. data/lib/rdkafka/consumer/topic_partition_list.rb +169 -0
  60. data/lib/rdkafka/consumer.rb +653 -0
  61. data/lib/rdkafka/error.rb +101 -0
  62. data/lib/rdkafka/helpers/oauth.rb +58 -0
  63. data/lib/rdkafka/helpers/time.rb +14 -0
  64. data/lib/rdkafka/metadata.rb +115 -0
  65. data/lib/rdkafka/native_kafka.rb +139 -0
  66. data/lib/rdkafka/producer/delivery_handle.rb +40 -0
  67. data/lib/rdkafka/producer/delivery_report.rb +46 -0
  68. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  69. data/lib/rdkafka/producer.rb +430 -0
  70. data/lib/rdkafka/version.rb +7 -0
  71. data/lib/rdkafka.rb +54 -0
  72. data/rdkafka.gemspec +65 -0
  73. data/renovate.json +92 -0
  74. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  75. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  76. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  77. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  78. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  81. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  82. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  83. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  84. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  85. data/spec/rdkafka/admin_spec.rb +770 -0
  86. data/spec/rdkafka/bindings_spec.rb +223 -0
  87. data/spec/rdkafka/callbacks_spec.rb +20 -0
  88. data/spec/rdkafka/config_spec.rb +258 -0
  89. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  90. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  91. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  92. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  93. data/spec/rdkafka/consumer_spec.rb +1274 -0
  94. data/spec/rdkafka/error_spec.rb +89 -0
  95. data/spec/rdkafka/metadata_spec.rb +79 -0
  96. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  97. data/spec/rdkafka/producer/delivery_handle_spec.rb +45 -0
  98. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  99. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  100. data/spec/rdkafka/producer_spec.rb +1052 -0
  101. data/spec/spec_helper.rb +195 -0
  102. metadata +276 -0
# frozen_string_literal: true

module Rdkafka
  # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
  # brokers automatically assign partitions and load balance partitions over consumers that
  # have the same `:"group.id"` set in their configuration.
  #
  # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
  # mandatory to set `:"group.id"` in the configuration.
  #
  # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
  # `each_slice` to consume batches of messages.
  class Consumer
    include Enumerable
    include Helpers::Time
    include Helpers::OAuth

    # @private
    # @param native_kafka [NativeKafka] wrapper around the librdkafka client handle
    def initialize(native_kafka)
      @native_kafka = native_kafka
    end

    # Starts the native Kafka polling thread and kicks off the init polling
    # @note Not needed to run unless explicit start was disabled
    def start
      @native_kafka.start
    end

    # @return [String] consumer name
    def name
      @name ||= @native_kafka.with_inner do |inner|
        ::Rdkafka::Bindings.rd_kafka_name(inner)
      end
    end

    # @return [Proc] finalizer that closes this consumer when it is garbage collected
    def finalizer
      ->(_) { close }
    end

    # Close this consumer
    # @return [nil]
    def close
      return if closed?
      ObjectSpace.undefine_finalizer(self)

      @native_kafka.synchronize do |inner|
        Rdkafka::Bindings.rd_kafka_consumer_close(inner)
      end

      @native_kafka.close
    end

    # Whether this consumer has closed
    def closed?
      @native_kafka.closed?
    end

    # Subscribes to one or more topics letting Kafka handle partition assignments.
    #
    # @param topics [Array<String>] One or more topic names
    # @return [nil]
    # @raise [RdkafkaError] When subscribing fails
    def subscribe(*topics)
      closed_consumer_check(__method__)

      # Create topic partition list with topics and no partition set
      tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)

      topics.each do |topic|
        # -1 means "no specific partition"; the brokers assign partitions
        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
      end

      # Subscribe to topic partition list and check this was successful
      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
    ensure
      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
    end

    # Unsubscribe from all subscribed topics.
    #
    # @return [nil]
    # @raise [RdkafkaError] When unsubscribing fails
    def unsubscribe
      closed_consumer_check(__method__)

      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
    end

    # Pause producing or consumption for the provided list of partitions
    #
    # @param list [TopicPartitionList] The topic with partitions to pause
    # @return [nil]
    # @raise [RdkafkaTopicPartitionListError] When pausing subscription fails.
    def pause(list)
      closed_consumer_check(__method__)

      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
        end

        if response != 0
          # Re-read the native list so the error carries per-partition results
          list = TopicPartitionList.from_native_tpl(tpl)
          raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
        end
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end

    # Resumes producing consumption for the provided list of partitions
    #
    # @param list [TopicPartitionList] The topic with partitions to pause
    # @return [nil]
    # @raise [RdkafkaError] When resume subscription fails.
    def resume(list)
      closed_consumer_check(__method__)

      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
        end
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end

    # Returns the current subscription to topics and partitions
    #
    # @return [TopicPartitionList]
    # @raise [RdkafkaError] When getting the subscription fails.
    def subscription
      closed_consumer_check(__method__)

      ptr = FFI::MemoryPointer.new(:pointer)
      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
      end

      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end

      native = ptr.read_pointer

      begin
        Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
      end
    end

    # Atomic assignment of partitions to consume
    #
    # @param list [TopicPartitionList] The topic with partitions to assign
    # @raise [RdkafkaError] When assigning fails
    def assign(list)
      closed_consumer_check(__method__)

      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
        end
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end

    # Returns the current partition assignment.
    #
    # @return [TopicPartitionList]
    # @raise [RdkafkaError] When getting the assignment fails.
    def assignment
      closed_consumer_check(__method__)

      ptr = FFI::MemoryPointer.new(:pointer)
      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end

      tpl = ptr.read_pointer

      if !tpl.null?
        begin
          Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
        ensure
          Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
        end
      end
    ensure
      ptr.free unless ptr.nil?
    end

    # @return [Boolean] true if our current assignment has been lost involuntarily.
    def assignment_lost?
      closed_consumer_check(__method__)

      @native_kafka.with_inner do |inner|
        !Rdkafka::Bindings.rd_kafka_assignment_lost(inner).zero?
      end
    end

    # Return the current committed offset per partition for this consumer group.
    # The offset field of each requested partition will either be set to stored offset or to -1001
    # in case there was no stored offset for that partition.
    #
    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or nil
    #   to use the current subscription.
    # @param timeout_ms [Integer] The timeout for fetching this information.
    # @return [TopicPartitionList]
    # @raise [RdkafkaError] When getting the committed positions fails.
    def committed(list=nil, timeout_ms=2000)
      closed_consumer_check(__method__)

      if list.nil?
        list = assignment
      elsif !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
        TopicPartitionList.from_native_tpl(tpl)
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end

    # Return the current positions (offsets) for topics and partitions.
    # The offset field of each requested partition will be set to the offset of the last consumed
    # message + 1, or nil in case there was no previous message.
    #
    # @param list [TopicPartitionList, nil] The topic with partitions to get the offsets for or
    #   nil to use the current subscription.
    # @return [TopicPartitionList]
    # @raise [RdkafkaError] When getting the positions fails.
    def position(list=nil)
      # Consistency fix: this was the only public operation missing the closed-consumer guard.
      closed_consumer_check(__method__)

      if list.nil?
        list = assignment
      elsif !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_position(inner, tpl)
        end

        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end

        TopicPartitionList.from_native_tpl(tpl)
      ensure
        # Leak fix: the native list allocated by `to_native_tpl` was never destroyed here,
        # unlike in every sibling method (pause/resume/assign/committed).
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end

    # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
    #
    # @param topic [String] The topic to query
    # @param partition [Integer] The partition to query
    # @param timeout_ms [Integer] The timeout for querying the broker
    # @return [Array<Integer>] The low and high watermark
    # @raise [RdkafkaError] When querying the broker fails.
    def query_watermark_offsets(topic, partition, timeout_ms=1000)
      closed_consumer_check(__method__)

      low = FFI::MemoryPointer.new(:int64, 1)
      high = FFI::MemoryPointer.new(:int64, 1)

      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
          inner,
          topic,
          partition,
          low,
          high,
          timeout_ms,
        )
      end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end

      return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
    ensure
      low.free unless low.nil?
      high.free unless high.nil?
    end

    # Calculate the consumer lag per partition for the provided topic partition list.
    # You can get a suitable list by calling {committed} or {position} (TODO). It is also
    # possible to create one yourself, in this case you have to provide a list that
    # already contains all the partitions you need the lag for.
    #
    # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
    # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
    #   per partition
    # @raise [RdkafkaError] When querying the broker fails.
    def lag(topic_partition_list, watermark_timeout_ms=1000)
      out = {}

      topic_partition_list.to_h.each do |topic, partitions|
        # Query high watermarks for this topic's partitions
        # and compare to the offset in the list.
        topic_out = {}
        partitions.each do |p|
          next if p.offset.nil?
          # Only the high watermark is needed for lag; the low one is discarded.
          _low, high = query_watermark_offsets(
            topic,
            p.partition,
            watermark_timeout_ms
          )
          topic_out[p.partition] = high - p.offset
        end
        out[topic] = topic_out
      end
      out
    end

    # Returns the ClusterId as reported in broker metadata.
    #
    # @return [String, nil]
    def cluster_id
      closed_consumer_check(__method__)
      @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_clusterid(inner)
      end
    end

    # Returns this client's broker-assigned group member id
    #
    # This currently requires the high-level KafkaConsumer
    #
    # @return [String, nil]
    def member_id
      closed_consumer_check(__method__)
      @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_memberid(inner)
      end
    end

    # Store offset of a message to be used in the next commit of this consumer
    #
    # When using this `enable.auto.offset.store` should be set to `false` in the config.
    #
    # @param message [Rdkafka::Consumer::Message] The message which offset will be stored
    # @return [nil]
    # @raise [RdkafkaError] When storing the offset fails
    def store_offset(message)
      closed_consumer_check(__method__)

      list = TopicPartitionList.new
      # Store offset + 1: the stored value is the offset of the next message to consume
      list.add_topic_and_partitions_with_offsets(
        message.topic,
        message.partition => message.offset + 1
      )

      tpl = list.to_native_tpl

      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_offsets_store(
          inner,
          tpl
        )
      end

      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
    ensure
      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
    end

    # Seek to a particular message. The next poll on the topic/partition will return the
    # message at the given offset.
    #
    # @param message [Rdkafka::Consumer::Message] The message to which to seek
    # @return [nil]
    # @raise [RdkafkaError] When seeking fails
    def seek(message)
      seek_by(message.topic, message.partition, message.offset)
    end

    # Seek to a particular message by providing the topic, partition and offset.
    # The next poll on the topic/partition will return the
    # message at the given offset.
    #
    # @param topic [String] The topic in which to seek
    # @param partition [Integer] The partition number to seek
    # @param offset [Integer] The partition offset to seek
    # @return [nil]
    # @raise [RdkafkaError] When seeking fails
    def seek_by(topic, partition, offset)
      closed_consumer_check(__method__)

      # rd_kafka_seek does not accept a topic name string; it needs a native
      # topic handle, so create (and later destroy) one for this call.
      native_topic = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_topic_new(
          inner,
          topic,
          nil
        )
      end
      response = Rdkafka::Bindings.rd_kafka_seek(
        native_topic,
        partition,
        offset,
        0 # timeout
      )
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
    ensure
      if native_topic && !native_topic.null?
        Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
      end
    end

    # Lookup offset for the given partitions by timestamp.
    #
    # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
    # @param timeout_ms [Integer] The timeout for the lookup
    # @return [TopicPartitionList]
    # @raise [RdKafkaError] When the OffsetForTimes lookup fails
    def offsets_for_times(list, timeout_ms = 1000)
      closed_consumer_check(__method__)

      if !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end

      tpl = list.to_native_tpl

      response = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_offsets_for_times(
          inner,
          tpl,
          timeout_ms # timeout
        )
      end

      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end

      TopicPartitionList.from_native_tpl(tpl)
    ensure
      Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
    end

    # Manually commit the current offsets of this consumer.
    #
    # To use this set `enable.auto.commit` to `false` to disable automatic triggering
    # of commits.
    #
    # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
    # message for every partition is used. If set to `false` you can use {store_offset} to
    # indicate when a message has been fully processed.
    #
    # @param list [TopicPartitionList,nil] The topic with partitions to commit
    # @param async [Boolean] Whether to commit async or wait for the commit to finish
    # @return [nil]
    # @raise [RdkafkaError] When committing fails
    def commit(list=nil, async=false)
      closed_consumer_check(__method__)

      if !list.nil? && !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end

      # A nil tpl commits the offsets of the current assignment
      tpl = list ? list.to_native_tpl : nil

      begin
        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
        end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
      ensure
        Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
      end
    end

    # Poll for the next message on one of the subscribed topics
    #
    # @param timeout_ms [Integer] Timeout of this poll
    # @return [Message, nil] A message or nil if there was no new message within the timeout
    # @raise [RdkafkaError] When polling fails
    def poll(timeout_ms)
      closed_consumer_check(__method__)

      message_ptr = @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
      end
      if message_ptr.null?
        nil
      else
        # Create struct wrapper
        native_message = Rdkafka::Bindings::Message.new(message_ptr)
        # Raise error if needed
        if native_message[:err] != 0
          raise Rdkafka::RdkafkaError.new(native_message[:err])
        end
        # Create a message to pass out
        Rdkafka::Consumer::Message.new(native_message)
      end
    ensure
      # Clean up rdkafka message if there is one
      if message_ptr && !message_ptr.null?
        Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
      end
    end

    # Polls the main rdkafka queue (not the consumer one). Do **NOT** use it if `consumer_poll_set`
    # was set to `true`.
    #
    # Events will cause application-provided callbacks to be called.
    #
    # Events (in the context of the consumer):
    #   - error callbacks
    #   - stats callbacks
    #   - any other callbacks supported by librdkafka that are not part of the consumer_poll, that
    #     would have a callback configured and activated.
    #
    # This method needs to be called at regular intervals to serve any queued callbacks waiting to
    # be called. When in use, does **NOT** replace `#poll` but needs to run complementary with it.
    #
    # @param timeout_ms [Integer] poll timeout. If set to 0 will run async, when set to -1 will
    #   block until any events available.
    #
    # @note This method technically should be called `#poll` and the current `#poll` should be
    #   called `#consumer_poll` though we keep the current naming convention to make it backward
    #   compatible.
    def events_poll(timeout_ms = 0)
      @native_kafka.with_inner do |inner|
        Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
      end
    end

    # Poll for new messages and yield for each received one. Iteration
    # will end when the consumer is closed.
    #
    # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
    # reached, so you probably want to disable that when using this method of iteration.
    #
    # @yieldparam message [Message] Received message
    # @return [nil]
    # @raise [RdkafkaError] When polling fails
    def each
      loop do
        message = poll(250)
        if message
          yield(message)
        else
          if closed?
            break
          else
            next
          end
        end
      end
    end

    # Deprecated. Please read the error message for more details.
    def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
      raise NotImplementedError, <<~ERROR
        `each_batch` has been removed due to data consistency concerns.

        This method was removed because it did not properly handle partition reassignments,
        which could lead to processing messages from partitions that were no longer owned
        by this consumer, resulting in duplicate message processing and data inconsistencies.

        Recommended alternatives:

        1. Implement your own batching logic using rebalance callbacks to properly handle
           partition revocations and ensure message processing correctness.

        2. Use a high-level batching library that supports proper partition reassignment
           handling out of the box (such as the Karafka framework).
      ERROR
    end

    # Returns pointer to the consumer group metadata. It is used only in the context of
    # exactly-once-semantics in transactions, this is why it is never remapped to Ruby
    #
    # This API is **not** usable by itself from Ruby
    #
    # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`
    #
    # @private
    def consumer_group_metadata_pointer
      closed_consumer_check(__method__)

      @native_kafka.with_inner do |inner|
        Bindings.rd_kafka_consumer_group_metadata(inner)
      end
    end

    private

    # Raises if this consumer was already closed.
    #
    # @param method [Symbol] name of the public method being guarded (used in the error)
    # @raise [Rdkafka::ClosedConsumerError] when the consumer is closed
    def closed_consumer_check(method)
      raise Rdkafka::ClosedConsumerError.new(method) if closed?
    end
  end
end