rdkafka 0.24.2 → 0.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/Gemfile +9 -0
- data/README.md +2 -1
- data/dist/{librdkafka-2.11.1.tar.gz → librdkafka-2.12.1.tar.gz} +0 -0
- data/docker-compose-ssl.yml +1 -1
- data/docker-compose.yml +1 -1
- data/lib/rdkafka/abstract_handle.rb +23 -5
- data/lib/rdkafka/admin/acl_binding_result.rb +1 -1
- data/lib/rdkafka/admin/config_resource_binding_result.rb +1 -0
- data/lib/rdkafka/admin/create_acl_handle.rb +3 -0
- data/lib/rdkafka/admin/create_acl_report.rb +3 -0
- data/lib/rdkafka/admin/create_partitions_handle.rb +3 -0
- data/lib/rdkafka/admin/create_partitions_report.rb +1 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +3 -0
- data/lib/rdkafka/admin/create_topic_report.rb +3 -0
- data/lib/rdkafka/admin/delete_acl_handle.rb +3 -0
- data/lib/rdkafka/admin/delete_acl_report.rb +3 -0
- data/lib/rdkafka/admin/delete_groups_handle.rb +5 -0
- data/lib/rdkafka/admin/delete_groups_report.rb +3 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +3 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +3 -0
- data/lib/rdkafka/admin/describe_acl_handle.rb +3 -0
- data/lib/rdkafka/admin/describe_acl_report.rb +3 -0
- data/lib/rdkafka/admin/describe_configs_handle.rb +3 -0
- data/lib/rdkafka/admin/describe_configs_report.rb +6 -0
- data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +3 -0
- data/lib/rdkafka/admin/incremental_alter_configs_report.rb +6 -0
- data/lib/rdkafka/admin.rb +108 -112
- data/lib/rdkafka/bindings.rb +62 -29
- data/lib/rdkafka/callbacks.rb +71 -11
- data/lib/rdkafka/config.rb +20 -8
- data/lib/rdkafka/consumer/headers.rb +3 -2
- data/lib/rdkafka/consumer/message.rb +7 -3
- data/lib/rdkafka/consumer/partition.rb +6 -2
- data/lib/rdkafka/consumer/topic_partition_list.rb +8 -8
- data/lib/rdkafka/consumer.rb +40 -28
- data/lib/rdkafka/defaults.rb +106 -0
- data/lib/rdkafka/error.rb +16 -1
- data/lib/rdkafka/helpers/oauth.rb +11 -5
- data/lib/rdkafka/metadata.rb +29 -5
- data/lib/rdkafka/native_kafka.rb +26 -2
- data/lib/rdkafka/producer/delivery_report.rb +6 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +24 -14
- data/lib/rdkafka/producer.rb +49 -23
- data/lib/rdkafka/version.rb +6 -3
- data/lib/rdkafka.rb +1 -0
- data/rdkafka.gemspec +0 -7
- data/renovate.json +1 -8
- metadata +4 -87
data/lib/rdkafka/consumer.rb
CHANGED
@@ -16,6 +16,7 @@ module Rdkafka
     include Helpers::OAuth

     # @private
+    # @param native_kafka [NativeKafka] wrapper around the native Kafka consumer handle
     def initialize(native_kafka)
       @native_kafka = native_kafka
     end
@@ -33,6 +34,8 @@ module Rdkafka
       end
     end

+    # @return [Proc] finalizer proc for closing the consumer
+    # @private
     def finalizer
       ->(_) { close }
     end
@@ -67,14 +70,14 @@ module Rdkafka
       tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)

       topics.each do |topic|
-        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
+        Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, Rdkafka::Bindings::RD_KAFKA_PARTITION_UA)
       end

       # Subscribe to topic partition list and check this was successful
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
       end
     ensure
@@ -91,7 +94,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end
     end
@@ -115,7 +118,7 @@ module Rdkafka
         Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
       end

-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         list = TopicPartitionList.from_native_tpl(tpl)
         raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
       end
@@ -142,7 +145,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
       end
     ensure
@@ -162,7 +165,7 @@ module Rdkafka
         Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
       end

-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end

@@ -192,7 +195,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
       end
     ensure
@@ -211,7 +214,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end

@@ -246,7 +249,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for fetching this information.
     # @return [TopicPartitionList]
     # @raise [RdkafkaError] When getting the committed positions fails.
-    def committed(list=nil, timeout_ms=
+    def committed(list = nil, timeout_ms = Defaults::CONSUMER_COMMITTED_TIMEOUT_MS)
       closed_consumer_check(__method__)

       if list.nil?
@@ -261,7 +264,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end
       TopicPartitionList.from_native_tpl(tpl)
@@ -291,7 +294,7 @@ module Rdkafka
         Rdkafka::Bindings.rd_kafka_position(inner, tpl)
       end

-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end

@@ -305,7 +308,7 @@ module Rdkafka
     # @param timeout_ms [Integer] The timeout for querying the broker
     # @return [Integer] The low and high watermark
     # @raise [RdkafkaError] When querying the broker fails.
-    def query_watermark_offsets(topic, partition, timeout_ms=
+    def query_watermark_offsets(topic, partition, timeout_ms = Defaults::CONSUMER_QUERY_WATERMARK_TIMEOUT_MS)
       closed_consumer_check(__method__)

       low = FFI::MemoryPointer.new(:int64, 1)
@@ -321,7 +324,7 @@ module Rdkafka
           timeout_ms,
         )
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
       end

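For illustration, a minimal sketch of how the refactored default plays out at the call site; the consumer setup, broker address, and topic name below are placeholders, not part of this diff:

```ruby
require "rdkafka"

# Hypothetical consumer setup; all values are placeholders.
consumer = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id" => "example"
).consumer

# Uses Defaults::CONSUMER_QUERY_WATERMARK_TIMEOUT_MS (1_000 ms) implicitly.
low, high = consumer.query_watermark_offsets("example_topic", 0)

# The per-call override still works; the timeout is positional here.
low, high = consumer.query_watermark_offsets("example_topic", 0, 5_000)
```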
@@ -338,10 +341,10 @@ module Rdkafka
     #
     # @param topic_partition_list [TopicPartitionList] The list to calculate lag for.
     # @param watermark_timeout_ms [Integer] The timeout for each query watermark call.
-    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag
+    # @return [Hash{String => Hash{Integer => Integer}}] A hash containing all topics with the lag
     #   per partition
     # @raise [RdkafkaError] When querying the broker fails.
-    def lag(topic_partition_list, watermark_timeout_ms=
+    def lag(topic_partition_list, watermark_timeout_ms = Defaults::CONSUMER_LAG_TIMEOUT_MS)
       out = {}

       topic_partition_list.to_h.each do |topic, partitions|
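A short usage sketch for the updated `#lag` signature, assuming the consumer from the earlier example and that `#committed` returns the TopicPartitionList the docs describe:

```ruby
# Lag per topic and partition for everything this consumer has committed.
# Each watermark query uses Defaults::CONSUMER_LAG_TIMEOUT_MS unless overridden.
lag = consumer.lag(consumer.committed)
# => { "example_topic" => { 0 => 3, 1 => 0 } }  (shape per the @return tag above)
```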
@@ -409,7 +412,7 @@ module Rdkafka
         )
       end

-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
@@ -451,9 +454,9 @@ module Rdkafka
           native_topic,
           partition,
           offset,
-          0 # timeout
+          Defaults::CONSUMER_SEEK_TIMEOUT_MS
         )
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
@@ -465,11 +468,10 @@ module Rdkafka
     # Lookup offset for the given partitions by timestamp.
     #
     # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
-    #
+    # @param timeout_ms [Integer] timeout in milliseconds for the operation
     # @return [TopicPartitionList]
-    #
     # @raise [RdKafkaError] When the OffsetForTimes lookup fails
-    def offsets_for_times(list, timeout_ms =
+    def offsets_for_times(list, timeout_ms = Defaults::CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS)
       closed_consumer_check(__method__)

       if !list.is_a?(TopicPartitionList)
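A hedged sketch of calling the updated `#offsets_for_times`; it assumes `TopicPartitionList#add_topic_and_partitions_with_offsets`, the helper the gem uses for offset-keyed lists, and a placeholder topic:

```ruby
# Ask for the first offsets at or after one minute ago (timestamps in ms).
list = Rdkafka::Consumer::TopicPartitionList.new
list.add_topic_and_partitions_with_offsets(
  "example_topic",
  0 => (Time.now.to_f * 1000).to_i - 60_000
)

# Uses Defaults::CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS (1_000 ms) by default.
offsets = consumer.offsets_for_times(list)
```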
@@ -486,7 +488,7 @@ module Rdkafka
         )
       end

-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end

@@ -521,7 +523,7 @@ module Rdkafka
       response = @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
       end
-      if response != 0
+      if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(response)
       end
     ensure
@@ -546,7 +548,7 @@ module Rdkafka
       # Create struct wrapper
       native_message = Rdkafka::Bindings::Message.new(message_ptr)
       # Raise error if needed
-      if native_message[:err] != 0
+      if native_message[:err] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
         raise Rdkafka::RdkafkaError.new(native_message[:err])
       end
       # Create a message to pass out
@@ -579,7 +581,7 @@ module Rdkafka
     # @note This method technically should be called `#poll` and the current `#poll` should be
     #   called `#consumer_poll` though we keep the current naming convention to make it backward
     #   compatible.
-    def events_poll(timeout_ms = 0)
+    def events_poll(timeout_ms = Defaults::CONSUMER_EVENTS_POLL_TIMEOUT_MS)
       @native_kafka.with_inner do |inner|
         Rdkafka::Bindings.rd_kafka_poll(inner, timeout_ms)
       end
@@ -591,12 +593,13 @@ module Rdkafka
     # If `enable.partition.eof` is turned on in the config this will raise an error when an eof is
     # reached, so you probably want to disable that when using this method of iteration.
     #
+    # @param timeout_ms [Integer] Timeout for each poll iteration
     # @yieldparam message [Message] Received message
     # @return [nil]
     # @raise [RdkafkaError] When polling fails
-    def each
+    def each(timeout_ms: Defaults::CONSUMER_POLL_TIMEOUT_MS)
       loop do
-        message = poll(250)
+        message = poll(timeout_ms)
         if message
           yield(message)
         else
@@ -609,7 +612,13 @@ module Rdkafka
       end
     end

-    #
+    # @deprecated This method has been removed due to data consistency concerns
+    # @param max_items [Integer] unused
+    # @param bytes_threshold [Numeric] unused
+    # @param timeout_ms [Integer] unused
+    # @param yield_on_error [Boolean] unused
+    # @param block [Proc] unused block
+    # @raise [NotImplementedError] Always raises as this method is no longer supported
     def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
       raise NotImplementedError, <<~ERROR
         `each_batch` has been removed due to data consistency concerns.
@@ -646,6 +655,9 @@ module Rdkafka

     private

+    # Checks if the consumer is closed and raises an error if so
+    # @param method [Symbol] name of the calling method for error context
+    # @raise [ClosedConsumerError] when the consumer is closed
    def closed_consumer_check(method)
       raise Rdkafka::ClosedConsumerError.new(method) if closed?
     end
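The new `timeout_ms:` keyword on `#each` is the only public signature change in this file; a minimal sketch (topic name and handler are placeholders):

```ruby
consumer.subscribe("example_topic")

# Default poll interval: Defaults::CONSUMER_POLL_TIMEOUT_MS (250 ms), as before.
consumer.each do |message|
  puts "#{message.topic}/#{message.partition}@#{message.offset}"
end

# New in 0.25.0: tune how long each underlying poll blocks.
consumer.each(timeout_ms: 1_000) do |message|
  handle(message) # hypothetical handler method
end
```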
data/lib/rdkafka/defaults.rb
ADDED

@@ -0,0 +1,106 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # Default timeout and timing values used throughout rdkafka-ruby.
+  #
+  # All timeout values can be overridden per-call via method parameters.
+  # These constants provide a central place to understand and reference
+  # the default values used across the library.
+  #
+  # @note These are rdkafka-ruby defaults, not librdkafka configuration options.
+  #   For librdkafka options, see:
+  #   https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md
+  #
+  # @example Overriding a timeout per-call
+  #   consumer.committed(timeout_ms: 5_000) # Use 5 seconds instead of default 2 seconds
+  #
+  # @example Checking the default value
+  #   Rdkafka::Defaults::CONSUMER_COMMITTED_TIMEOUT_MS # => 2000
+  module Defaults
+    # Consumer timeouts (in milliseconds)
+
+    # Default timeout for fetching committed offsets
+    # @see Consumer#committed
+    CONSUMER_COMMITTED_TIMEOUT_MS = 2_000
+
+    # Default timeout for querying watermark offsets
+    # @see Consumer#query_watermark_offsets
+    CONSUMER_QUERY_WATERMARK_TIMEOUT_MS = 1_000
+
+    # Default timeout for lag calculation watermark queries
+    # @see Consumer#lag
+    CONSUMER_LAG_TIMEOUT_MS = 1_000
+
+    # Default timeout for offsets_for_times operation
+    # @see Consumer#offsets_for_times
+    CONSUMER_OFFSETS_FOR_TIMES_TIMEOUT_MS = 1_000
+
+    # Default poll timeout for Consumer#each iterator
+    # @see Consumer#each
+    CONSUMER_POLL_TIMEOUT_MS = 250
+
+    # Seek operation timeout (0 = non-blocking)
+    # @see Consumer#seek_by
+    CONSUMER_SEEK_TIMEOUT_MS = 0
+
+    # Events poll timeout (0 = non-blocking/async)
+    # @see Consumer#events_poll
+    CONSUMER_EVENTS_POLL_TIMEOUT_MS = 0
+
+    # Producer timeouts (in milliseconds)
+
+    # Default timeout for producer flush operation
+    # @see Producer#flush
+    PRODUCER_FLUSH_TIMEOUT_MS = 5_000
+
+    # Default flush timeout during purge operation
+    # @see Producer#purge
+    PRODUCER_PURGE_FLUSH_TIMEOUT_MS = 100
+
+    # Metadata timeouts (in milliseconds)
+
+    # Default timeout for metadata requests
+    # @see Admin#metadata
+    # @see Metadata#initialize
+    METADATA_TIMEOUT_MS = 2_000
+
+    # Handle wait timeouts (in milliseconds)
+
+    # Default maximum wait timeout for async handles (delivery, admin operations)
+    # @see AbstractHandle#wait
+    HANDLE_WAIT_TIMEOUT_MS = 60_000
+
+    # Native Kafka polling (in milliseconds)
+
+    # Default poll timeout for producer/admin native polling thread
+    # @see Config#producer
+    # @see Config#admin
+    NATIVE_KAFKA_POLL_TIMEOUT_MS = 100
+
+    # Internal timing (in milliseconds)
+
+    # Sleep interval during purge wait loop
+    # @see Producer#purge
+    PRODUCER_PURGE_SLEEP_INTERVAL_MS = 1
+
+    # Sleep interval while waiting for operations to complete in NativeKafka#synchronize
+    # @see NativeKafka#synchronize
+    NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS = 10
+
+    # Base backoff factor for metadata retry in milliseconds (multiplied by 2^attempt)
+    # @see Metadata#initialize
+    METADATA_RETRY_BACKOFF_BASE_MS = 100
+
+    # Cache settings (in milliseconds)
+
+    # Default time-to-live for cached partition counts
+    # @see Producer::PartitionsCountCache
+    PARTITIONS_COUNT_CACHE_TTL_MS = 30_000
+
+    # Configuration values (not time-based)
+
+    # Maximum number of metadata fetch retry attempts
+    # @see Metadata#initialize
+    METADATA_MAX_RETRIES = 10
+  end
+end
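The constants are plain Ruby values, so they can be inspected directly. Note that the consumer signatures shown earlier take the timeout positionally, so a per-call override is passed as an argument rather than a keyword:

```ruby
Rdkafka::Defaults::CONSUMER_COMMITTED_TIMEOUT_MS # => 2000
Rdkafka::Defaults::METADATA_MAX_RETRIES          # => 10

# Per-call override, matching the `committed(list = nil, timeout_ms = ...)`
# signature from the consumer diff above (5 s instead of the default).
consumer.committed(nil, 5_000)
```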
data/lib/rdkafka/error.rb
CHANGED
@@ -19,6 +19,9 @@ module Rdkafka
     attr_reader :broker_message

     # @private
+    # @param response [Integer] the raw error response code from librdkafka
+    # @param message_prefix [String, nil] optional prefix for error messages
+    # @param broker_message [String, nil] optional error message from the broker
     def initialize(response, message_prefix=nil, broker_message: nil)
       raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
       @rdkafka_response = response
@@ -55,6 +58,8 @@ module Rdkafka
     end

     # Error comparison
+    # @param another_error [Object] object to compare with
+    # @return [Boolean]
     def ==(another_error)
       another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
     end
@@ -66,6 +71,9 @@ module Rdkafka
     attr_reader :topic_partition_list

     # @private
+    # @param response [Integer] the raw error response code from librdkafka
+    # @param topic_partition_list [TopicPartitionList] the topic partition list with error info
+    # @param message_prefix [String, nil] optional prefix for error messages
     def initialize(response, topic_partition_list, message_prefix=nil)
       super(response, message_prefix)
       @topic_partition_list = topic_partition_list
@@ -74,6 +82,7 @@ module Rdkafka

   # Error class for public consumer method calls on a closed consumer.
   class ClosedConsumerError < BaseError
+    # @param method [Symbol] the method that was called
     def initialize(method)
       super("Illegal call to #{method.to_s} on a closed consumer")
     end
@@ -81,21 +90,27 @@ module Rdkafka

   # Error class for public producer method calls on a closed producer.
   class ClosedProducerError < BaseError
+    # @param method [Symbol] the method that was called
     def initialize(method)
       super("Illegal call to #{method.to_s} on a closed producer")
     end
   end

-  # Error class for public
+  # Error class for public admin method calls on a closed admin.
   class ClosedAdminError < BaseError
+    # @param method [Symbol] the method that was called
     def initialize(method)
       super("Illegal call to #{method.to_s} on a closed admin")
     end
   end

+  # Error class for calls on a closed inner librdkafka instance.
   class ClosedInnerError < BaseError
     def initialize
       super("Illegal call to a closed inner librdkafka instance")
     end
   end
+
+  # Error class for librdkafka library loading failures (e.g., glibc compatibility issues).
+  class LibraryLoadError < BaseError; end
 end
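The closed-handle errors above produce deterministic messages, which makes them easy to rescue and assert on; a minimal sketch:

```ruby
begin
  consumer.close
  consumer.committed
rescue Rdkafka::ClosedConsumerError => e
  e.message # => "Illegal call to committed on a closed consumer"
end
```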
data/lib/rdkafka/helpers/oauth.rb
CHANGED

@@ -1,6 +1,6 @@
 module Rdkafka
   module Helpers
-
+    # OAuth helper methods for setting and refreshing SASL/OAUTHBEARER tokens
     module OAuth

       # Set the OAuthBearer token
@@ -47,8 +47,11 @@ module Rdkafka

       private

-      # Convert extensions hash to FFI::MemoryPointer (const char **)
-      #
+      # Convert extensions hash to FFI::MemoryPointer (`const char **`).
+      #
+      # @param extensions [Hash, nil] extension key-value pairs
+      # @return [Array<FFI::MemoryPointer, Array<FFI::MemoryPointer>>] array pointer and string pointers
+      # @note The returned pointers must be freed manually (autorelease = false).
       def map_extensions(extensions)
         return [nil, nil] if extensions.nil? || extensions.empty?

@@ -74,8 +77,11 @@ module Rdkafka
         [array_ptr, str_ptrs]
       end

-      #
-      #
+      # Returns the extension size (number of keys + values).
+      #
+      # @param extensions [Hash, nil] extension key-value pairs
+      # @return [Integer] non-negative even number representing keys + values count
+      # @see https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
       def extension_size(extensions)
         return 0 unless extensions
         extensions.size * 2
data/lib/rdkafka/metadata.rb
CHANGED
@@ -1,8 +1,12 @@
 # frozen_string_literal: true

 module Rdkafka
+  # Provides cluster metadata information
   class Metadata
-    attr_reader :brokers, :topics
+    # @return [Array<Hash>] list of broker metadata
+    attr_reader :brokers
+    # @return [Array<Hash>] list of topic metadata
+    attr_reader :topics

     # Errors upon which we retry the metadata fetch
     RETRIED_ERRORS = %i[
@@ -12,7 +16,13 @@ module Rdkafka

     private_constant :RETRIED_ERRORS

-    def initialize(native_client, topic_name = nil, timeout_ms =
+    # Fetches metadata from the Kafka cluster
+    #
+    # @param native_client [FFI::Pointer] pointer to the native Kafka client
+    # @param topic_name [String, nil] specific topic to fetch metadata for, or nil for all topics
+    # @param timeout_ms [Integer] timeout in milliseconds
+    # @raise [RdkafkaError] when metadata fetch fails
+    def initialize(native_client, topic_name = nil, timeout_ms = Defaults::METADATA_TIMEOUT_MS)
       attempt ||= 0
       attempt += 1

@@ -35,12 +45,12 @@ module Rdkafka
       metadata_from_native(ptr.read_pointer)
     rescue ::Rdkafka::RdkafkaError => e
       raise unless RETRIED_ERRORS.include?(e.code)
-      raise if attempt > 10
+      raise if attempt > Defaults::METADATA_MAX_RETRIES

       backoff_factor = 2**attempt
-      timeout = backoff_factor * 0.1
+      timeout_ms = backoff_factor * Defaults::METADATA_RETRY_BACKOFF_BASE_MS

-      sleep(timeout)
+      sleep(timeout_ms / 1000.0)

       retry
     ensure
@@ -50,6 +60,8 @@ module Rdkafka

     private

+    # Extracts metadata from native pointer
+    # @param ptr [FFI::Pointer] pointer to native metadata
     def metadata_from_native(ptr)
       metadata = Metadata.new(ptr)
       @brokers = Array.new(metadata[:brokers_count]) do |i|
@@ -69,7 +81,11 @@ module Rdkafka
       end
     end

+    # Base class for metadata FFI structs with hash conversion
+    # @private
     class CustomFFIStruct < FFI::Struct
+      # Converts struct to a hash
+      # @return [Hash]
       def to_h
         members.each_with_object({}) do |mem, hsh|
           val = self.[](mem)
@@ -80,6 +96,8 @@ module Rdkafka
       end
     end

+    # @private
+    # FFI struct for rd_kafka_metadata_t
     class Metadata < CustomFFIStruct
       layout :brokers_count, :int,
              :brokers_metadata, :pointer,
@@ -89,12 +107,16 @@ module Rdkafka
              :broker_name, :string
     end

+    # @private
+    # FFI struct for rd_kafka_metadata_broker_t
     class BrokerMetadata < CustomFFIStruct
       layout :broker_id, :int32,
              :broker_name, :string,
              :broker_port, :int
     end

+    # @private
+    # FFI struct for rd_kafka_metadata_topic_t
     class TopicMetadata < CustomFFIStruct
       layout :topic_name, :string,
              :partition_count, :int,
@@ -102,6 +124,8 @@ module Rdkafka
              :rd_kafka_resp_err, :int
     end

+    # @private
+    # FFI struct for rd_kafka_metadata_partition_t
     class PartitionMetadata < CustomFFIStruct
       layout :partition_id, :int32,
              :rd_kafka_resp_err, :int,
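With `brokers` and `topics` now exposed as documented readers, the hashes produced by `CustomFFIStruct#to_h` become part of the public surface. An illustrative sketch only; `metadata` stands for an internally built `Rdkafka::Metadata` instance and the values are made up:

```ruby
metadata.brokers # => [{ broker_id: 1, broker_name: "localhost", broker_port: 9092 }]
metadata.topics  # => [{ topic_name: "example_topic", partition_count: 3, ... }]
```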
data/lib/rdkafka/native_kafka.rb
CHANGED
@@ -4,7 +4,13 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
+    # Creates a new NativeKafka wrapper
+    # @param inner [FFI::Pointer] pointer to the native Kafka handle
+    # @param run_polling_thread [Boolean] whether to run a background polling thread
+    # @param opaque [Rdkafka::Opaque] opaque object for callback context
+    # @param auto_start [Boolean] whether to start the polling thread automatically
+    # @param timeout_ms [Integer] poll timeout in milliseconds
+    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: Defaults::NATIVE_KAFKA_POLL_TIMEOUT_MS)
       @inner = inner
       @opaque = opaque
       # Lock around external access
@@ -37,6 +43,8 @@ module Rdkafka
       @closing = false
     end

+    # Starts the polling thread if configured
+    # @return [nil]
     def start
       synchronize do
         return if @started
@@ -69,6 +77,10 @@ module Rdkafka
       end
     end

+    # Executes a block with the inner native Kafka handle
+    # @yield [FFI::Pointer] the inner native Kafka handle
+    # @return [Object] the result of the block
+    # @raise [ClosedInnerError] when the inner handle is nil
     def with_inner
       if @access_mutex.owned?
         @operations_in_progress += 1
@@ -81,26 +93,38 @@ module Rdkafka
       @decrement_mutex.synchronize { @operations_in_progress -= 1 }
     end

+    # Executes a block while holding exclusive access to the native Kafka handle
+    # @param block [Proc] block to execute with the native handle
+    # @yield [FFI::Pointer] the inner native Kafka handle
+    # @return [Object] the result of the block
     def synchronize(&block)
       @access_mutex.synchronize do
         # Wait for any commands using the inner to finish
         # This can take a while on blocking operations like polling but is essential not to proceed
         # with certain types of operations like resources destruction as it can cause the process
         # to hang or crash
-        sleep(0.01) until @operations_in_progress.zero?
+        sleep(Defaults::NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS / 1000.0) until @operations_in_progress.zero?

         with_inner(&block)
       end
     end

+    # Returns a finalizer proc for closing this native Kafka handle
+    # @return [Proc] finalizer proc
     def finalizer
       ->(_) { close }
     end

+    # Returns whether this native Kafka handle is closed or closing
+    # @return [Boolean] true if closed or closing
     def closed?
       @closing || @inner.nil?
     end

+    # Closes the native Kafka handle and cleans up resources
+    # @param object_id [Integer, nil] optional object ID (unused, for finalizer compatibility)
+    # @yield optional block to execute before destroying the handle
+    # @return [nil]
     def close(object_id=nil)
       return if closed?

data/lib/rdkafka/producer/delivery_report.rb
CHANGED

@@ -32,8 +32,12 @@ module Rdkafka
     # We do not remove the original `#topic_name` because of backwards compatibility
     alias topic topic_name

-    private
-
+    # @private
+    # @param partition [Integer] partition number
+    # @param offset [Integer] message offset
+    # @param topic_name [String, nil] topic name
+    # @param error [Integer, nil] error code if any
+    # @param label [Object, nil] user-defined label
     def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
       @partition = partition
       @offset = offset
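Since the whole delivery-report flow is touched by this release (see also `abstract_handle.rb` and `producer.rb` in the file list), a short usage sketch; topic, payload, and label values are placeholders, and `label:` on `#produce` is assumed from the `label` parameter documented above:

```ruby
handle = producer.produce(topic: "example_topic", payload: "hello", label: "order-1")
report = handle.wait # blocks up to the handle default (Defaults::HANDLE_WAIT_TIMEOUT_MS)

report.topic     # alias of #topic_name, kept for backwards compatibility
report.partition # partition the message landed in
report.offset    # offset assigned by the broker
report.label     # => "order-1"
```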
|