rdkafka 0.25.1 → 0.26.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 36d2a4ebad99bd98289d2af65b9dea7f244a70f5025d3d8dc96cff7020f762f3
4
- data.tar.gz: fa243ec34891717494123b0b1ed012d3ac37d8c6541f1d9ad8c38982d57c90fe
3
+ metadata.gz: cfa1b93687c48e464d6ed74591aaa63a719f3f8278d24729d47ed4517942a4a2
4
+ data.tar.gz: c9ae55f386c986a76bce3f1bb87b53647d8c657bb6a49a90eac556da32a272e8
5
5
  SHA512:
6
- metadata.gz: d15396b3341b8685d50167d9531e52be6c3fd96d3e9d4a9d34453fd982472c19bfb84c5754f71e91aa5bd1bfd6c9fc2c3bf6941ae84431c85165845f8ba86e57
7
- data.tar.gz: a6be61394c84fd7c939b72c2d1283f415c66692890c5f55888573b56a2cee5e3038ba8de39087f1fb70e9480781b2b3052d980597d1e2d1350c070740bfd8f6c
6
+ metadata.gz: 0d32f50f1313ff623dfde28d1453f7dadb0e8fd23049bfbfd8fdcd89ce4ce87611a496ad3a82ac57763827ba80cebf1b3801a7eb690a2b21abcde706e950c9a4
7
+ data.tar.gz: 61f908671be7b0e72cd4670cb8c21ea7a644dbb69b36821f5896255da5c26f2dd6f18a75cfc30acfa05255c143e7826ea56f4d4ed27dcc8779cb99981917ff42
data/CHANGELOG.md CHANGED
@@ -1,6 +1,15 @@
1
1
  # Rdkafka Changelog
2
2
 
3
+ ## 0.26.0 (2026-04-02)
4
+ - [Enhancement] Bump librdkafka to `2.13.2`
5
+ - [Enhancement] Embed a per-file SPEC_HASH in test topic and consumer group names for tracing Kafka warnings back to specific spec files.
6
+ - [Fix] Register `ObjectSpace.define_finalizer` in `Rdkafka::Consumer` to prevent segfaults when a consumer is GC'd without being explicitly closed (jturkel).
7
+ - [Fix] Remove dead `#finalizer` instance methods from `Consumer` and `Admin` that could never work as GC finalizers.
8
+ - [Fix] Prevent cascading test failures in admin specs when a single handle leaks into the registry.
9
+ - [Feature] Extend `Rdkafka::RdkafkaError` with `instance_name` attribute containing the `rd_kafka_name` for tying errors back to specific native Kafka instances (#181).
10
+
3
11
  ## 0.25.1 (2026-02-26)
12
+ - [Feature] Support `rd_kafka_ListOffsets` admin API for querying partition offsets by specification (earliest, latest, max_timestamp, or by timestamp) without requiring a consumer group (#613).
4
13
  - [Enhancement] Use native ARM64 runners instead of QEMU emulation for Alpine musl aarch64 builds, improving build performance and reliability.
5
14
  - [Enhancement] Enable parallel compilation (`make -j$(nproc)`) for ARM64 Alpine musl builds.
6
15
  - [Enhancement] Add file descriptor API for fiber scheduler integration. Expose `enable_queue_io_events` and `enable_background_queue_io_events` on `Consumer`, `Producer`, and `Admin` to enable non-blocking monitoring with select/poll/epoll for integration with Ruby fiber schedulers (Falcon, Async) and custom event loops.
data/README.md CHANGED
@@ -163,6 +163,7 @@ bundle exec rake produce_messages
163
163
 
164
164
  | rdkafka-ruby | librdkafka | patches |
165
165
  |-|-|-|
166
+ | 0.26.x (2026-03-30) | 2.13.2 (2026-03-30) | yes |
166
167
  | 0.25.x (2026-01-21) | 2.12.1 (2025-10-21) | yes |
167
168
  | 0.24.x (2025-10-10) | 2.11.1 (2025-08-18) | yes |
168
169
  | 0.23.x (2025-09-04) | 2.11.0 (2025-07-03) | yes |
@@ -12,6 +12,8 @@ allowed_patterns=(
12
12
  "Creating new"
13
13
  "Unloaded transaction metadata"
14
14
  "closing connection"
15
+ "sent a heartbeat request but received error REQUEST_TIMED_OUT"
16
+ "Topic '__consumer_offsets' already exists"
15
17
  )
16
18
 
17
19
  # Get all warnings
@@ -1,7 +1,7 @@
1
1
  services:
2
2
  kafka:
3
3
  container_name: kafka
4
- image: confluentinc/cp-kafka:8.1.1
4
+ image: confluentinc/cp-kafka:8.2.0
5
5
  ports:
6
6
  - 9092:9092 # Support PLAINTEXT so we can run one docker setup for SSL and PLAINTEXT
7
7
  - 9093:9093
@@ -19,7 +19,7 @@ services:
19
19
  KAFKA_BROKER_ID: 1
20
20
  KAFKA_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:9094
21
21
  ALLOW_PLAINTEXT_LISTENER: 'yes'
22
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
22
+ KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
23
23
  KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
24
24
  KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
25
25
  KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
data/docker-compose.yml CHANGED
@@ -1,7 +1,7 @@
1
1
  services:
2
2
  kafka:
3
3
  container_name: kafka
4
- image: confluentinc/cp-kafka:8.1.1
4
+ image: confluentinc/cp-kafka:8.2.0
5
5
 
6
6
  ports:
7
7
  - 9092:9092
@@ -18,7 +18,7 @@ services:
18
18
  KAFKA_BROKER_ID: 1
19
19
  KAFKA_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:9093
20
20
  ALLOW_PLAINTEXT_LISTENER: 'yes'
21
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
21
+ KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
22
22
  KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
23
23
  KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
24
24
  KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
@@ -0,0 +1,36 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rdkafka
4
+ class Admin
5
+ # Handle for list offsets operation
6
+ class ListOffsetsHandle < AbstractHandle
7
+ layout :pending, :bool,
8
+ :response, :int,
9
+ :response_string, :pointer,
10
+ :result_infos, :pointer,
11
+ :result_count, :int
12
+
13
+ # @return [String] the name of the operation.
14
+ def operation_name
15
+ "list offsets"
16
+ end
17
+
18
+ # @return [ListOffsetsReport] instance with partition offset information.
19
+ def create_result
20
+ ListOffsetsReport.new(
21
+ result_infos: self[:result_infos],
22
+ result_count: self[:result_count]
23
+ )
24
+ end
25
+
26
+ # Raises an error if the operation failed
27
+ # @raise [RdkafkaError]
28
+ def raise_error
29
+ raise RdkafkaError.new(
30
+ self[:response],
31
+ broker_message: self[:response_string].read_string
32
+ )
33
+ end
34
+ end
35
+ end
36
+ end
@@ -0,0 +1,51 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rdkafka
4
+ class Admin
5
+ # Report for list offsets operation result
6
+ class ListOffsetsReport
7
+ attr_reader :offsets
8
+
9
+ # @param result_infos [FFI::Pointer] pointer to result info array
10
+ # @param result_count [Integer] number of result info entries
11
+ def initialize(result_infos:, result_count:)
12
+ @offsets = []
13
+
14
+ return if result_infos.null?
15
+
16
+ result_infos
17
+ .read_array_of_pointer(result_count)
18
+ .each { |result_info_ptr| validate!(result_info_ptr) }
19
+ .each do |result_info_ptr|
20
+ tp_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(result_info_ptr)
21
+ tp = Bindings::TopicPartition.new(tp_ptr)
22
+ timestamp = Bindings.rd_kafka_ListOffsetsResultInfo_timestamp(result_info_ptr)
23
+ leader_epoch = Bindings.rd_kafka_topic_partition_get_leader_epoch(tp_ptr)
24
+
25
+ @offsets << {
26
+ topic: tp[:topic],
27
+ partition: tp[:partition],
28
+ offset: tp[:offset],
29
+ timestamp: timestamp,
30
+ leader_epoch: (leader_epoch == -1) ? nil : leader_epoch
31
+ }
32
+ end
33
+ end
34
+
35
+ private
36
+
37
+ # Validates the partition result and raises an error if invalid
38
+ # @param result_info_ptr [FFI::Pointer] pointer to the result info
39
+ # @raise [RdkafkaError] when the partition has an error
40
+ def validate!(result_info_ptr)
41
+ tp_ptr = Bindings.rd_kafka_ListOffsetsResultInfo_topic_partition(result_info_ptr)
42
+ tp = Bindings::TopicPartition.new(tp_ptr)
43
+ code = tp[:err]
44
+
45
+ return if code.zero?
46
+
47
+ raise RdkafkaError.new(code)
48
+ end
49
+ end
50
+ end
51
+ end
data/lib/rdkafka/admin.rb CHANGED
@@ -130,12 +130,6 @@ module Rdkafka
130
130
  end
131
131
  end
132
132
 
133
- # @return [Proc] finalizer proc for closing the admin
134
- # @private
135
- def finalizer
136
- ->(_) { close }
137
- end
138
-
139
133
  # Performs the metadata request using admin
140
134
  #
141
135
  # @param topic_name [String, nil] metadata about a particular topic or all if nil
@@ -882,6 +876,111 @@ module Rdkafka
882
876
  handle
883
877
  end
884
878
 
879
+ # Queries partition offsets by specification (earliest, latest, max_timestamp, or by
880
+ # timestamp) without requiring a consumer group.
881
+ #
882
+ # @param topic_partition_offsets [Hash{String => Array<Hash>}] hash mapping topic names to
883
+ # arrays of partition offset specifications. Each specification is a hash with:
884
+ # - `:partition` [Integer] partition number
885
+ # - `:offset` [Symbol, Integer] offset specification - `:earliest`, `:latest`,
886
+ # `:max_timestamp`, or an integer timestamp in milliseconds
887
+ # @param isolation_level [Integer, nil] optional isolation level:
888
+ # - `RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED` (0) - default
889
+ # - `RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED` (1)
890
+ #
891
+ # @return [ListOffsetsHandle] handle that can be used to wait for the result
892
+ #
893
+ # @raise [ClosedAdminError] when the admin is closed
894
+ # @raise [ConfigError] when the background queue is unavailable
895
+ #
896
+ # @example Query earliest and latest offsets
897
+ # handle = admin.list_offsets(
898
+ # { "my_topic" => [
899
+ # { partition: 0, offset: :earliest },
900
+ # { partition: 1, offset: :latest }
901
+ # ] }
902
+ # )
903
+ # report = handle.wait(max_wait_timeout_ms: 15_000)
904
+ # report.offsets
905
+ # # => [{ topic: "my_topic", partition: 0, offset: 0, ... }, ...]
906
+ def list_offsets(topic_partition_offsets, isolation_level: nil)
907
+ closed_admin_check(__method__)
908
+
909
+ # Count total partitions for pre-allocation
910
+ total = topic_partition_offsets.sum { |_, partitions| partitions.size }
911
+
912
+ # Build native topic partition list
913
+ tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(total)
914
+
915
+ topic_partition_offsets.each do |topic, partitions|
916
+ partitions.each do |spec|
917
+ partition = spec.fetch(:partition)
918
+ offset = spec.fetch(:offset)
919
+
920
+ native_offset = case offset
921
+ when :earliest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_EARLIEST
922
+ when :latest then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_LATEST
923
+ when :max_timestamp then Rdkafka::Bindings::RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP
924
+ when Integer then offset
925
+ else
926
+ raise ArgumentError, "Unknown offset specification: #{offset.inspect}"
927
+ end
928
+
929
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, partition)
930
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(tpl, topic, partition, native_offset)
931
+ end
932
+ end
933
+
934
+ # Get a pointer to the queue that our request will be enqueued on
935
+ queue_ptr = @native_kafka.with_inner do |inner|
936
+ Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
937
+ end
938
+
939
+ if queue_ptr.null?
940
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
941
+ raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
942
+ end
943
+
944
+ # Create and register the handle we will return to the caller
945
+ handle = ListOffsetsHandle.new
946
+ handle[:pending] = true
947
+ handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
948
+
949
+ admin_options_ptr = @native_kafka.with_inner do |inner|
950
+ Rdkafka::Bindings.rd_kafka_AdminOptions_new(
951
+ inner,
952
+ Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_LISTOFFSETS
953
+ )
954
+ end
955
+
956
+ if isolation_level
957
+ Rdkafka::Bindings.rd_kafka_AdminOptions_set_isolation_level(admin_options_ptr, isolation_level)
958
+ end
959
+
960
+ ListOffsetsHandle.register(handle)
961
+ Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, handle.to_ptr)
962
+
963
+ begin
964
+ @native_kafka.with_inner do |inner|
965
+ Rdkafka::Bindings.rd_kafka_ListOffsets(
966
+ inner,
967
+ tpl,
968
+ admin_options_ptr,
969
+ queue_ptr
970
+ )
971
+ end
972
+ rescue Exception
973
+ ListOffsetsHandle.remove(handle.to_ptr.address)
974
+ raise
975
+ ensure
976
+ Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
977
+ Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
978
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
979
+ end
980
+
981
+ handle
982
+ end
983
+
885
984
  private
886
985
 
887
986
  # Checks if the admin is closed and raises an error if so
@@ -188,6 +188,27 @@ module Rdkafka
188
188
  RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2
189
189
  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3
190
190
 
191
+ # List Offsets
192
+ RD_KAFKA_ADMIN_OP_LISTOFFSETS = 20
193
+ RD_KAFKA_EVENT_LISTOFFSETS_RESULT = 0x400000
194
+
195
+ # rd_kafka_IsolationLevel_t
196
+ RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0
197
+ RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1
198
+
199
+ # rd_kafka_OffsetSpec_t
200
+ RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3
201
+ RD_KAFKA_OFFSET_SPEC_EARLIEST = -2
202
+ RD_KAFKA_OFFSET_SPEC_LATEST = -1
203
+
204
+ attach_function :rd_kafka_ListOffsets, [:pointer, :pointer, :pointer, :pointer], :void, blocking: true
205
+ attach_function :rd_kafka_event_ListOffsets_result, [:pointer], :pointer
206
+ attach_function :rd_kafka_ListOffsets_result_infos, [:pointer, :pointer], :pointer
207
+ attach_function :rd_kafka_ListOffsetsResultInfo_topic_partition, [:pointer], :pointer
208
+ attach_function :rd_kafka_ListOffsetsResultInfo_timestamp, [:pointer], :int64
209
+ attach_function :rd_kafka_AdminOptions_set_isolation_level, [:pointer, :int], :pointer
210
+ attach_function :rd_kafka_topic_partition_get_leader_epoch, [:pointer], :int32
211
+
191
212
  # FFI struct for error description (rd_kafka_err_desc)
192
213
  class NativeErrorDesc < FFI::Struct
193
214
  layout :code, :int,
@@ -300,9 +321,10 @@ module Rdkafka
300
321
 
301
322
  ErrorCallback = FFI::Function.new(
302
323
  :void, [:pointer, :int, :string, :pointer]
303
- ) do |_client_prr, err_code, reason, _opaque|
324
+ ) do |client_ptr, err_code, reason, _opaque|
304
325
  if Rdkafka::Config.error_callback
305
- error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
326
+ instance_name = client_ptr.null? ? nil : Rdkafka::Bindings.rd_kafka_name(client_ptr)
327
+ error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason, instance_name: instance_name)
306
328
  error.set_backtrace(caller)
307
329
  Rdkafka::Config.error_callback.call(error)
308
330
  end
@@ -180,6 +180,27 @@ module Rdkafka
180
180
  end
181
181
  end
182
182
 
183
+ # Extracts attributes of rd_kafka_ListOffsets_result_t
184
+ #
185
+ # @private
186
+ class ListOffsetsResult
187
+ attr_reader :result_error, :error_string, :result_infos, :result_count
188
+
189
+ # @param event_ptr [FFI::Pointer] pointer to the event
190
+ def initialize(event_ptr)
191
+ @result_infos = FFI::Pointer::NULL
192
+ @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
193
+ @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
194
+
195
+ if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
196
+ list_offsets_result = Rdkafka::Bindings.rd_kafka_event_ListOffsets_result(event_ptr)
197
+ pointer_to_size_t = FFI::MemoryPointer.new(:size_t)
198
+ @result_infos = Rdkafka::Bindings.rd_kafka_ListOffsets_result_infos(list_offsets_result, pointer_to_size_t)
199
+ @result_count = pointer_to_size_t.read(:size_t)
200
+ end
201
+ end
202
+ end
203
+
183
204
  # @private
184
205
  class BackgroundEventCallback
185
206
  # Handles background events from librdkafka
@@ -206,6 +227,8 @@ module Rdkafka
206
227
  process_describe_acl(event_ptr)
207
228
  when Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
208
229
  process_delete_groups(event_ptr)
230
+ when Rdkafka::Bindings::RD_KAFKA_EVENT_LISTOFFSETS_RESULT
231
+ process_list_offsets(event_ptr)
209
232
  end
210
233
  end
211
234
 
@@ -392,6 +415,26 @@ module Rdkafka
392
415
  describe_acl_handle.unlock
393
416
  end
394
417
  end
418
+
419
+ # Processes list offsets result event
420
+ # @param event_ptr [FFI::Pointer] pointer to the event
421
+ def self.process_list_offsets(event_ptr)
422
+ list_offsets = ListOffsetsResult.new(event_ptr)
423
+ list_offsets_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
424
+
425
+ if list_offsets_handle = Rdkafka::Admin::ListOffsetsHandle.remove(list_offsets_handle_ptr.address)
426
+ list_offsets_handle[:response] = list_offsets.result_error
427
+ list_offsets_handle[:response_string] = list_offsets.error_string
428
+ list_offsets_handle[:pending] = false
429
+
430
+ if list_offsets.result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
431
+ list_offsets_handle[:result_infos] = list_offsets.result_infos
432
+ list_offsets_handle[:result_count] = list_offsets.result_count
433
+ end
434
+
435
+ list_offsets_handle.unlock
436
+ end
437
+ end
395
438
  end
396
439
 
397
440
  # @private
@@ -89,10 +89,10 @@ module Rdkafka
89
89
  # If this callback is not set, global errors such as brokers becoming unavailable will only be sent to the logger, as defined by librdkafka.
90
90
  # The callback is called with an instance of RdKafka::Error.
91
91
  #
92
- # @param callback [Proc, #call] callable object to handle errors
92
+ # @param callback [Proc, #call, nil] callable object to handle errors or nil to clear
93
93
  # @return [nil]
94
94
  def self.error_callback=(callback)
95
- raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
95
+ raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call) || callback.nil?
96
96
  @@error_callback = callback
97
97
  end
98
98
 
@@ -19,6 +19,9 @@ module Rdkafka
19
19
  # @param native_kafka [NativeKafka] wrapper around the native Kafka consumer handle
20
20
  def initialize(native_kafka)
21
21
  @native_kafka = native_kafka
22
+
23
+ # Makes sure that the native Kafka instance gets closed before it gets GCed by Ruby
24
+ ObjectSpace.define_finalizer(self, native_kafka.finalizer)
22
25
  end
23
26
 
24
27
  # Starts the native Kafka polling thread and kicks off the init polling
@@ -172,12 +175,6 @@ module Rdkafka
172
175
  end
173
176
  end
174
177
 
175
- # @return [Proc] finalizer proc for closing the consumer
176
- # @private
177
- def finalizer
178
- ->(_) { close }
179
- end
180
-
181
178
  # Close this consumer
182
179
  # @return [nil]
183
180
  def close
data/lib/rdkafka/error.rb CHANGED
@@ -18,15 +18,21 @@ module Rdkafka
18
18
  # @return [String]
19
19
  attr_reader :broker_message
20
20
 
21
+ # The name of the rdkafka instance that generated this error
22
+ # @return [String, nil]
23
+ attr_reader :instance_name
24
+
21
25
  # @private
22
26
  # @param response [Integer] the raw error response code from librdkafka
23
27
  # @param message_prefix [String, nil] optional prefix for error messages
24
28
  # @param broker_message [String, nil] optional error message from the broker
25
- def initialize(response, message_prefix = nil, broker_message: nil)
29
+ # @param instance_name [String, nil] optional name of the rdkafka instance
30
+ def initialize(response, message_prefix = nil, broker_message: nil, instance_name: nil)
26
31
  raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
27
32
  @rdkafka_response = response
28
33
  @message_prefix = message_prefix
29
34
  @broker_message = broker_message
35
+ @instance_name = instance_name
30
36
  end
31
37
 
32
38
  # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
@@ -48,7 +54,12 @@ module Rdkafka
48
54
  else
49
55
  ""
50
56
  end
51
- "#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})"
57
+ instance_name_part = if instance_name
58
+ " [#{instance_name}]"
59
+ else
60
+ ""
61
+ end
62
+ "#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})#{instance_name_part}"
52
63
  end
53
64
 
54
65
  # Whether this error indicates the partition is EOF.
@@ -2,9 +2,9 @@
2
2
 
3
3
  module Rdkafka
4
4
  # Current rdkafka-ruby gem version
5
- VERSION = "0.25.1"
5
+ VERSION = "0.26.0"
6
6
  # Target librdkafka version to be used
7
- LIBRDKAFKA_VERSION = "2.12.1"
7
+ LIBRDKAFKA_VERSION = "2.13.2"
8
8
  # SHA256 hash of the librdkafka source tarball for verification
9
- LIBRDKAFKA_SOURCE_SHA256 = "ec103fa05cb0f251e375f6ea0b6112cfc9d0acd977dc5b69fdc54242ba38a16f"
9
+ LIBRDKAFKA_SOURCE_SHA256 = "14972092e4115f6e99f798a7cb420cbf6daa0c73502b3c52ae42fb5b418eea8f"
10
10
  end
data/lib/rdkafka.rb CHANGED
@@ -29,6 +29,8 @@ require "rdkafka/admin/describe_configs_handle"
29
29
  require "rdkafka/admin/describe_configs_report"
30
30
  require "rdkafka/admin/incremental_alter_configs_handle"
31
31
  require "rdkafka/admin/incremental_alter_configs_report"
32
+ require "rdkafka/admin/list_offsets_handle"
33
+ require "rdkafka/admin/list_offsets_report"
32
34
  require "rdkafka/admin/acl_binding_result"
33
35
  require "rdkafka/admin/config_binding_result"
34
36
  require "rdkafka/admin/config_resource_binding_result"
data/package-lock.json CHANGED
@@ -217,9 +217,9 @@
217
217
  }
218
218
  },
219
219
  "node_modules/picomatch": {
220
- "version": "2.3.1",
221
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
222
- "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
220
+ "version": "2.3.2",
221
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
222
+ "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
223
223
  "dev": true,
224
224
  "license": "MIT",
225
225
  "engines": {
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: rdkafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.25.1
4
+ version: 0.26.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Thijs Cadier
@@ -96,7 +96,7 @@ files:
96
96
  - README.md
97
97
  - Rakefile
98
98
  - bin/verify_kafka_warnings
99
- - dist/librdkafka-2.12.1.tar.gz
99
+ - dist/librdkafka-2.13.2.tar.gz
100
100
  - dist/patches/rdkafka_global_init.patch
101
101
  - docker-compose-ssl.yml
102
102
  - docker-compose.yml
@@ -125,6 +125,8 @@ files:
125
125
  - lib/rdkafka/admin/describe_configs_report.rb
126
126
  - lib/rdkafka/admin/incremental_alter_configs_handle.rb
127
127
  - lib/rdkafka/admin/incremental_alter_configs_report.rb
128
+ - lib/rdkafka/admin/list_offsets_handle.rb
129
+ - lib/rdkafka/admin/list_offsets_report.rb
128
130
  - lib/rdkafka/bindings.rb
129
131
  - lib/rdkafka/callbacks.rb
130
132
  - lib/rdkafka/config.rb
@@ -172,7 +174,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
172
174
  - !ruby/object:Gem::Version
173
175
  version: '0'
174
176
  requirements: []
175
- rubygems_version: 4.0.3
177
+ rubygems_version: 4.0.6
176
178
  specification_version: 4
177
179
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
178
180
  It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+