karafka-rdkafka 0.23.0.beta1-aarch64-linux-gnu → 0.23.1-aarch64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 1a05ee0f1c84cb74133c7f845a100668d0520d8e158b8ddd84861c0a3c339fc8
4
- data.tar.gz: a8e2167cf2c3e260a186a3926606fadf93d8d2dd34d12c600efebdd60550245c
3
+ metadata.gz: f2cba22565892e3f20d07ec6184aa511e72ec10bf9dbfa19c4efbda213239406
4
+ data.tar.gz: 1ffc792ec3e6b942f88142212c3fdd16ccaa8d15a4f416bc9f53b328086ad829
5
5
  SHA512:
6
- metadata.gz: dba50e19735faee2f0149d1620a361ece26e4517a0cf3d4830f193a41d41e6df8254a7ad73b623b74ff3f6ee45e39f505199964ba07aaff706a70ed9a90a394a
7
- data.tar.gz: ad4d95ee82869e46a9d2f92aeb55fd79e2758480d5d12032c7db01a2f97bf77bb70880e8a12ef2d416f84102812eaf4c741b8fdd05cd4a245bd823e3e71a6eec
6
+ metadata.gz: 4bb0992f055c2bc94f840de140b25d57beaf71671767b29a220b2445202db64c4f6f8f7891bb06483d91683ace3b63217279547984103239a51dc070e534d45b
7
+ data.tar.gz: 07c58780a1586cc31edfa88c94496e603defc3488f9ab206d87e4a0e971ac3aa1b12fb5ab03da9d1b2b353016ef07fd2b0b0e021366561583bbabed9d613ba5c
data/CHANGELOG.md CHANGED
@@ -1,7 +1,22 @@
1
1
  # Rdkafka Changelog
2
2
 
3
- ## 0.23.0 (Unreleased)
4
- - [Enhancement] Bump librdkafka to 2.12.0.
3
+ ## 0.23.1 (2025-11-14)
4
+ - **[Feature]** Add integrated fatal error handling in `RdkafkaError.validate!` - automatically detects and handles fatal errors (-150) with a single entrypoint API.
5
+ - [Enhancement] Add optional `client_ptr` parameter to `validate!` for automatic fatal error remapping to actual underlying error codes.
6
+ - [Enhancement] Update all Producer and Consumer `validate!` calls to provide `client_ptr` for comprehensive fatal error handling.
7
+ - [Enhancement] Add `rd_kafka_fatal_error()` FFI binding to retrieve actual fatal error details.
8
+ - [Enhancement] Add `rd_kafka_test_fatal_error()` FFI binding for testing fatal error scenarios.
9
+ - [Enhancement] Add `RdkafkaError.build_fatal` class method for centralized fatal error construction.
10
+ - [Enhancement] Add comprehensive tests for fatal error handling including unit tests and integration tests.
11
+ - [Enhancement] Add `RD_KAFKA_PARTITION_UA` constant for unassigned partition (-1).
12
+ - [Enhancement] Replace magic numbers with named constants: use `RD_KAFKA_RESP_ERR_NO_ERROR` instead of `0` for error code checks (18 instances) and `RD_KAFKA_PARTITION_UA` instead of `-1` for partition values (9 instances) across the codebase for better code clarity and maintainability.
13
+ - [Enhancement] Add `Rdkafka::Testing` module for testing fatal error scenarios on both producers and consumers.
14
+ - [Deprecated] `RdkafkaError.validate_fatal!` - use `validate!` with `client_ptr` parameter instead.
15
+
16
+ ## 0.23.0 (2025-11-01)
17
+ - [Enhancement] Bump librdkafka to 2.12.1.
18
+ - [Enhancement] Force lock FFI to 1.17.1 or higher to include critical bug fixes around GCC, write barriers, and thread restarts for forks.
19
+ - [Fix] Fix for core dump when providing extensions to `oauthbearer_set_token` (dssjoblom)
5
20
 
6
21
  ## 0.22.2 (2025-10-09)
7
22
  - [Fix] Fix Github Action Ruby reference preventing non-compiled releases.
@@ -11,7 +26,7 @@
11
26
  - [Enhancement] Optimize producer header processing with early returns and efficient array operations (69% faster for nil headers, 41% faster for empty headers, 12-32% faster when headers are present, with larger improvements for complex header scenarios).
12
27
 
13
28
  ## 0.22.0 (2025-09-26)
14
- - **[Breaking]** Drop support for Ruby 3.1 to move forward with the fiber scheduler work.
29
+ - **[EOL]** Drop support for Ruby 3.1 to move forward with the fiber scheduler work.
15
30
  - [Enhancement] Bump librdkafka to 2.11.1.
16
31
  - [Enhancement] Improve sigstore attestation for precompiled releases.
17
32
  - [Fix] Fix incorrectly set default SSL certs dir.
@@ -71,7 +86,7 @@
71
86
  - [Fix] Do not run `Rdkafka::Bindings.rd_kafka_global_init` on require to prevent some of macos versions from hanging on Puma fork.
72
87
 
73
88
  ## 0.18.0 (2024-11-26)
74
- - **[Breaking]** Drop Ruby 3.0 support
89
+ - **[EOL]** Drop Ruby 3.0 support
75
90
  - [Enhancement] Bump librdkafka to 2.6.1
76
91
  - [Enhancement] Use default oauth callback if none is passed (bachmanity1)
77
92
  - [Enhancement] Expose `rd_kafka_global_init` to mitigate macos forking issues.
@@ -112,7 +127,7 @@
112
127
  - **[Breaking]** `HashWithSymbolKeysTreatedLikeStrings` has been removed so headers are regular hashes with string keys.
113
128
  - [Enhancement] Bump librdkafka to 2.4.0
114
129
  - [Enhancement] Save two objects on message produced and lower CPU usage on message produced with small improvements.
115
- - [Fix] Remove support for Ruby 2.7. Supporting it was a bug since rest of the karafka ecosystem no longer supports it.
130
+ - **[EOL]** Remove support for Ruby 2.7. Supporting it was a bug since the rest of the karafka ecosystem no longer supports it.
116
131
 
117
132
  ## 0.15.2 (2024-07-10)
118
133
  - [Fix] Switch to local release of librdkafka to mitigate its unavailability.
@@ -193,7 +208,7 @@
193
208
  - [Enhancement] Get consumer position (thijsc & mensfeld)
194
209
 
195
210
  ## 0.13.7 (2023-10-31)
196
- - [Change] Drop support for Ruby 2.6 due to incompatibilities in usage of `ObjectSpace::WeakMap`
211
+ - **[EOL]** Drop support for Ruby 2.6 due to incompatibilities in usage of `ObjectSpace::WeakMap`
197
212
  - [Fix] Fix dangling Opaque references.
198
213
 
199
214
  ## 0.13.6 (2023-10-17)
@@ -260,7 +275,7 @@
260
275
 
261
276
  ## 0.11.0 (2021-11-17)
262
277
  - Upgrade librdkafka to 1.8.2
263
- - Bump supported minimum Ruby version to 2.6
278
+ - **[EOL]** Bump supported minimum Ruby version to 2.6
264
279
  - Better homebrew path detection
265
280
 
266
281
  ## 0.10.0 (2021-09-07)
data/README.md CHANGED
@@ -63,7 +63,7 @@ Contributions should generally be made to the upstream [rdkafka-ruby repository]
63
63
 
64
64
  | rdkafka-ruby | librdkafka | patches |
65
65
  |-|-|-|
66
- | 0.23.x (Unreleased) | 2.12.0 (2025-10-09) | yes |
66
+ | 0.23.x (2025-11-01) | 2.12.1 (2025-10-16) | yes |
67
67
  | 0.22.x (2025-09-26) | 2.11.1 (2025-08-18) | yes |
68
68
  | 0.21.x (2025-08-18) | 2.11.0 (2025-07-03) | yes |
69
69
  | 0.20.x (2025-07-17) | 2.8.0 (2025-01-07) | yes |
data/ext/librdkafka.so CHANGED
Binary file
@@ -67,7 +67,7 @@ Gem::Specification.new do |gem|
67
67
  gem.extensions = %w(ext/Rakefile)
68
68
  end
69
69
 
70
- gem.add_dependency 'ffi', '~> 1.15'
70
+ gem.add_dependency 'ffi', '~> 1.17.1'
71
71
  gem.add_dependency 'json', '> 2.0'
72
72
  gem.add_dependency 'logger'
73
73
  gem.add_dependency 'mini_portile2', '~> 2.6'
@@ -77,7 +77,7 @@ module Rdkafka
77
77
  "Waiting for #{operation_name} timed out after #{max_wait_timeout} seconds"
78
78
  )
79
79
  end
80
- elsif self[:response] != 0 && raise_response_error
80
+ elsif self[:response] != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR && raise_response_error
81
81
  raise_error
82
82
  else
83
83
  return create_result
data/lib/rdkafka/admin.rb CHANGED
@@ -149,7 +149,7 @@ module Rdkafka
149
149
  # Create and register the handle we will return to the caller
150
150
  create_topic_handle = CreateTopicHandle.new
151
151
  create_topic_handle[:pending] = true
152
- create_topic_handle[:response] = -1
152
+ create_topic_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
153
153
  CreateTopicHandle.register(create_topic_handle)
154
154
  admin_options_ptr = @native_kafka.with_inner do |inner|
155
155
  Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
@@ -202,7 +202,7 @@ module Rdkafka
202
202
  # Create and register the handle we will return to the caller
203
203
  delete_groups_handle = DeleteGroupsHandle.new
204
204
  delete_groups_handle[:pending] = true
205
- delete_groups_handle[:response] = -1
205
+ delete_groups_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
206
206
  DeleteGroupsHandle.register(delete_groups_handle)
207
207
  admin_options_ptr = @native_kafka.with_inner do |inner|
208
208
  Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
@@ -259,7 +259,7 @@ module Rdkafka
259
259
  # Create and register the handle we will return to the caller
260
260
  delete_topic_handle = DeleteTopicHandle.new
261
261
  delete_topic_handle[:pending] = true
262
- delete_topic_handle[:response] = -1
262
+ delete_topic_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
263
263
  DeleteTopicHandle.register(delete_topic_handle)
264
264
  admin_options_ptr = @native_kafka.with_inner do |inner|
265
265
  Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
@@ -327,7 +327,7 @@ module Rdkafka
327
327
  # Create and register the handle we will return to the caller
328
328
  create_partitions_handle = CreatePartitionsHandle.new
329
329
  create_partitions_handle[:pending] = true
330
- create_partitions_handle[:response] = -1
330
+ create_partitions_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
331
331
  CreatePartitionsHandle.register(create_partitions_handle)
332
332
  admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
333
333
  Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
@@ -427,7 +427,7 @@ module Rdkafka
427
427
  # Create and register the handle that we will return to the caller
428
428
  create_acl_handle = CreateAclHandle.new
429
429
  create_acl_handle[:pending] = true
430
- create_acl_handle[:response] = -1
430
+ create_acl_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
431
431
  CreateAclHandle.register(create_acl_handle)
432
432
 
433
433
  admin_options_ptr = @native_kafka.with_inner do |inner|
@@ -534,7 +534,7 @@ module Rdkafka
534
534
  # Create and register the handle that we will return to the caller
535
535
  delete_acl_handle = DeleteAclHandle.new
536
536
  delete_acl_handle[:pending] = true
537
- delete_acl_handle[:response] = -1
537
+ delete_acl_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
538
538
  DeleteAclHandle.register(delete_acl_handle)
539
539
 
540
540
  admin_options_ptr = @native_kafka.with_inner do |inner|
@@ -634,7 +634,7 @@ module Rdkafka
634
634
  # Create and register the handle that we will return to the caller
635
635
  describe_acl_handle = DescribeAclHandle.new
636
636
  describe_acl_handle[:pending] = true
637
- describe_acl_handle[:response] = -1
637
+ describe_acl_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
638
638
  DescribeAclHandle.register(describe_acl_handle)
639
639
 
640
640
  admin_options_ptr = @native_kafka.with_inner do |inner|
@@ -680,7 +680,7 @@ module Rdkafka
680
680
 
681
681
  handle = DescribeConfigsHandle.new
682
682
  handle[:pending] = true
683
- handle[:response] = -1
683
+ handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
684
684
 
685
685
  queue_ptr = @native_kafka.with_inner do |inner|
686
686
  Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
@@ -753,7 +753,7 @@ module Rdkafka
753
753
 
754
754
  handle = IncrementalAlterConfigsHandle.new
755
755
  handle[:pending] = true
756
- handle[:response] = -1
756
+ handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
757
757
 
758
758
  queue_ptr = @native_kafka.with_inner do |inner|
759
759
  Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
@@ -22,14 +22,55 @@ module Rdkafka
22
22
  end
23
23
  end
24
24
 
25
- ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
25
+ # Wrap ffi_lib to provide better error messages for glibc compatibility issues
26
+ begin
27
+ ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
28
+ rescue LoadError => e
29
+ error_message = e.message
30
+
31
+ # Check if this is a glibc version mismatch error
32
+ if error_message =~ /GLIBC_[\d.]+['"` ]?\s*not found/i
33
+ glibc_version = error_message[/GLIBC_([\d.]+)/, 1] || 'unknown'
34
+
35
+ raise Rdkafka::LibraryLoadError, <<~ERROR_MSG.strip
36
+ Failed to load librdkafka due to glibc compatibility issue.
37
+
38
+ The precompiled librdkafka binary requires glibc version #{glibc_version} or higher,
39
+ but your system has an older version installed.
40
+
41
+ To resolve this issue, you have two options:
42
+
43
+ 1. Upgrade your system to a supported platform (recommended)
44
+
45
+ 2. Force compilation from source by reinstalling without the precompiled binary:
46
+ gem install rdkafka --platform=ruby
47
+
48
+ Or if using Bundler, add to your Gemfile:
49
+ gem 'rdkafka', force_ruby_platform: true
50
+
51
+ Original error: #{error_message}
52
+ ERROR_MSG
53
+ else
54
+ # Re-raise the original error if it's not a glibc issue
55
+ raise
56
+ end
57
+ end
26
58
 
27
59
  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
28
60
  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
29
61
  RD_KAFKA_RESP_ERR__STATE = -172
30
62
  RD_KAFKA_RESP_ERR__NOENT = -156
63
+ RD_KAFKA_RESP_ERR__FATAL = -150
31
64
  RD_KAFKA_RESP_ERR_NO_ERROR = 0
32
65
 
66
+ # Buffer size for fatal error strings, matches librdkafka expectations
67
+ FATAL_ERROR_BUFFER_SIZE = 256
68
+
69
+ # Unassigned partition
70
+ RD_KAFKA_PARTITION_UA = -1
71
+ # String representation of unassigned partition (used in stats hash keys)
72
+ RD_KAFKA_PARTITION_UA_STR = RD_KAFKA_PARTITION_UA.to_s.freeze
73
+
33
74
  RD_KAFKA_OFFSET_END = -1
34
75
  RD_KAFKA_OFFSET_BEGINNING = -2
35
76
  RD_KAFKA_OFFSET_STORED = -1000
@@ -161,6 +202,8 @@ module Rdkafka
161
202
  attach_function :rd_kafka_error_txn_requires_abort, [:pointer], :int
162
203
  attach_function :rd_kafka_error_destroy, [:pointer], :void
163
204
  attach_function :rd_kafka_get_err_descs, [:pointer, :pointer], :void
205
+ attach_function :rd_kafka_fatal_error, [:pointer, :pointer, :int], :int
206
+ attach_function :rd_kafka_test_fatal_error, [:pointer, :int, :string], :int
164
207
 
165
208
  # Configuration
166
209
 
@@ -234,7 +277,7 @@ module Rdkafka
234
277
  # Since this cache is shared, having few consumers and/or producers in one process will
235
278
  # automatically improve the querying times even with low refresh times.
236
279
  (stats['topics'] || EMPTY_HASH).each do |topic_name, details|
237
- partitions_count = details['partitions'].keys.reject { |k| k == '-1' }.size
280
+ partitions_count = details['partitions'].keys.reject { |k| k == RD_KAFKA_PARTITION_UA_STR }.size
238
281
 
239
282
  next unless partitions_count.positive?
240
283
 
@@ -245,14 +288,51 @@ module Rdkafka
245
288
  end
246
289
 
247
290
  # Return 0 so librdkafka frees the json string
248
- 0
291
+ RD_KAFKA_RESP_ERR_NO_ERROR
292
+ end
293
+
294
+ # Retrieves fatal error details from a kafka client handle.
295
+ # This is a helper method to extract fatal error information consistently
296
+ # across different parts of the codebase (callbacks, testing utilities, etc.).
297
+ #
298
+ # @param client_ptr [FFI::Pointer] Native kafka client pointer
299
+ # @return [Hash, nil] Hash with :error_code and :error_string if fatal error occurred, nil otherwise
300
+ #
301
+ # @example
302
+ # details = Rdkafka::Bindings.extract_fatal_error(client_ptr)
303
+ # if details
304
+ # puts "Fatal error #{details[:error_code]}: #{details[:error_string]}"
305
+ # end
306
+ def self.extract_fatal_error(client_ptr)
307
+ error_buffer = FFI::MemoryPointer.new(:char, FATAL_ERROR_BUFFER_SIZE)
308
+
309
+ error_code = rd_kafka_fatal_error(client_ptr, error_buffer, FATAL_ERROR_BUFFER_SIZE)
310
+
311
+ return nil if error_code == RD_KAFKA_RESP_ERR_NO_ERROR
312
+
313
+ {
314
+ error_code: error_code,
315
+ error_string: error_buffer.read_string
316
+ }
249
317
  end
250
318
 
251
319
  ErrorCallback = FFI::Function.new(
252
320
  :void, [:pointer, :int, :string, :pointer]
253
- ) do |_client_prr, err_code, reason, _opaque|
321
+ ) do |client_ptr, err_code, reason, _opaque|
254
322
  if Rdkafka::Config.error_callback
255
- error = Rdkafka::RdkafkaError.build(err_code, broker_message: reason)
323
+ # Handle fatal errors according to librdkafka documentation:
324
+ # When ERR__FATAL is received, we must call rd_kafka_fatal_error()
325
+ # to get the actual underlying fatal error code and description.
326
+ if err_code == RD_KAFKA_RESP_ERR__FATAL
327
+ error = Rdkafka::RdkafkaError.build_fatal(
328
+ client_ptr,
329
+ fallback_error_code: err_code,
330
+ fallback_message: reason
331
+ )
332
+ else
333
+ error = Rdkafka::RdkafkaError.build(err_code, broker_message: reason)
334
+ end
335
+
256
336
  error.set_backtrace(caller)
257
337
  Rdkafka::Config.error_callback.call(error)
258
338
  end
@@ -404,7 +484,7 @@ module Rdkafka
404
484
 
405
485
  def self.partitioner(topic_ptr, str, partition_count, partitioner = "consistent_random")
406
486
  # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
407
- return -1 unless partition_count&.nonzero?
487
+ return RD_KAFKA_PARTITION_UA unless partition_count&.nonzero?
408
488
 
409
489
  str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
410
490
  method_name = PARTITIONERS.fetch(partitioner) do
@@ -28,7 +28,7 @@ module Rdkafka
28
28
  native_error = Rdkafka::Bindings.rd_kafka_group_result_error(group_result_pointer)
29
29
 
30
30
  if native_error.null?
31
- @result_error = 0
31
+ @result_error = Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
32
32
  @error_string = FFI::Pointer::NULL
33
33
  else
34
34
  @result_error = native_error[:code]
@@ -76,7 +76,7 @@ module Rdkafka
76
76
  rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_error(acl_result_pointer)
77
77
  @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
78
78
  @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
79
- if @result_error == 0
79
+ if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
80
80
  # Get the number of matching acls
81
81
  pointer_to_size_t = FFI::MemoryPointer.new(:int32)
82
82
  @matching_acls = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_matching_acls(acl_result_pointer, pointer_to_size_t)
@@ -102,7 +102,7 @@ module Rdkafka
102
102
  @matching_acls=[]
103
103
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
104
104
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
105
- if @result_error == 0
105
+ if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
106
106
  acl_describe_result = Rdkafka::Bindings.rd_kafka_event_DescribeAcls_result(event_ptr)
107
107
  # Get the number of matching acls
108
108
  pointer_to_size_t = FFI::MemoryPointer.new(:int32)
@@ -120,7 +120,7 @@ module Rdkafka
120
120
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
121
121
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
122
122
 
123
- if @result_error == 0
123
+ if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
124
124
  configs_describe_result = Rdkafka::Bindings.rd_kafka_event_DescribeConfigs_result(event_ptr)
125
125
  # Get the number of matching acls
126
126
  pointer_to_size_t = FFI::MemoryPointer.new(:int32)
@@ -138,7 +138,7 @@ module Rdkafka
138
138
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
139
139
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
140
140
 
141
- if @result_error == 0
141
+ if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
142
142
  incremental_alter_result = Rdkafka::Bindings.rd_kafka_event_IncrementalAlterConfigs_result(event_ptr)
143
143
  # Get the number of matching acls
144
144
  pointer_to_size_t = FFI::MemoryPointer.new(:int32)
@@ -202,7 +202,7 @@ module Rdkafka
202
202
  describe_configs_handle[:response_string] = describe_configs.error_string
203
203
  describe_configs_handle[:pending] = false
204
204
 
205
- if describe_configs.result_error == 0
205
+ if describe_configs.result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
206
206
  describe_configs_handle[:config_entries] = describe_configs.results
207
207
  describe_configs_handle[:entry_count] = describe_configs.results_count
208
208
  end
@@ -220,7 +220,7 @@ module Rdkafka
220
220
  incremental_alter_handle[:response_string] = incremental_alter.error_string
221
221
  incremental_alter_handle[:pending] = false
222
222
 
223
- if incremental_alter.result_error == 0
223
+ if incremental_alter.result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
224
224
  incremental_alter_handle[:config_entries] = incremental_alter.results
225
225
  incremental_alter_handle[:entry_count] = incremental_alter.results_count
226
226
  end
@@ -313,7 +313,7 @@ module Rdkafka
313
313
  delete_acl_handle[:response] = delete_acl_results[0].result_error
314
314
  delete_acl_handle[:response_string] = delete_acl_results[0].error_string
315
315
 
316
- if delete_acl_results[0].result_error == 0
316
+ if delete_acl_results[0].result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
317
317
  delete_acl_handle[:matching_acls] = delete_acl_results[0].matching_acls
318
318
  delete_acl_handle[:matching_acls_count] = delete_acl_results[0].matching_acls_count
319
319
  end
@@ -330,7 +330,7 @@ module Rdkafka
330
330
  describe_acl_handle[:response] = describe_acl.result_error
331
331
  describe_acl_handle[:response_string] = describe_acl.error_string
332
332
 
333
- if describe_acl.result_error == 0
333
+ if describe_acl.result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
334
334
  describe_acl_handle[:acls] = describe_acl.matching_acls
335
335
  describe_acl_handle[:acls_count] = describe_acl.matching_acls_count
336
336
  end
@@ -33,7 +33,7 @@ module Rdkafka
33
33
  def to_s
34
34
  message = "<Partition #{partition}"
35
35
  message += " offset=#{offset}" if offset
36
- message += " err=#{err}" if err != 0
36
+ message += " err=#{err}" if err != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
37
37
  message += " metadata=#{metadata}" if metadata != nil
38
38
  message += ">"
39
39
  message
@@ -57,7 +57,7 @@ module Rdkafka
57
57
  if partitions.is_a? Integer
58
58
  partitions = (0..partitions - 1)
59
59
  end
60
- @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil, 0) }
60
+ @data[topic.to_s] = partitions.map { |p| Partition.new(p, nil, Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR) }
61
61
  end
62
62
  end
63
63
 
@@ -109,7 +109,7 @@ module Rdkafka
109
109
  native_tpl[:cnt].times do |i|
110
110
  ptr = native_tpl[:elems] + (i * Rdkafka::Bindings::TopicPartition.size)
111
111
  elem = Rdkafka::Bindings::TopicPartition.new(ptr)
112
- if elem[:partition] == -1
112
+ if elem[:partition] == Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
113
113
  data[elem[:topic]] = nil
114
114
  else
115
115
  partitions = data[elem[:topic]] || []
@@ -178,7 +178,7 @@ module Rdkafka
178
178
  Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
179
179
  tpl,
180
180
  topic,
181
- -1
181
+ Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
182
182
  )
183
183
  end
184
184
  end
@@ -67,15 +67,14 @@ module Rdkafka
67
67
  tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
68
68
 
69
69
  topics.each do |topic|
70
- Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
70
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, Rdkafka::Bindings::RD_KAFKA_PARTITION_UA)
71
71
  end
72
72
 
73
73
  # Subscribe to topic partition list and check this was successful
74
- response = @native_kafka.with_inner do |inner|
75
- Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
74
+ @native_kafka.with_inner do |inner|
75
+ response = Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
76
+ Rdkafka::RdkafkaError.validate!(response, "Error subscribing to '#{topics.join(', ')}'", client_ptr: inner)
76
77
  end
77
-
78
- Rdkafka::RdkafkaError.validate!(response, "Error subscribing to '#{topics.join(', ')}'")
79
78
  ensure
80
79
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
81
80
  end
@@ -87,12 +86,11 @@ module Rdkafka
87
86
  def unsubscribe
88
87
  closed_consumer_check(__method__)
89
88
 
90
- response = @native_kafka.with_inner do |inner|
91
- Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
89
+ @native_kafka.with_inner do |inner|
90
+ response = Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
91
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
92
92
  end
93
93
 
94
- Rdkafka::RdkafkaError.validate!(response)
95
-
96
94
  nil
97
95
  end
98
96
 
@@ -115,7 +113,7 @@ module Rdkafka
115
113
  Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
116
114
  end
117
115
 
118
- if response != 0
116
+ if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
119
117
  list = TopicPartitionList.from_native_tpl(tpl)
120
118
  raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
121
119
  end
@@ -139,12 +137,11 @@ module Rdkafka
139
137
  tpl = list.to_native_tpl
140
138
 
141
139
  begin
142
- response = @native_kafka.with_inner do |inner|
143
- Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
140
+ @native_kafka.with_inner do |inner|
141
+ response = Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
142
+ Rdkafka::RdkafkaError.validate!(response, "Error resume '#{list.to_h}'", client_ptr: inner)
144
143
  end
145
144
 
146
- Rdkafka::RdkafkaError.validate!(response, "Error resume '#{list.to_h}'")
147
-
148
145
  nil
149
146
  ensure
150
147
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
@@ -159,12 +156,11 @@ module Rdkafka
159
156
  closed_consumer_check(__method__)
160
157
 
161
158
  ptr = FFI::MemoryPointer.new(:pointer)
162
- response = @native_kafka.with_inner do |inner|
163
- Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
159
+ @native_kafka.with_inner do |inner|
160
+ response = Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
161
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
164
162
  end
165
163
 
166
- Rdkafka::RdkafkaError.validate!(response)
167
-
168
164
  native = ptr.read_pointer
169
165
 
170
166
  begin
@@ -188,11 +184,10 @@ module Rdkafka
188
184
  tpl = list.to_native_tpl
189
185
 
190
186
  begin
191
- response = @native_kafka.with_inner do |inner|
192
- Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
187
+ @native_kafka.with_inner do |inner|
188
+ response = Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
189
+ Rdkafka::RdkafkaError.validate!(response, "Error assigning '#{list.to_h}'", client_ptr: inner)
193
190
  end
194
-
195
- Rdkafka::RdkafkaError.validate!(response, "Error assigning '#{list.to_h}'")
196
191
  ensure
197
192
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
198
193
  end
@@ -206,12 +201,11 @@ module Rdkafka
206
201
  closed_consumer_check(__method__)
207
202
 
208
203
  ptr = FFI::MemoryPointer.new(:pointer)
209
- response = @native_kafka.with_inner do |inner|
210
- Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
204
+ @native_kafka.with_inner do |inner|
205
+ response = Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
206
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
211
207
  end
212
208
 
213
- Rdkafka::RdkafkaError.validate!(response)
214
-
215
209
  tpl = ptr.read_pointer
216
210
 
217
211
  if !tpl.null?
@@ -255,12 +249,11 @@ module Rdkafka
255
249
  tpl = list.to_native_tpl
256
250
 
257
251
  begin
258
- response = @native_kafka.with_inner do |inner|
259
- Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
252
+ @native_kafka.with_inner do |inner|
253
+ response = Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
254
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
260
255
  end
261
256
 
262
- Rdkafka::RdkafkaError.validate!(response)
263
-
264
257
  TopicPartitionList.from_native_tpl(tpl)
265
258
  ensure
266
259
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
@@ -284,12 +277,11 @@ module Rdkafka
284
277
 
285
278
  tpl = list.to_native_tpl
286
279
 
287
- response = @native_kafka.with_inner do |inner|
288
- Rdkafka::Bindings.rd_kafka_position(inner, tpl)
280
+ @native_kafka.with_inner do |inner|
281
+ response = Rdkafka::Bindings.rd_kafka_position(inner, tpl)
282
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
289
283
  end
290
284
 
291
- Rdkafka::RdkafkaError.validate!(response)
292
-
293
285
  TopicPartitionList.from_native_tpl(tpl)
294
286
  end
295
287
 
@@ -306,8 +298,8 @@ module Rdkafka
306
298
  low = FFI::MemoryPointer.new(:int64, 1)
307
299
  high = FFI::MemoryPointer.new(:int64, 1)
308
300
 
309
- response = @native_kafka.with_inner do |inner|
310
- Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
301
+ @native_kafka.with_inner do |inner|
302
+ response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
311
303
  inner,
312
304
  topic,
313
305
  partition,
@@ -315,10 +307,9 @@ module Rdkafka
315
307
  high,
316
308
  timeout_ms,
317
309
  )
310
+ Rdkafka::RdkafkaError.validate!(response, "Error querying watermark offsets for partition #{partition} of #{topic}", client_ptr: inner)
318
311
  end
319
312
 
320
- Rdkafka::RdkafkaError.validate!(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
321
-
322
313
  return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
323
314
  ensure
324
315
  low.free unless low.nil?
@@ -414,15 +405,14 @@ module Rdkafka
414
405
 
415
406
  tpl = list.to_native_tpl
416
407
 
417
- response = @native_kafka.with_inner do |inner|
418
- Rdkafka::Bindings.rd_kafka_offsets_store(
408
+ @native_kafka.with_inner do |inner|
409
+ response = Rdkafka::Bindings.rd_kafka_offsets_store(
419
410
  inner,
420
411
  tpl
421
412
  )
413
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
422
414
  end
423
415
 
424
- Rdkafka::RdkafkaError.validate!(response)
425
-
426
416
  nil
427
417
  ensure
428
418
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
@@ -459,15 +449,19 @@ module Rdkafka
459
449
  nil
460
450
  )
461
451
  end
452
+
462
453
  response = Rdkafka::Bindings.rd_kafka_seek(
463
454
  native_topic,
464
455
  partition,
465
456
  offset,
466
457
  0 # timeout
467
458
  )
468
- Rdkafka::RdkafkaError.validate!(response)
469
459
 
470
- nil
460
+ return nil if response == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
461
+
462
+ @native_kafka.with_inner do |inner|
463
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
464
+ end
471
465
  ensure
472
466
  if native_topic && !native_topic.null?
473
467
  Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
@@ -490,16 +484,15 @@ module Rdkafka
490
484
 
491
485
  tpl = list.to_native_tpl
492
486
 
493
- response = @native_kafka.with_inner do |inner|
494
- Rdkafka::Bindings.rd_kafka_offsets_for_times(
487
+ @native_kafka.with_inner do |inner|
488
+ response = Rdkafka::Bindings.rd_kafka_offsets_for_times(
495
489
  inner,
496
490
  tpl,
497
491
  timeout_ms # timeout
498
492
  )
493
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
499
494
  end
500
495
 
501
- Rdkafka::RdkafkaError.validate!(response)
502
-
503
496
  TopicPartitionList.from_native_tpl(tpl)
504
497
  ensure
505
498
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
@@ -528,12 +521,11 @@ module Rdkafka
528
521
  tpl = list ? list.to_native_tpl : nil
529
522
 
530
523
  begin
531
- response = @native_kafka.with_inner do |inner|
532
- Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
524
+ @native_kafka.with_inner do |inner|
525
+ response = Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
526
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
533
527
  end
534
528
 
535
- Rdkafka::RdkafkaError.validate!(response)
536
-
537
529
  nil
538
530
  ensure
539
531
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
@@ -558,10 +550,12 @@ module Rdkafka
558
550
  native_message = Rdkafka::Bindings::Message.new(message_ptr)
559
551
 
560
552
  # Create a message to pass out
561
- return Rdkafka::Consumer::Message.new(native_message) if native_message[:err].zero?
553
+ return Rdkafka::Consumer::Message.new(native_message) if native_message[:err] == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
562
554
 
563
- # Raise error if needed
564
- Rdkafka::RdkafkaError.validate!(native_message)
555
+ # Raise error if needed - wrap just the validate! call with client access
556
+ @native_kafka.with_inner do |inner|
557
+ Rdkafka::RdkafkaError.validate!(native_message, client_ptr: inner)
558
+ end
565
559
  ensure
566
560
  # Clean up rdkafka message if there is one
567
561
  if message_ptr && !message_ptr.null?
data/lib/rdkafka/error.rb CHANGED
@@ -29,7 +29,7 @@ module Rdkafka
29
29
  def build_from_c(response_ptr, message_prefix = nil, broker_message: nil)
30
30
  code = Rdkafka::Bindings.rd_kafka_error_code(response_ptr)
31
31
 
32
- return false if code.zero?
32
+ return false if code == Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
33
33
 
34
34
  message = broker_message || Rdkafka::Bindings.rd_kafka_err2str(code)
35
35
  fatal = !Rdkafka::Bindings.rd_kafka_error_is_fatal(response_ptr).zero?
@@ -51,11 +51,11 @@ module Rdkafka
51
51
  def build(response_ptr_or_code, message_prefix = nil, broker_message: nil)
52
52
  case response_ptr_or_code
53
53
  when Integer
54
- return false if response_ptr_or_code.zero?
54
+ return false if response_ptr_or_code == Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
55
55
 
56
56
  new(response_ptr_or_code, message_prefix, broker_message: broker_message)
57
57
  when Bindings::Message
58
- return false if response_ptr_or_code[:err].zero?
58
+ return false if response_ptr_or_code[:err] == Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
59
59
 
60
60
  unless response_ptr_or_code[:payload].null?
61
61
  message_prefix ||= response_ptr_or_code[:payload].read_string(response_ptr_or_code[:len])
@@ -81,9 +81,49 @@ module Rdkafka
81
81
  end
82
82
  end
83
83
 
84
- def validate!(response_ptr_or_code, message_prefix = nil, broker_message: nil)
84
+ def validate!(response_ptr_or_code, message_prefix = nil, broker_message: nil, client_ptr: nil)
85
85
  error = build(response_ptr_or_code, message_prefix, broker_message: broker_message)
86
- error ? raise(error) : false
86
+
87
+ return false unless error
88
+
89
+ # Auto-detect and handle fatal errors (-150)
90
+ if error.rdkafka_response == Bindings::RD_KAFKA_RESP_ERR__FATAL && client_ptr
91
+ # Discover the underlying fatal error from librdkafka
92
+ error = build_fatal(
93
+ client_ptr,
94
+ fallback_error_code: error.rdkafka_response,
95
+ fallback_message: broker_message
96
+ )
97
+ end
98
+
99
+ raise error
100
+ end
101
+
102
+ # Build a fatal error from librdkafka's fatal error state.
103
+ # Calls rd_kafka_fatal_error() to get the actual underlying error code and description.
104
+ #
105
+ # @param client_ptr [FFI::Pointer] Pointer to rd_kafka_t client
106
+ # @param fallback_error_code [Integer] Error code to use if no fatal error found (default: -150)
107
+ # @param fallback_message [String, nil] Message to use if no fatal error found
108
+ # @return [RdkafkaError] Error object with fatal flag set to true
109
+ def build_fatal(client_ptr, fallback_error_code: -150, fallback_message: nil)
110
+ fatal_error_details = Rdkafka::Bindings.extract_fatal_error(client_ptr)
111
+
112
+ if fatal_error_details
113
+ new(
114
+ fatal_error_details[:error_code],
115
+ broker_message: fatal_error_details[:error_string],
116
+ fatal: true
117
+ )
118
+ else
119
+ # Fallback: if extract_fatal_error returns nil (shouldn't happen in practice),
120
+ # the error code itself still indicates a fatal condition
121
+ new(
122
+ fallback_error_code,
123
+ broker_message: fallback_message,
124
+ fatal: true
125
+ )
126
+ end
87
127
  end
88
128
  end
89
129
 
@@ -198,4 +238,7 @@ module Rdkafka
198
238
  super("Illegal call to a closed inner librdkafka instance")
199
239
  end
200
240
  end
241
+
242
+ # Error class for librdkafka library loading failures (e.g., glibc compatibility issues).
243
+ class LibraryLoadError < BaseError; end
201
244
  end
@@ -12,15 +12,21 @@ module Rdkafka
12
12
  # @return [Integer] 0 on success
13
13
  def oauthbearer_set_token(token:, lifetime_ms:, principal_name:, extensions: nil)
14
14
  error_buffer = FFI::MemoryPointer.from_string(" " * 256)
15
+ extensions_ptr, extensions_str_ptrs = map_extensions(extensions)
15
16
 
16
- response = @native_kafka.with_inner do |inner|
17
- Rdkafka::Bindings.rd_kafka_oauthbearer_set_token(
18
- inner, token, lifetime_ms, principal_name,
19
- flatten_extensions(extensions), extension_size(extensions), error_buffer, 256
20
- )
17
+ begin
18
+ response = @native_kafka.with_inner do |inner|
19
+ Rdkafka::Bindings.rd_kafka_oauthbearer_set_token(
20
+ inner, token, lifetime_ms, principal_name,
21
+ extensions_ptr, extension_size(extensions), error_buffer, 256
22
+ )
23
+ end
24
+ ensure
25
+ extensions_str_ptrs&.each { |ptr| ptr.free }
26
+ extensions_ptr&.free
21
27
  end
22
28
 
23
- return response if response.zero?
29
+ return response if response == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
24
30
 
25
31
  oauthbearer_set_token_failure("Failed to set token: #{error_buffer.read_string}")
26
32
 
@@ -41,10 +47,31 @@ module Rdkafka
41
47
 
42
48
  private
43
49
 
44
- # Flatten the extensions hash into a string according to the spec, https://datatracker.ietf.org/doc/html/rfc7628#section-3.1
45
- def flatten_extensions(extensions)
46
- return nil unless extensions
47
- "\x01#{extensions.map { |e| e.join("=") }.join("\x01")}"
50
+ # Convert extensions hash to FFI::MemoryPointer (const char **).
51
+ # Note: the returned pointers must be freed manually (autorelease = false).
52
+ def map_extensions(extensions)
53
+ return [nil, nil] if extensions.nil? || extensions.empty?
54
+
55
+ # https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
56
+
57
+ # The method argument is const char **
58
+ array_ptr = FFI::MemoryPointer.new(:pointer, extension_size(extensions))
59
+ array_ptr.autorelease = false
60
+ str_ptrs = []
61
+
62
+ # Element i is the key, i + 1 is the value.
63
+ extensions.each_with_index do |(k, v), i|
64
+ k_ptr = FFI::MemoryPointer.from_string(k.to_s)
65
+ k_ptr.autorelease = false
66
+ str_ptrs << k_ptr
67
+ v_ptr = FFI::MemoryPointer.from_string(v.to_s)
68
+ v_ptr.autorelease = false
69
+ str_ptrs << v_ptr
70
+ array_ptr[i * 2].put_pointer(0, k_ptr)
71
+ array_ptr[i * 2 + 1].put_pointer(0, v_ptr)
72
+ end
73
+
74
+ [array_ptr, str_ptrs]
48
75
  end
49
76
 
50
77
  # extension_size is the number of keys + values which should be a non-negative even number
@@ -25,7 +25,7 @@ module Rdkafka
25
25
 
26
26
  # @return [DeliveryReport] a report on the delivery of the message
27
27
  def create_result
28
- if self[:response] == 0
28
+ if self[:response] == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
29
29
  DeliveryReport.new(
30
30
  self[:partition],
31
31
  self[:offset],
@@ -0,0 +1,51 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rdkafka
4
+ # Testing utilities for Producer instances.
5
+ # This module is NOT included by default and should only be used in test environments.
6
+ #
7
+ # This module provides librdkafka native testing utilities that are needed to trigger certain
8
+ # behaviours that are hard to reproduce in stable environments, particularly fatal error
9
+ # scenarios in idempotent and transactional producers.
10
+ #
11
+ # To use in tests for producers:
12
+ # producer.singleton_class.include(Rdkafka::Testing)
13
+ #
14
+ # Or include it for all producers in your test suite:
15
+ # Rdkafka::Producer.include(Rdkafka::Testing)
16
+ #
17
+ # IMPORTANT: Fatal errors leave the producer client in an unusable state. After triggering
18
+ # a fatal error, the producer should be closed and discarded. Do not attempt to reuse a
19
+ # producer that has experienced a fatal error.
20
+ module Testing
21
+ # Triggers a test fatal error using rd_kafka_test_fatal_error.
22
+ # This is useful for testing fatal error handling without needing actual broker issues.
23
+ #
24
+ # @param error_code [Integer] The error code to trigger (e.g., 47 for invalid_producer_epoch)
25
+ # @param reason [String] Descriptive reason for the error
26
+ # @return [Integer] Result code from rd_kafka_test_fatal_error (0 on success)
27
+ #
28
+ # @example
29
+ # producer.trigger_test_fatal_error(47, "Test producer fencing")
30
+ def trigger_test_fatal_error(error_code, reason)
31
+ @native_kafka.with_inner do |inner|
32
+ Rdkafka::Bindings.rd_kafka_test_fatal_error(inner, error_code, reason)
33
+ end
34
+ end
35
+
36
+ # Checks if a fatal error has occurred and retrieves error details.
37
+ # Calls rd_kafka_fatal_error to get the actual fatal error code and message.
38
+ #
39
+ # @return [Hash, nil] Hash with :error_code and :error_string if fatal error occurred, nil otherwise
40
+ #
41
+ # @example
42
+ # if fatal_error = producer.fatal_error
43
+ # puts "Fatal error #{fatal_error[:error_code]}: #{fatal_error[:error_string]}"
44
+ # end
45
+ def fatal_error
46
+ @native_kafka.with_inner do |inner|
47
+ Rdkafka::Bindings.extract_fatal_error(inner)
48
+ end
49
+ end
50
+ end
51
+ end
@@ -142,7 +142,7 @@ module Rdkafka
142
142
  @native_kafka.with_inner do |inner|
143
143
  response_ptr = Rdkafka::Bindings.rd_kafka_init_transactions(inner, -1)
144
144
 
145
- Rdkafka::RdkafkaError.validate!(response_ptr) || true
145
+ Rdkafka::RdkafkaError.validate!(response_ptr, client_ptr: inner) || true
146
146
  end
147
147
  end
148
148
 
@@ -152,7 +152,7 @@ module Rdkafka
152
152
  @native_kafka.with_inner do |inner|
153
153
  response_ptr = Rdkafka::Bindings.rd_kafka_begin_transaction(inner)
154
154
 
155
- Rdkafka::RdkafkaError.validate!(response_ptr) || true
155
+ Rdkafka::RdkafkaError.validate!(response_ptr, client_ptr: inner) || true
156
156
  end
157
157
  end
158
158
 
@@ -162,7 +162,7 @@ module Rdkafka
162
162
  @native_kafka.with_inner do |inner|
163
163
  response_ptr = Rdkafka::Bindings.rd_kafka_commit_transaction(inner, timeout_ms)
164
164
 
165
- Rdkafka::RdkafkaError.validate!(response_ptr) || true
165
+ Rdkafka::RdkafkaError.validate!(response_ptr, client_ptr: inner) || true
166
166
  end
167
167
  end
168
168
 
@@ -171,7 +171,7 @@ module Rdkafka
171
171
 
172
172
  @native_kafka.with_inner do |inner|
173
173
  response_ptr = Rdkafka::Bindings.rd_kafka_abort_transaction(inner, timeout_ms)
174
- Rdkafka::RdkafkaError.validate!(response_ptr) || true
174
+ Rdkafka::RdkafkaError.validate!(response_ptr, client_ptr: inner) || true
175
175
  end
176
176
  end
177
177
 
@@ -192,7 +192,7 @@ module Rdkafka
192
192
  @native_kafka.with_inner do |inner|
193
193
  response_ptr = Bindings.rd_kafka_send_offsets_to_transaction(inner, native_tpl, cgmetadata, timeout_ms)
194
194
 
195
- Rdkafka::RdkafkaError.validate!(response_ptr)
195
+ Rdkafka::RdkafkaError.validate!(response_ptr, client_ptr: inner)
196
196
  end
197
197
  ensure
198
198
  if cgmetadata && !cgmetadata.null?
@@ -266,7 +266,7 @@ module Rdkafka
266
266
  Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
267
267
  )
268
268
 
269
- Rdkafka::RdkafkaError.validate!(response)
269
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
270
270
  end
271
271
 
272
272
  # Wait for the purge to affect everything
@@ -278,7 +278,7 @@ module Rdkafka
278
278
  # Partition count for a given topic.
279
279
  #
280
280
  # @param topic [String] The topic name.
281
- # @return [Integer] partition count for a given topic or `-1` if it could not be obtained.
281
+ # @return [Integer] partition count for a given topic or `RD_KAFKA_PARTITION_UA (-1)` if it could not be obtained.
282
282
  #
283
283
  # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
284
284
  # auto-created after returning `RD_KAFKA_PARTITION_UA (-1)`.
@@ -299,13 +299,13 @@ module Rdkafka
299
299
  topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
300
300
  end
301
301
 
302
- topic_metadata ? topic_metadata[:partition_count] : -1
302
+ topic_metadata ? topic_metadata[:partition_count] : Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
303
303
  end
304
304
  rescue Rdkafka::RdkafkaError => e
305
305
  # If the topic does not exist, it will be created or if not allowed another error will be
306
- # raised. We here return -1 so this can happen without early error happening on metadata
307
- # discovery.
308
- return -1 if e.code == :unknown_topic_or_part
306
+ # raised. We here return RD_KAFKA_PARTITION_UA so this can happen without early error
307
+ # happening on metadata discovery.
308
+ return Rdkafka::Bindings::RD_KAFKA_PARTITION_UA if e.code == :unknown_topic_or_part
309
309
 
310
310
  raise(e)
311
311
  end
@@ -380,9 +380,9 @@ module Rdkafka
380
380
  selected_partitioner) if partition_count.positive?
381
381
  end
382
382
 
383
- # If partition is nil, use -1 to let librdafka set the partition randomly or
383
+ # If partition is nil, use RD_KAFKA_PARTITION_UA to let librdkafka set the partition randomly or
384
384
  # based on the key when present.
385
- partition ||= -1
385
+ partition ||= Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
386
386
 
387
387
  # If timestamp is nil use 0 and let Kafka set one. If an integer or time
388
388
  # use it.
@@ -400,9 +400,9 @@ module Rdkafka
400
400
  delivery_handle.label = label
401
401
  delivery_handle.topic = topic
402
402
  delivery_handle[:pending] = true
403
- delivery_handle[:response] = -1
404
- delivery_handle[:partition] = -1
405
- delivery_handle[:offset] = -1
403
+ delivery_handle[:response] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
404
+ delivery_handle[:partition] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
405
+ delivery_handle[:offset] = Rdkafka::Bindings::RD_KAFKA_PARTITION_UA
406
406
  DeliveryHandle.register(delivery_handle)
407
407
 
408
408
  args = [
@@ -454,9 +454,12 @@ module Rdkafka
454
454
  end
455
455
 
456
456
  # Raise error if the produce call was not successful
457
- if response != 0
457
+ if response != Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
458
458
  DeliveryHandle.remove(delivery_handle.to_ptr.address)
459
- Rdkafka::RdkafkaError.validate!(response)
459
+
460
+ @native_kafka.with_inner do |inner|
461
+ Rdkafka::RdkafkaError.validate!(response, client_ptr: inner)
462
+ end
460
463
  end
461
464
 
462
465
  delivery_handle
@@ -1,7 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Rdkafka
4
- VERSION = "0.23.0.beta1"
5
- LIBRDKAFKA_VERSION = "2.12.0"
6
- LIBRDKAFKA_SOURCE_SHA256 = "1355d81091d13643aed140ba0fe62437c02d9434b44e90975aaefab84c2bf237"
4
+ VERSION = "0.23.1"
5
+ LIBRDKAFKA_VERSION = "2.12.1"
6
+ LIBRDKAFKA_SOURCE_SHA256 = "ec103fa05cb0f251e375f6ea0b6112cfc9d0acd977dc5b69fdc54242ba38a16f"
7
7
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka-rdkafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.23.0.beta1
4
+ version: 0.23.1
5
5
  platform: aarch64-linux-gnu
6
6
  authors:
7
7
  - Thijs Cadier
@@ -16,14 +16,14 @@ dependencies:
16
16
  requirements:
17
17
  - - "~>"
18
18
  - !ruby/object:Gem::Version
19
- version: '1.15'
19
+ version: 1.17.1
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
24
  - - "~>"
25
25
  - !ruby/object:Gem::Version
26
- version: '1.15'
26
+ version: 1.17.1
27
27
  - !ruby/object:Gem::Dependency
28
28
  name: json
29
29
  requirement: !ruby/object:Gem::Requirement
@@ -222,6 +222,7 @@ files:
222
222
  - lib/rdkafka/producer/delivery_handle.rb
223
223
  - lib/rdkafka/producer/delivery_report.rb
224
224
  - lib/rdkafka/producer/partitions_count_cache.rb
225
+ - lib/rdkafka/producer/testing.rb
225
226
  - lib/rdkafka/version.rb
226
227
  - renovate.json
227
228
  licenses: