karafka-rdkafka 0.23.1.rc2-x86_64-linux-gnu → 0.24.0.rc1-x86_64-linux-gnu

This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +11 -1
  3. data/Gemfile +9 -0
  4. data/docker-compose-ssl.yml +1 -1
  5. data/docker-compose.yml +1 -1
  6. data/ext/librdkafka.so +0 -0
  7. data/karafka-rdkafka.gemspec +0 -7
  8. data/lib/rdkafka/abstract_handle.rb +23 -5
  9. data/lib/rdkafka/admin/acl_binding_result.rb +1 -1
  10. data/lib/rdkafka/admin/config_resource_binding_result.rb +1 -0
  11. data/lib/rdkafka/admin/create_acl_handle.rb +3 -0
  12. data/lib/rdkafka/admin/create_acl_report.rb +3 -0
  13. data/lib/rdkafka/admin/create_partitions_handle.rb +3 -0
  14. data/lib/rdkafka/admin/create_partitions_report.rb +1 -0
  15. data/lib/rdkafka/admin/create_topic_handle.rb +3 -0
  16. data/lib/rdkafka/admin/create_topic_report.rb +3 -0
  17. data/lib/rdkafka/admin/delete_acl_handle.rb +3 -0
  18. data/lib/rdkafka/admin/delete_acl_report.rb +3 -0
  19. data/lib/rdkafka/admin/delete_groups_handle.rb +5 -0
  20. data/lib/rdkafka/admin/delete_groups_report.rb +3 -0
  21. data/lib/rdkafka/admin/delete_topic_handle.rb +3 -0
  22. data/lib/rdkafka/admin/delete_topic_report.rb +3 -0
  23. data/lib/rdkafka/admin/describe_acl_handle.rb +3 -0
  24. data/lib/rdkafka/admin/describe_acl_report.rb +3 -0
  25. data/lib/rdkafka/admin/describe_configs_handle.rb +3 -0
  26. data/lib/rdkafka/admin/describe_configs_report.rb +6 -0
  27. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +3 -0
  28. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +6 -0
  29. data/lib/rdkafka/admin.rb +99 -103
  30. data/lib/rdkafka/bindings.rb +23 -25
  31. data/lib/rdkafka/callbacks.rb +62 -2
  32. data/lib/rdkafka/config.rb +20 -8
  33. data/lib/rdkafka/consumer/headers.rb +3 -2
  34. data/lib/rdkafka/consumer/message.rb +7 -3
  35. data/lib/rdkafka/consumer/partition.rb +6 -1
  36. data/lib/rdkafka/consumer/topic_partition_list.rb +5 -5
  37. data/lib/rdkafka/consumer.rb +24 -12
  38. data/lib/rdkafka/defaults.rb +84 -0
  39. data/lib/rdkafka/error.rb +46 -1
  40. data/lib/rdkafka/helpers/oauth.rb +11 -5
  41. data/lib/rdkafka/metadata.rb +29 -5
  42. data/lib/rdkafka/native_kafka.rb +26 -2
  43. data/lib/rdkafka/producer/delivery_report.rb +6 -2
  44. data/lib/rdkafka/producer/partitions_count_cache.rb +24 -14
  45. data/lib/rdkafka/producer/testing.rb +3 -3
  46. data/lib/rdkafka/producer.rb +60 -16
  47. data/lib/rdkafka/version.rb +6 -3
  48. data/lib/rdkafka.rb +2 -0
  49. data/renovate.json +1 -8
  50. metadata +3 -86
data/lib/rdkafka/error.rb CHANGED
@@ -26,6 +26,12 @@ module Rdkafka
  attr_reader :details

  class << self
+ # Build an error instance from a rd_kafka_error_t pointer
+ #
+ # @param response_ptr [FFI::Pointer] Pointer to rd_kafka_error_t
+ # @param message_prefix [String, nil] Optional prefix for the error message
+ # @param broker_message [String, nil] Optional broker error message
+ # @return [RdkafkaError, false] Error instance or false if no error
  def build_from_c(response_ptr, message_prefix = nil, broker_message: nil)
  code = Rdkafka::Bindings.rd_kafka_error_code(response_ptr)

@@ -48,6 +54,13 @@ module Rdkafka
  )
  end

+ # Build an error instance from various input types
+ #
+ # @param response_ptr_or_code [Integer, FFI::Pointer, Bindings::Message] Error code, pointer,
+ # or message struct
+ # @param message_prefix [String, nil] Optional prefix for the error message
+ # @param broker_message [String, nil] Optional broker error message
+ # @return [RdkafkaError, false] Error instance or false if no error
  def build(response_ptr_or_code, message_prefix = nil, broker_message: nil)
  case response_ptr_or_code
  when Integer
@@ -81,6 +94,16 @@ module Rdkafka
  end
  end

+ # Validate a response and raise an error if it indicates a failure
+ #
+ # @param response_ptr_or_code [Integer, FFI::Pointer, Bindings::Message] Error code, pointer,
+ # or message struct
+ # @param message_prefix [String, nil] Optional prefix for the error message
+ # @param broker_message [String, nil] Optional broker error message
+ # @param client_ptr [FFI::Pointer, nil] Optional pointer to rd_kafka_t client for fatal error
+ # detection
+ # @return [false] Returns false if no error
+ # @raise [RdkafkaError] if the response indicates an error
  def validate!(response_ptr_or_code, message_prefix = nil, broker_message: nil, client_ptr: nil)
  error = build(response_ptr_or_code, message_prefix, broker_message: broker_message)

@@ -128,6 +151,13 @@ module Rdkafka
  end

  # @private
+ # @param response [Integer] the raw error response code from librdkafka
+ # @param message_prefix [String, nil] optional prefix for error messages
+ # @param broker_message [String, nil] optional error message from the broker
+ # @param fatal [Boolean] whether this is a fatal error
+ # @param retryable [Boolean] whether this error is retryable
+ # @param abortable [Boolean] whether this error requires transaction abort
+ # @param details [Hash] additional error details
  def initialize(
  response,
  message_prefix=nil,
@@ -183,18 +213,26 @@ module Rdkafka
  end

  # Error comparison
+ # @param another_error [Object] object to compare with
+ # @return [Boolean]
  def ==(another_error)
  another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
  end

+ # Whether this error is fatal and the client instance is no longer usable
+ # @return [Boolean]
  def fatal?
  @fatal
  end

+ # Whether this error is retryable and the operation may succeed if retried
+ # @return [Boolean]
  def retryable?
  @retryable
  end

+ # Whether this error requires the current transaction to be aborted
+ # @return [Boolean]
  def abortable?
  @abortable
  end
@@ -206,6 +244,9 @@ module Rdkafka
  attr_reader :topic_partition_list

  # @private
+ # @param response [Integer] the raw error response code from librdkafka
+ # @param topic_partition_list [TopicPartitionList] the topic partition list with error info
+ # @param message_prefix [String, nil] optional prefix for error messages
  def initialize(response, topic_partition_list, message_prefix=nil)
  super(response, message_prefix)
  @topic_partition_list = topic_partition_list
@@ -214,6 +255,7 @@ module Rdkafka

  # Error class for public consumer method calls on a closed consumer.
  class ClosedConsumerError < BaseError
+ # @param method [Symbol] the method that was called
  def initialize(method)
  super("Illegal call to #{method.to_s} on a closed consumer")
  end
@@ -221,18 +263,21 @@ module Rdkafka

  # Error class for public producer method calls on a closed producer.
  class ClosedProducerError < BaseError
+ # @param method [Symbol] the method that was called
  def initialize(method)
  super("Illegal call to #{method.to_s} on a closed producer")
  end
  end

- # Error class for public consumer method calls on a closed admin.
+ # Error class for public admin method calls on a closed admin.
  class ClosedAdminError < BaseError
+ # @param method [Symbol] the method that was called
  def initialize(method)
  super("Illegal call to #{method.to_s} on a closed admin")
  end
  end

+ # Error class for calls on a closed inner librdkafka instance.
  class ClosedInnerError < BaseError
  def initialize
  super("Illegal call to a closed inner librdkafka instance")
data/lib/rdkafka/helpers/oauth.rb CHANGED
@@ -1,6 +1,6 @@
  module Rdkafka
  module Helpers
-
+ # OAuth helper methods for setting and refreshing SASL/OAUTHBEARER tokens
  module OAuth

  # Set the OAuthBearer token
@@ -47,8 +47,11 @@ module Rdkafka

  private

- # Convert extensions hash to FFI::MemoryPointer (const char **).
- # Note: the returned pointers must be freed manually (autorelease = false).
+ # Convert extensions hash to FFI::MemoryPointer (`const char **`).
+ #
+ # @param extensions [Hash, nil] extension key-value pairs
+ # @return [Array<FFI::MemoryPointer, Array<FFI::MemoryPointer>>] array pointer and string pointers
+ # @note The returned pointers must be freed manually (autorelease = false).
  def map_extensions(extensions)
  return [nil, nil] if extensions.nil? || extensions.empty?

@@ -74,8 +77,11 @@ module Rdkafka
  [array_ptr, str_ptrs]
  end

- # extension_size is the number of keys + values which should be a non-negative even number
- # https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
+ # Returns the extension size (number of keys + values).
+ #
+ # @param extensions [Hash, nil] extension key-value pairs
+ # @return [Integer] non-negative even number representing keys + values count
+ # @see https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
  def extension_size(extensions)
  return 0 unless extensions
  extensions.size * 2
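These private helpers back the public token setter referenced at the top of this hunk. A hedged sketch of how extensions flow in; the token, principal, and cluster values are placeholders, and the keyword signature of oauthbearer_set_token is taken from the gem's public OAuth helper rather than from this hunk:

  producer.oauthbearer_set_token(
    token: "eyJhbGciOi...",                       # opaque bearer token
    lifetime_ms: (Time.now.to_i + 300) * 1_000,   # absolute expiry in milliseconds
    principal_name: "kafka-user",
    extensions: { "logicalCluster" => "lkc-123" } # mapped via map_extensions;
                                                  # extension_size here is 1 * 2 = 2
  )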
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,8 +1,12 @@
  # frozen_string_literal: true

  module Rdkafka
+ # Provides cluster metadata information
  class Metadata
- attr_reader :brokers, :topics
+ # @return [Array<Hash>] list of broker metadata
+ attr_reader :brokers
+ # @return [Array<Hash>] list of topic metadata
+ attr_reader :topics

  # Errors upon which we retry the metadata fetch
  RETRIED_ERRORS = %i[
@@ -12,7 +16,13 @@ module Rdkafka

  private_constant :RETRIED_ERRORS

- def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+ # Fetches metadata from the Kafka cluster
+ #
+ # @param native_client [FFI::Pointer] pointer to the native Kafka client
+ # @param topic_name [String, nil] specific topic to fetch metadata for, or nil for all topics
+ # @param timeout_ms [Integer] timeout in milliseconds
+ # @raise [RdkafkaError] when metadata fetch fails
+ def initialize(native_client, topic_name = nil, timeout_ms = Defaults::METADATA_TIMEOUT_MS)
  attempt ||= 0
  attempt += 1

@@ -34,12 +44,12 @@ module Rdkafka
  metadata_from_native(ptr.read_pointer)
  rescue ::Rdkafka::RdkafkaError => e
  raise unless RETRIED_ERRORS.include?(e.code)
- raise if attempt > 10
+ raise if attempt > Defaults::METADATA_MAX_RETRIES

  backoff_factor = 2**attempt
- timeout = backoff_factor * 0.1
+ timeout_ms = backoff_factor * Defaults::METADATA_RETRY_BACKOFF_BASE_MS

- sleep(timeout)
+ sleep(timeout_ms / 1_000.0)

  retry
  ensure
@@ -49,6 +59,8 @@ module Rdkafka

  private

+ # Extracts metadata from native pointer
+ # @param ptr [FFI::Pointer] pointer to native metadata
  def metadata_from_native(ptr)
  metadata = Metadata.new(ptr)
  @brokers = Array.new(metadata[:brokers_count]) do |i|
@@ -69,7 +81,11 @@ module Rdkafka
  end
  end

+ # Base class for metadata FFI structs with hash conversion
+ # @private
  class CustomFFIStruct < FFI::Struct
+ # Converts struct to a hash
+ # @return [Hash]
  def to_h
  members.each_with_object({}) do |mem, hsh|
  val = self.[](mem)
@@ -80,6 +96,8 @@ module Rdkafka
  end
  end

+ # @private
+ # FFI struct for rd_kafka_metadata_t
  class Metadata < CustomFFIStruct
  layout :brokers_count, :int,
  :brokers_metadata, :pointer,
@@ -89,12 +107,16 @@ module Rdkafka
  :broker_name, :string
  end

+ # @private
+ # FFI struct for rd_kafka_metadata_broker_t
  class BrokerMetadata < CustomFFIStruct
  layout :broker_id, :int32,
  :broker_name, :string,
  :broker_port, :int
  end

+ # @private
+ # FFI struct for rd_kafka_metadata_topic_t
  class TopicMetadata < CustomFFIStruct
  layout :topic_name, :string,
  :partition_count, :int,
@@ -102,6 +124,8 @@ module Rdkafka
  :rd_kafka_resp_err, :int
  end

+ # @private
+ # FFI struct for rd_kafka_metadata_partition_t
  class PartitionMetadata < CustomFFIStruct
  layout :partition_id, :int32,
  :rd_kafka_resp_err, :int,
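The retry loop above now derives its sleep from Defaults::METADATA_RETRY_BACKOFF_BASE_MS instead of a hard-coded 0.1 s. A small sketch of the resulting exponential schedule, assuming the base stays at 100 ms to match the pre-0.24 behavior (the actual constant lives in the new data/lib/rdkafka/defaults.rb, which this hunk does not show):

  base_ms = 100 # assumed value of Rdkafka::Defaults::METADATA_RETRY_BACKOFF_BASE_MS
  (1..4).map { |attempt| (2**attempt) * base_ms }
  #=> [200, 400, 800, 1600] milliseconds slept between successive retries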
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -4,7 +4,13 @@ module Rdkafka
  # @private
  # A wrapper around a native kafka that polls and cleanly exits
  class NativeKafka
- def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
+ # Creates a new NativeKafka wrapper
+ # @param inner [FFI::Pointer] pointer to the native Kafka handle
+ # @param run_polling_thread [Boolean] whether to run a background polling thread
+ # @param opaque [Rdkafka::Opaque] opaque object for callback context
+ # @param auto_start [Boolean] whether to start the polling thread automatically
+ # @param timeout_ms [Integer] poll timeout in milliseconds
+ def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: Defaults::NATIVE_KAFKA_POLL_TIMEOUT_MS)
  @inner = inner
  @opaque = opaque
  # Lock around external access
@@ -37,6 +43,8 @@ module Rdkafka
  @closing = false
  end

+ # Starts the polling thread if configured
+ # @return [nil]
  def start
  synchronize do
  return if @started
@@ -69,6 +77,10 @@ module Rdkafka
  end
  end

+ # Executes a block with the inner native Kafka handle
+ # @yield [FFI::Pointer] the inner native Kafka handle
+ # @return [Object] the result of the block
+ # @raise [ClosedInnerError] when the inner handle is nil
  def with_inner
  if @access_mutex.owned?
  @operations_in_progress += 1
@@ -81,26 +93,38 @@ module Rdkafka
  @decrement_mutex.synchronize { @operations_in_progress -= 1 }
  end

+ # Executes a block while holding exclusive access to the native Kafka handle
+ # @param block [Proc] block to execute with the native handle
+ # @yield [FFI::Pointer] the inner native Kafka handle
+ # @return [Object] the result of the block
  def synchronize(&block)
  @access_mutex.synchronize do
  # Wait for any commands using the inner to finish
  # This can take a while on blocking operations like polling but is essential not to proceed
  # with certain types of operations like resources destruction as it can cause the process
  # to hang or crash
- sleep(0.01) until @operations_in_progress.zero?
+ sleep(Defaults::NATIVE_KAFKA_SYNCHRONIZE_SLEEP_INTERVAL_MS / 1_000.0) until @operations_in_progress.zero?

  with_inner(&block)
  end
  end

+ # Returns a finalizer proc for closing this native Kafka handle
+ # @return [Proc] finalizer proc
  def finalizer
  ->(_) { close }
  end

+ # Returns whether this native Kafka handle is closed or closing
+ # @return [Boolean] true if closed or closing
  def closed?
  @closing || @inner.nil?
  end

+ # Closes the native Kafka handle and cleans up resources
+ # @param object_id [Integer, nil] optional object ID (unused, for finalizer compatibility)
+ # @yield optional block to execute before destroying the handle
+ # @return [nil]
  def close(object_id=nil)
  return if closed?

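Calls into librdkafka from the higher-level classes are funnelled through with_inner, which guards against a torn-down handle. A minimal sketch of the pattern as used inside the gem (it mirrors the new Producer#queue_size implementation shown further down in this diff):

  @native_kafka.with_inner do |inner|
    Rdkafka::Bindings.rd_kafka_outq_len(inner)
  end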
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -32,8 +32,12 @@ module Rdkafka
  # We do not remove the original `#topic_name` because of backwards compatibility
  alias topic topic_name

- private
-
+ # @private
+ # @param partition [Integer] partition number
+ # @param offset [Integer] message offset
+ # @param topic_name [String, nil] topic name
+ # @param error [Integer, nil] error code if any
+ # @param label [Object, nil] user-defined label
  def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
  @partition = partition
  @offset = offset
data/lib/rdkafka/producer/partitions_count_cache.rb CHANGED
@@ -17,9 +17,9 @@ module Rdkafka
  #
  # 2. Edge case handling
  # If a user configures `statistics.interval.ms` much higher than the default cache TTL
- # (30 seconds), the cache will still function correctly. When statistics updates don't
- # occur frequently enough, the cache entries will expire naturally, triggering a
- # blocking refresh when needed.
+ # ({Defaults::PARTITIONS_COUNT_CACHE_TTL_MS}ms), the cache will still function correctly.
+ # When statistics updates don't occur frequently enough, the cache entries will expire
+ # naturally, triggering a blocking refresh when needed.
  #
  # 3. User configuration awareness
  # The cache respects user-defined settings. If `topic.metadata.refresh.interval.ms` is
@@ -46,22 +46,32 @@ module Rdkafka
  class PartitionsCountCache
  include Helpers::Time

- # Default time-to-live for cached partition counts in seconds
- #
- # @note This default was chosen to balance freshness of metadata with performance
- # optimization. Most Kafka cluster topology changes are planned operations, making 30
- # seconds a reasonable compromise.
- DEFAULT_TTL = 30
-
  # Creates a new partition count cache
  #
- # @param ttl [Integer] Time-to-live in seconds for cached values
- def initialize(ttl = DEFAULT_TTL)
+ # @param ttl [Integer, nil] DEPRECATED: Use ttl_ms instead.
+ # Time-to-live in seconds for cached values. Will be removed in v1.0.0.
+ # @param ttl_ms [Integer, nil] Time-to-live in milliseconds for cached values.
+ # Defaults to {Defaults::PARTITIONS_COUNT_CACHE_TTL_MS}.
+ def initialize(ttl = :not_provided, ttl_ms: :not_provided)
  @counts = {}
  @mutex_hash = {}
  # Used only for @mutex_hash access to ensure thread-safety when creating new mutexes
  @mutex_for_hash = Mutex.new
- @ttl = ttl
+
+ # Determine which TTL value to use
+ if ttl != :not_provided && ttl_ms != :not_provided
+ warn "DEPRECATION WARNING: Both ttl and ttl_ms were provided to PartitionsCountCache. " \
+ "Using ttl_ms. The ttl parameter is deprecated and will be removed in v1.0.0."
+ @ttl_ms = ttl_ms
+ elsif ttl != :not_provided
+ warn "DEPRECATION WARNING: ttl (seconds) parameter for PartitionsCountCache is deprecated. " \
+ "Use ttl_ms (milliseconds) instead. This parameter will be removed in v1.0.0."
+ @ttl_ms = (ttl * 1000).to_i
+ elsif ttl_ms == :not_provided
+ @ttl_ms = Defaults::PARTITIONS_COUNT_CACHE_TTL_MS
+ else
+ @ttl_ms = ttl_ms
+ end
  end

  # Reads partition count for a topic with automatic refresh when expired
@@ -209,7 +219,7 @@ module Rdkafka
  # @param timestamp [Float] Monotonic timestamp to check
  # @return [Boolean] true if expired, false otherwise
  def expired?(timestamp)
- monotonic_now - timestamp > @ttl
+ (monotonic_now - timestamp) * 1_000 > @ttl_ms
  end
  end
  end
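The constructor now prefers a millisecond TTL while keeping the old seconds-based positional argument as a deprecated path. A sketch of the three construction forms implied by the hunk above (the default constant lives in the new Rdkafka::Defaults module, whose value is not shown here):

  Rdkafka::Producer::PartitionsCountCache.new                  # uses Defaults::PARTITIONS_COUNT_CACHE_TTL_MS
  Rdkafka::Producer::PartitionsCountCache.new(ttl_ms: 60_000)  # explicit 60 s, expressed in milliseconds
  Rdkafka::Producer::PartitionsCountCache.new(30)              # deprecated: warns, converted to 30_000 ms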
data/lib/rdkafka/producer/testing.rb CHANGED
@@ -14,9 +14,9 @@ module Rdkafka
  # Or include it for all producers in your test suite:
  # Rdkafka::Producer.include(Rdkafka::Testing)
  #
- # IMPORTANT: Fatal errors leave the producer client in an unusable state. After triggering
- # a fatal error, the producer should be closed and discarded. Do not attempt to reuse a
- # producer that has experienced a fatal error.
+ # @note Fatal errors leave the producer client in an unusable state. After triggering
+ # a fatal error, the producer should be closed and discarded. Do not attempt to reuse a
+ # producer that has experienced a fatal error.
  module Testing
  # Triggers a test fatal error using rd_kafka_test_fatal_error.
  # This is useful for testing fatal error handling without needing actual broker issues.
data/lib/rdkafka/producer.rb CHANGED
@@ -14,10 +14,9 @@ module Rdkafka
  # then. Since the partitions count can only grow and should be same for all consumers and
  # producers, we can use a global cache as long as we ensure that updates only move up.
  #
+ # @return [Rdkafka::Producer::PartitionsCountCache]
  # @note It is critical to remember, that not all users may have statistics callbacks enabled,
  # hence we should not make assumption that this cache is always updated from the stats.
- #
- # @return [Rdkafka::Producer::PartitionsCountCache]
  def self.partitions_count_cache
  @@partitions_count_cache
  end
@@ -64,12 +63,13 @@ module Rdkafka
  end

  # Sets alternative set of configuration details that can be set per topic
- # @note It is not allowed to re-set the same topic config twice because of the underlying
- # librdkafka caching
+ #
  # @param topic [String] The topic name
  # @param config [Hash] config we want to use per topic basis
  # @param config_hash [Integer] hash of the config. We expect it here instead of computing it,
  # because it is already computed during the retrieval attempt in the `#produce` flow.
+ # @note It is not allowed to re-set the same topic config twice because of the underlying
+ # librdkafka caching
  def set_topic_config(topic, config, config_hash)
  # Ensure lock on topic reference just in case
  @native_kafka.with_inner do |inner|
@@ -125,8 +125,7 @@ module Rdkafka
  # Set a callback that will be called every time a message is successfully produced.
  # The callback is called with a {DeliveryReport} and {DeliveryHandle}
  #
- # @param callback [Proc, #call] The callback
- #
+ # @param callback [Proc, #call] callable object to handle delivery reports
  # @return [nil]
  def delivery_callback=(callback)
  raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
@@ -134,8 +133,11 @@ module Rdkafka
  @delivery_callback_arity = arity(callback)
  end

- # Init transactions
- # Run once per producer
+ # Initialize transactions for the producer
+ # Must be called once before any transactional operations
+ #
+ # @return [true] Returns true on success
+ # @raise [RdkafkaError] if initialization fails
  def init_transactions
  closed_producer_check(__method__)

@@ -146,6 +148,11 @@ module Rdkafka
  end
  end

+ # Begin a new transaction
+ # Requires {#init_transactions} to have been called first
+ #
+ # @return [true] Returns true on success
+ # @raise [RdkafkaError] if beginning the transaction fails
  def begin_transaction
  closed_producer_check(__method__)

@@ -156,6 +163,11 @@ module Rdkafka
  end
  end

+ # Commit the current transaction
+ #
+ # @param timeout_ms [Integer] Timeout in milliseconds (-1 for infinite)
+ # @return [true] Returns true on success
+ # @raise [RdkafkaError] if committing the transaction fails
  def commit_transaction(timeout_ms = -1)
  closed_producer_check(__method__)

@@ -166,6 +178,11 @@ module Rdkafka
  end
  end

+ # Abort the current transaction
+ #
+ # @param timeout_ms [Integer] Timeout in milliseconds (-1 for infinite)
+ # @return [true] Returns true on success
+ # @raise [RdkafkaError] if aborting the transaction fails
  def abort_transaction(timeout_ms = -1)
  closed_producer_check(__method__)

@@ -181,7 +198,7 @@ module Rdkafka
  # @param tpl [Consumer::TopicPartitionList]
  # @param timeout_ms [Integer] offsets send timeout
  # @note Use **only** in the context of an active transaction
- def send_offsets_to_transaction(consumer, tpl, timeout_ms = 5_000)
+ def send_offsets_to_transaction(consumer, tpl, timeout_ms = Defaults::PRODUCER_SEND_OFFSETS_TIMEOUT_MS)
  closed_producer_check(__method__)

  return if tpl.empty?
@@ -236,7 +253,7 @@ module Rdkafka
  # should be no other errors.
  #
  # @note For `timed_out` we do not raise an error to keep it backwards compatible
- def flush(timeout_ms=5_000)
+ def flush(timeout_ms=Defaults::PRODUCER_FLUSH_TIMEOUT_MS)
  closed_producer_check(__method__)

  error = @native_kafka.with_inner do |inner|
@@ -270,11 +287,37 @@ module Rdkafka
  end

  # Wait for the purge to affect everything
- sleep(0.001) until flush(100)
+ sleep(Defaults::PRODUCER_PURGE_SLEEP_INTERVAL_MS / 1_000.0) until flush(Defaults::PRODUCER_PURGE_FLUSH_TIMEOUT_MS)

  true
  end

+ # Returns the number of messages and requests waiting to be sent to the broker as well as
+ # delivery reports queued for the application.
+ #
+ # This provides visibility into the producer's internal queue depth, useful for:
+ # - Monitoring producer backpressure
+ # - Implementing custom flow control
+ # - Debugging message delivery issues
+ # - Graceful shutdown logic (wait until queue is empty)
+ #
+ # @return [Integer] the number of messages in the queue
+ # @raise [Rdkafka::ClosedProducerError] if called on a closed producer
+ #
+ # @note This method is thread-safe as it uses the @native_kafka.with_inner synchronization
+ #
+ # @example
+ # producer.queue_size #=> 42
+ def queue_size
+ closed_producer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ Rdkafka::Bindings.rd_kafka_outq_len(inner)
+ end
+ end
+
+ alias queue_length queue_size
+
  # Partition count for a given topic.
  #
  # @param topic [String] The topic name.
@@ -316,14 +359,15 @@ module Rdkafka
  # When a timestamp is provided this is used instead of the auto-generated timestamp.
  #
  # @param topic [String] The topic to produce to
- # @param payload [String,nil] The message's payload
- # @param key [String, nil] The message's key
- # @param partition [Integer,nil] Optional partition to produce to
+ # @param payload [String, nil]
+ # @param key [String, nil]
+ # @param partition [Integer, nil] Optional partition to produce to
  # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
- # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
- # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
+ # @param timestamp [Time, Integer, nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
+ # @param headers [Hash{String => String, Array<String>}] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
  # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
  # @param topic_config [Hash] topic config for given message dispatch. Allows to send messages to topics with different configuration
+ # @param partitioner [String] name of the partitioner to use
  #
  # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
  #
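The new #queue_size reader and the expanded transaction docs combine into a fairly standard flow. A hedged sketch; the topic name is a placeholder, and send_offsets_to_transaction is shown commented out because it needs a consumer and a populated topic partition list:

  producer.init_transactions                 # once per producer
  producer.begin_transaction
  handle = producer.produce(topic: "events", payload: "body")
  # producer.send_offsets_to_transaction(consumer, tpl) # only inside an active transaction
  producer.commit_transaction                # or abort_transaction on an abortable? error
  handle.wait

  producer.queue_size  #=> 0 once everything has been flushed (also aliased as #queue_length)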
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,10 @@
  # frozen_string_literal: true

  module Rdkafka
- VERSION = "0.23.1.rc2"
- LIBRDKAFKA_VERSION = "2.12.1"
- LIBRDKAFKA_SOURCE_SHA256 = "ec103fa05cb0f251e375f6ea0b6112cfc9d0acd977dc5b69fdc54242ba38a16f"
+ # Current rdkafka-ruby gem version
+ VERSION = "0.24.0.rc1"
+ # Target librdkafka version to be used
+ LIBRDKAFKA_VERSION = "2.13.0"
+ # SHA256 hash of the librdkafka source tarball for verification
+ LIBRDKAFKA_SOURCE_SHA256 = "3bd351601d8ebcbc99b9a1316cae1b83b00edbcf9411c34287edf1791c507600"
  end
data/lib/rdkafka.rb CHANGED
@@ -6,8 +6,10 @@ require "ffi"
  require "json"

  require "rdkafka/version"
+ require "rdkafka/defaults"
  require "rdkafka/helpers/time"
  require "rdkafka/helpers/oauth"
+ require "rdkafka/defaults"
  require "rdkafka/abstract_handle"
  require "rdkafka/admin"
  require "rdkafka/admin/create_topic_handle"
data/renovate.json CHANGED
@@ -3,18 +3,11 @@
  "extends": [
  "config:recommended"
  ],
+ "minimumReleaseAge": "7 days",
  "github-actions": {
  "enabled": true,
  "pinDigests": true
  },
- "packageRules": [
- {
- "matchManagers": [
- "github-actions"
- ],
- "minimumReleaseAge": "7 days"
- }
- ],
  "customManagers": [
  {
  "customType": "regex",