rdkafka 0.22.0.beta1-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
  5. data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
  6. data/.github/workflows/ci_macos_arm64.yml +306 -0
  7. data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
  8. data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
  9. data/.github/workflows/push_macos_arm64.yml +54 -0
  10. data/.github/workflows/push_ruby.yml +37 -0
  11. data/.github/workflows/verify-action-pins.yml +16 -0
  12. data/.gitignore +14 -0
  13. data/.rspec +2 -0
  14. data/.ruby-gemset +1 -0
  15. data/.ruby-version +1 -0
  16. data/.yardopts +2 -0
  17. data/CHANGELOG.md +247 -0
  18. data/Gemfile +5 -0
  19. data/MIT-LICENSE +22 -0
  20. data/README.md +178 -0
  21. data/Rakefile +96 -0
  22. data/docker-compose.yml +25 -0
  23. data/ext/README.md +19 -0
  24. data/ext/Rakefile +131 -0
  25. data/ext/build_common.sh +361 -0
  26. data/ext/build_linux_x86_64_gnu.sh +306 -0
  27. data/ext/build_linux_x86_64_musl.sh +763 -0
  28. data/ext/build_macos_arm64.sh +550 -0
  29. data/ext/librdkafka.so +0 -0
  30. data/lib/rdkafka/abstract_handle.rb +116 -0
  31. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  32. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  33. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  34. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  35. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  36. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  37. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  38. data/lib/rdkafka/admin/create_topic_handle.rb +29 -0
  39. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  40. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  41. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  42. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  43. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  44. data/lib/rdkafka/admin/delete_topic_handle.rb +29 -0
  45. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  46. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  47. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  48. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  49. data/lib/rdkafka/admin/describe_configs_report.rb +54 -0
  50. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  51. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +54 -0
  52. data/lib/rdkafka/admin.rb +833 -0
  53. data/lib/rdkafka/bindings.rb +566 -0
  54. data/lib/rdkafka/callbacks.rb +415 -0
  55. data/lib/rdkafka/config.rb +398 -0
  56. data/lib/rdkafka/consumer/headers.rb +79 -0
  57. data/lib/rdkafka/consumer/message.rb +86 -0
  58. data/lib/rdkafka/consumer/partition.rb +51 -0
  59. data/lib/rdkafka/consumer/topic_partition_list.rb +169 -0
  60. data/lib/rdkafka/consumer.rb +653 -0
  61. data/lib/rdkafka/error.rb +101 -0
  62. data/lib/rdkafka/helpers/oauth.rb +58 -0
  63. data/lib/rdkafka/helpers/time.rb +14 -0
  64. data/lib/rdkafka/metadata.rb +115 -0
  65. data/lib/rdkafka/native_kafka.rb +139 -0
  66. data/lib/rdkafka/producer/delivery_handle.rb +40 -0
  67. data/lib/rdkafka/producer/delivery_report.rb +46 -0
  68. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  69. data/lib/rdkafka/producer.rb +430 -0
  70. data/lib/rdkafka/version.rb +7 -0
  71. data/lib/rdkafka.rb +54 -0
  72. data/rdkafka.gemspec +65 -0
  73. data/renovate.json +92 -0
  74. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  75. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  76. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  77. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  78. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  81. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  82. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  83. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  84. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  85. data/spec/rdkafka/admin_spec.rb +770 -0
  86. data/spec/rdkafka/bindings_spec.rb +223 -0
  87. data/spec/rdkafka/callbacks_spec.rb +20 -0
  88. data/spec/rdkafka/config_spec.rb +258 -0
  89. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  90. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  91. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  92. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  93. data/spec/rdkafka/consumer_spec.rb +1274 -0
  94. data/spec/rdkafka/error_spec.rb +89 -0
  95. data/spec/rdkafka/metadata_spec.rb +79 -0
  96. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  97. data/spec/rdkafka/producer/delivery_handle_spec.rb +45 -0
  98. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  99. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  100. data/spec/rdkafka/producer_spec.rb +1052 -0
  101. data/spec/spec_helper.rb +195 -0
  102. metadata +276 -0
@@ -0,0 +1,216 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   class Producer
+     # Caching mechanism for Kafka topic partition counts to avoid frequent cluster queries
+     #
+     # This cache is designed to optimize the process of obtaining partition counts for topics.
+     # It uses several strategies to minimize Kafka cluster queries:
+     #
+     # @note Design considerations:
+     #
+     # 1. Statistics-based updates
+     #    When statistics callbacks are enabled (via `statistics.interval.ms`), we leverage
+     #    this data to proactively update the partition counts cache. This approach costs
+     #    approximately 0.02ms of processing time during each statistics interval (typically
+     #    every 5 seconds) but eliminates the need for explicit blocking metadata queries.
+     #
+     # 2. Edge case handling
+     #    If a user configures `statistics.interval.ms` much higher than the default cache TTL
+     #    (30 seconds), the cache will still function correctly. When statistics updates don't
+     #    occur frequently enough, the cache entries will expire naturally, triggering a
+     #    blocking refresh when needed.
+     #
+     # 3. User configuration awareness
+     #    The cache respects user-defined settings. If `topic.metadata.refresh.interval.ms` is
+     #    set very high, the responsibility for potentially stale data falls on the user. This
+     #    is an explicit design choice to honor user configuration preferences and align with
+     #    librdkafka settings.
+     #
+     # 4. Process-wide efficiency
+     #    Since this cache is shared across all Rdkafka producers and consumers within a process,
+     #    having multiple clients improves overall efficiency. Each client contributes to keeping
+     #    the cache updated, benefiting all other clients.
+     #
+     # 5. Thread-safety approach
+     #    The implementation uses fine-grained locking with per-topic mutexes to minimize
+     #    contention in multi-threaded environments while ensuring data consistency.
+     #
+     # 6. Topic recreation handling
+     #    If a topic is deleted and recreated with fewer partitions, the cache will continue to
+     #    report the higher count until either the TTL expires or the process is restarted. This
+     #    design choice simplifies the implementation while relying on librdkafka's error handling
+     #    for edge cases. In production environments, topic recreation with different partition
+     #    counts is typically accompanied by application restarts to handle structural changes.
+     #    This also aligns with the previous cache implementation.
+     class PartitionsCountCache
+       include Helpers::Time
+
+       # Default time-to-live for cached partition counts in seconds
+       #
+       # @note This default was chosen to balance freshness of metadata with performance
+       #   optimization. Most Kafka cluster topology changes are planned operations, making 30
+       #   seconds a reasonable compromise.
+       DEFAULT_TTL = 30
+
+       # Creates a new partition count cache
+       #
+       # @param ttl [Integer] Time-to-live in seconds for cached values
+       def initialize(ttl = DEFAULT_TTL)
+         @counts = {}
+         @mutex_hash = {}
+         # Used only for @mutex_hash access to ensure thread-safety when creating new mutexes
+         @mutex_for_hash = Mutex.new
+         @ttl = ttl
+       end
+
+       # Reads partition count for a topic with automatic refresh when expired
+       #
+       # This method will return the cached partition count if available and not expired.
+       # If the value is expired or not available, it will execute the provided block
+       # to fetch the current value from Kafka.
+       #
+       # @param topic [String] Kafka topic name
+       # @yield Block that returns the current partition count when cache needs refreshing
+       # @yieldreturn [Integer] Current partition count retrieved from Kafka
+       # @return [Integer] Partition count for the topic
+       #
+       # @note The implementation prioritizes read performance over write consistency
+       #   since partition counts typically only increase during normal operation.
+       def get(topic)
+         current_info = @counts[topic]
+
+         if current_info.nil? || expired?(current_info[0])
+           new_count = yield
+
+           if current_info.nil?
+             # No existing data, create a new entry with mutex
+             set(topic, new_count)
+
+             return new_count
+           else
+             current_count = current_info[1]
+
+             if new_count > current_count
+               # Higher value needs mutex to update both timestamp and count
+               set(topic, new_count)
+
+               return new_count
+             else
+               # Same or lower value, just update timestamp without mutex
+               refresh_timestamp(topic)
+
+               return current_count
+             end
+           end
+         end
+
+         current_info[1]
+       end
+
+       # Update partition count for a topic when needed
+       #
+       # This method updates the partition count for a topic in the cache.
+       # It uses a mutex to ensure thread-safety during updates.
+       #
+       # @param topic [String] Kafka topic name
+       # @param new_count [Integer] New partition count value
+       #
+       # @note We prioritize higher partition counts and only accept them when using
+       #   a mutex to ensure consistency. This design decision is based on the fact that
+       #   partition counts in Kafka only increase during normal operation.
+       def set(topic, new_count)
+         # First check outside mutex to avoid unnecessary locking
+         current_info = @counts[topic]
+
+         # For lower values, we don't update count but might need to refresh timestamp
+         if current_info && new_count < current_info[1]
+           refresh_timestamp(topic)
+
+           return
+         end
+
+         # Only lock the specific topic mutex
+         mutex_for(topic).synchronize do
+           # Check again inside the lock as another thread might have updated
+           current_info = @counts[topic]
+
+           if current_info.nil?
+             # Create new entry
+             @counts[topic] = [monotonic_now, new_count]
+           else
+             current_count = current_info[1]
+
+             if new_count > current_count
+               # Update to higher count value
+               current_info[0] = monotonic_now
+               current_info[1] = new_count
+             else
+               # Same or lower count, update timestamp only
+               current_info[0] = monotonic_now
+             end
+           end
+         end
+       end
+
+       # @return [Hash] hash mapping topic names to [monotonic timestamp, partition count] arrays
+       def to_h
+         @counts
+       end
+
+       private
+
+       # Get or create a mutex for a specific topic
+       #
+       # This method ensures that each topic has its own mutex,
+       # allowing operations on different topics to proceed in parallel.
+       #
+       # @param topic [String] Kafka topic name
+       # @return [Mutex] Mutex for the specified topic
+       #
+       # @note We use a separate mutex (@mutex_for_hash) to protect the creation
+       #   of new topic mutexes. This pattern allows fine-grained locking while
+       #   maintaining thread-safety.
+       def mutex_for(topic)
+         mutex = @mutex_hash[topic]
+
+         return mutex if mutex
+
+         # Use a separate mutex to protect the creation of new topic mutexes
+         @mutex_for_hash.synchronize do
+           # Check again in case another thread created it
+           @mutex_hash[topic] ||= Mutex.new
+         end
+
+         @mutex_hash[topic]
+       end
+
+       # Update the timestamp without acquiring the mutex
+       #
+       # This is an optimization that allows refreshing the TTL of existing entries
+       # without the overhead of mutex acquisition.
+       #
+       # @param topic [String] Kafka topic name
+       #
+       # @note This method is safe for refreshing existing data regardless of count
+       #   because it only updates the timestamp, which doesn't affect the correctness
+       #   of concurrent operations.
+       def refresh_timestamp(topic)
+         current_info = @counts[topic]
+
+         return unless current_info
+
+         # Update the timestamp in-place
+         current_info[0] = monotonic_now
+       end
+
+       # Check if a timestamp has expired based on the TTL
+       #
+       # @param timestamp [Float] Monotonic timestamp to check
+       # @return [Boolean] true if expired, false otherwise
+       def expired?(timestamp)
+         monotonic_now - timestamp > @ttl
+       end
+     end
+   end
+ end
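For readers skimming the diff, here is a minimal usage sketch of the cache semantics described above. It is not part of the gem: the topic name, the shortened TTL, and the fetch_partition_count_from_kafka helper are illustrative placeholders (inside the gem, Producer#partition_count supplies the block via a metadata query).

require "rdkafka"

# Hypothetical helper standing in for a real metadata query.
def fetch_partition_count_from_kafka(_topic)
  12
end

cache = Rdkafka::Producer::PartitionsCountCache.new(30)

# First read: no cached entry, so the block runs and its result is cached.
cache.get("events") { fetch_partition_count_from_kafka("events") } # => 12

# Reads within the TTL return the cached value without yielding.
cache.get("events") { raise "not reached while the entry is fresh" } # => 12

# Statistics callbacks (or any other code path) can push fresh counts in;
# only equal or higher counts replace the cached value, lower ones just refresh the TTL.
cache.set("events", 24)
cache.to_h # => { "events" => [<monotonic timestamp>, 24] }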
@@ -0,0 +1,430 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
+   class Producer
+     include Helpers::Time
+     include Helpers::OAuth
+
+     # Empty hash used as a default
+     EMPTY_HASH = {}.freeze
+
+     # @private
+     @@partitions_count_cache = PartitionsCountCache.new
+
+     # Global (process wide) partitions cache. We use it to store the number of topic partitions,
+     # either from the librdkafka statistics (if enabled) or via direct inline calls every now and
+     # then. Since the partitions count can only grow and should be the same for all consumers and
+     # producers, we can use a global cache as long as we ensure that updates only move up.
+     #
+     # @note It is critical to remember that not all users may have statistics callbacks enabled,
+     #   hence we should not assume that this cache is always updated from the stats.
+     #
+     # @return [Rdkafka::Producer::PartitionsCountCache]
+     def self.partitions_count_cache
+       @@partitions_count_cache
+     end
+
+     # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
+     def self.partitions_count_cache=(partitions_count_cache)
+       @@partitions_count_cache = partitions_count_cache
+     end
+
+     private_constant :EMPTY_HASH
+
+     # Raised when there was a critical issue when invoking rd_kafka_topic_new
+     # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
+     # resolved and this is normalized in all places
+     class TopicHandleCreationError < RuntimeError; end
+
+     # @private
+     # Returns the current delivery callback, by default this is nil.
+     #
+     # @return [Proc, nil]
+     attr_reader :delivery_callback
+
+     # @private
+     # Returns the number of arguments accepted by the callback, by default this is nil.
+     #
+     # @return [Integer, nil]
+     attr_reader :delivery_callback_arity
+
+     # @private
+     # @param native_kafka [NativeKafka]
+     # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
+     #   the "consistent_random" default
+     def initialize(native_kafka, partitioner_name)
+       @topics_refs_map = {}
+       @topics_configs = {}
+       @native_kafka = native_kafka
+       @partitioner_name = partitioner_name || "consistent_random"
+
+       # Makes sure that the native Kafka handle gets closed before it gets GCed by Ruby
+       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+     end
+
+     # Sets an alternative set of configuration details that can be set per topic
+     # @note It is not allowed to re-set the same topic config twice because of the underlying
+     #   librdkafka caching
+     # @param topic [String] The topic name
+     # @param config [Hash] config we want to use on a per-topic basis
+     # @param config_hash [Integer] hash of the config. We expect it here instead of computing it,
+     #   because it is already computed during the retrieval attempt in the `#produce` flow.
+     def set_topic_config(topic, config, config_hash)
+       # Ensure lock on topic reference just in case
+       @native_kafka.with_inner do |inner|
+         @topics_refs_map[topic] ||= {}
+         @topics_configs[topic] ||= {}
+
+         return if @topics_configs[topic].key?(config_hash)
+
+         # If config is empty, we create an empty reference that will be used with defaults
+         rd_topic_config = if config.empty?
+           nil
+         else
+           Rdkafka::Bindings.rd_kafka_topic_conf_new.tap do |topic_config|
+             config.each do |key, value|
+               error_buffer = FFI::MemoryPointer.new(:char, 256)
+               result = Rdkafka::Bindings.rd_kafka_topic_conf_set(
+                 topic_config,
+                 key.to_s,
+                 value.to_s,
+                 error_buffer,
+                 256
+               )
+
+               unless result == :config_ok
+                 raise Config::ConfigError.new(error_buffer.read_string)
+               end
+             end
+           end
+         end
+
+         topic_handle = Bindings.rd_kafka_topic_new(inner, topic, rd_topic_config)
+
+         raise TopicHandleCreationError.new("Error creating topic handle for topic #{topic}") if topic_handle.null?
+
+         @topics_configs[topic][config_hash] = config
+         @topics_refs_map[topic][config_hash] = topic_handle
+       end
+     end
+
+     # Starts the native Kafka polling thread and kicks off the init polling
+     # @note Not needed to run unless explicit start was disabled
+     def start
+       @native_kafka.start
+     end
+
+     # @return [String] producer name
+     def name
+       @name ||= @native_kafka.with_inner do |inner|
+         ::Rdkafka::Bindings.rd_kafka_name(inner)
+       end
+     end
+
+     # Set a callback that will be called every time a message is successfully produced.
+     # The callback is called with a {DeliveryReport} and {DeliveryHandle}.
+     #
+     # @param callback [Proc, #call] The callback
+     #
+     # @return [nil]
+     def delivery_callback=(callback)
+       raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+       @delivery_callback = callback
+       @delivery_callback_arity = arity(callback)
+     end
+
+     # Close this producer and wait for the internal poll queue to empty.
+     def close
+       return if closed?
+       ObjectSpace.undefine_finalizer(self)
+
+       @native_kafka.close do
+         # We need to remove the topic reference objects before we destroy the producer,
+         # otherwise they would leak out
+         @topics_refs_map.each_value do |refs|
+           refs.each_value do |ref|
+             Rdkafka::Bindings.rd_kafka_topic_destroy(ref)
+           end
+         end
+       end
+
+       @topics_refs_map.clear
+     end
+
+     # Whether this producer has been closed
+     def closed?
+       @native_kafka.closed?
+     end
+
+     # Wait until all outstanding producer requests are completed, with the given timeout
+     # in milliseconds. Call this before closing a producer to ensure delivery of all messages.
+     #
+     # @param timeout_ms [Integer] how long should we wait for flush of all messages
+     # @return [Boolean] true if no more data and all was flushed, false in case there are still
+     #   outgoing messages after the timeout
+     #
+     # @note We raise an exception for other errors because based on the librdkafka docs, there
+     #   should be no other errors.
+     #
+     # @note For `timed_out` we do not raise an error to keep it backwards compatible
+     def flush(timeout_ms=5_000)
+       closed_producer_check(__method__)
+
+       code = nil
+
+       @native_kafka.with_inner do |inner|
+         code = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+       end
+
+       # Early skip not to build the error message
+       return true if code.zero?
+
+       error = Rdkafka::RdkafkaError.new(code)
+
+       return false if error.code == :timed_out
+
+       raise(error)
+     end
+
+     # Purges the outgoing queue and releases all resources.
+     #
+     # Useful when closing the producer with outgoing messages to unstable clusters or when, for
+     # any other reason, waiting cannot go on anymore. This purges both the queue and all the
+     # in-flight requests and updates the delivery handle statuses so they can be materialized
+     # into `purge_queue` errors.
+     def purge
+       closed_producer_check(__method__)
+
+       code = nil
+
+       @native_kafka.with_inner do |inner|
+         code = Bindings.rd_kafka_purge(
+           inner,
+           Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+         )
+       end
+
+       code.zero? || raise(Rdkafka::RdkafkaError.new(code))
+
+       # Wait for the purge to affect everything
+       sleep(0.001) until flush(100)
+
+       true
+     end
+
+     # Partition count for a given topic.
+     #
+     # @param topic [String] The topic name.
+     # @return [Integer] partition count for a given topic or `-1` if it could not be obtained.
+     #
+     # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
+     #   auto-created after returning `-1`.
+     #
+     # @note We cache the partition count for a given topic for a given time. If statistics are
+     #   enabled for any producer or consumer, they will take precedence over per-instance fetching.
+     #
+     #   This prevents us, in case someone uses `partition_key`, from querying for the count with
+     #   each message. Instead we query at most once every 30 seconds if we have a valid partition
+     #   count, or every 5 seconds in case we were not able to obtain the number of partitions.
+     def partition_count(topic)
+       closed_producer_check(__method__)
+
+       self.class.partitions_count_cache.get(topic) do
+         topic_metadata = nil
+
+         @native_kafka.with_inner do |inner|
+           topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+         end
+
+         topic_metadata ? topic_metadata[:partition_count] : -1
+       end
+     rescue Rdkafka::RdkafkaError => e
+       # If the topic does not exist, it will be created, or if that is not allowed, another
+       # error will be raised. We return -1 here so this can happen without an early error
+       # happening on metadata discovery.
+       return -1 if e.code == :unknown_topic_or_part
+
+       raise(e)
+     end
+
+     # Produces a message to a Kafka topic. The message is added to rdkafka's queue; call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
+     #
+     # When no partition is specified the underlying Kafka library picks a partition based on the key. If no key is specified, a random partition will be used.
+     # When a timestamp is provided this is used instead of the auto-generated timestamp.
+     #
+     # @param topic [String] The topic to produce to
+     # @param payload [String,nil] The message's payload
+     # @param key [String, nil] The message's key
+     # @param partition [Integer,nil] Optional partition to produce to
+     # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
+     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
+     # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
+     # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
+     # @param topic_config [Hash] topic config for a given message dispatch. Allows sending messages to topics with different configurations
+     #
+     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
+     #
+     # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
+     def produce(
+       topic:,
+       payload: nil,
+       key: nil,
+       partition: nil,
+       partition_key: nil,
+       timestamp: nil,
+       headers: nil,
+       label: nil,
+       topic_config: EMPTY_HASH
+     )
+       closed_producer_check(__method__)
+
+       # Start by checking and converting the input
+
+       # Get payload length
+       payload_size = if payload.nil?
+         0
+       else
+         payload.bytesize
+       end
+
+       # Get key length
+       key_size = if key.nil?
+         0
+       else
+         key.bytesize
+       end
+
+       topic_config_hash = topic_config.hash
+
+       # Checks if we have the rdkafka topic reference object ready. It saves us on object
+       # allocation and allows using a custom config on demand.
+       set_topic_config(topic, topic_config, topic_config_hash) unless @topics_refs_map.dig(topic, topic_config_hash)
+       topic_ref = @topics_refs_map.dig(topic, topic_config_hash)
+
+       if partition_key
+         partition_count = partition_count(topic)
+
+         # Check if there are no overrides for the partitioner and use the default one only when
+         # no per-topic partitioner is present.
+         partitioner_name = @topics_configs.dig(topic, topic_config_hash, :partitioner) || @partitioner_name
+
+         # If the topic is not present, set to -1
+         partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, partitioner_name) if partition_count.positive?
+       end
+
+       # If partition is nil, use -1 to let librdkafka set the partition randomly or
+       # based on the key when present.
+       partition ||= -1
+
+       # If timestamp is nil use 0 and let Kafka set one. If an integer or time
+       # use it.
+       raw_timestamp = if timestamp.nil?
+         0
+       elsif timestamp.is_a?(Integer)
+         timestamp
+       elsif timestamp.is_a?(Time)
+         (timestamp.to_i * 1000) + (timestamp.usec / 1000)
+       else
+         raise TypeError.new("Timestamp has to be nil, an Integer or a Time")
+       end
+
+       delivery_handle = DeliveryHandle.new
+       delivery_handle.label = label
+       delivery_handle.topic = topic
+       delivery_handle[:pending] = true
+       delivery_handle[:response] = -1
+       delivery_handle[:partition] = -1
+       delivery_handle[:offset] = -1
+       DeliveryHandle.register(delivery_handle)
+
+       args = [
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_RKT, :pointer, topic_ref,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_MSGFLAGS, :int, Rdkafka::Bindings::RD_KAFKA_MSG_F_COPY,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_VALUE, :buffer_in, payload, :size_t, payload_size,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_KEY, :buffer_in, key, :size_t, key_size,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_PARTITION, :int32, partition,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TIMESTAMP, :int64, raw_timestamp,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_OPAQUE, :pointer, delivery_handle,
+       ]
+
+       if headers
+         headers.each do |key0, value0|
+           key = key0.to_s
+           if value0.is_a?(Array)
+             # Handle array of values per KIP-82
+             value0.each do |value|
+               value = value.to_s
+               args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+               args << :string << key
+               args << :pointer << value
+               args << :size_t << value.bytesize
+             end
+           else
+             # Handle single value
+             value = value0.to_s
+             args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+             args << :string << key
+             args << :pointer << value
+             args << :size_t << value.bytesize
+           end
+         end
+       end
+
+       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
+
+       # Produce the message
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_producev(
+           inner,
+           *args
+         )
+       end
+
+       # Raise error if the produce call was not successful
+       if response != 0
+         DeliveryHandle.remove(delivery_handle.to_ptr.address)
+         raise RdkafkaError.new(response)
+       end
+
+       delivery_handle
+     end
+
+     # Calls (if registered) the delivery callback
+     #
+     # @param delivery_report [Producer::DeliveryReport]
+     # @param delivery_handle [Producer::DeliveryHandle]
+     def call_delivery_callback(delivery_report, delivery_handle)
+       return unless @delivery_callback
+
+       case @delivery_callback_arity
+       when 0
+         @delivery_callback.call
+       when 1
+         @delivery_callback.call(delivery_report)
+       else
+         @delivery_callback.call(delivery_report, delivery_handle)
+       end
+     end
+
+     # Figures out the arity of a given block/method
+     #
+     # @param callback [#call, Proc]
+     # @return [Integer] arity of the provided block/method
+     def arity(callback)
+       return callback.arity if callback.respond_to?(:arity)
+
+       callback.method(:call).arity
+     end
+
+     private
+
+     # Ensures no operations can happen on a closed producer
+     #
+     # @param method [Symbol] name of the method that invoked the producer
+     # @raise [Rdkafka::ClosedProducerError]
+     def closed_producer_check(method)
+       raise Rdkafka::ClosedProducerError.new(method) if closed?
+     end
+   end
+ end
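And a minimal end-to-end sketch of the producer API added above. It is not part of the gem; the broker address, topic name, headers, and per-topic config values are placeholders to adjust for your cluster.

require "rdkafka"

# Placeholder broker address; adjust for your cluster.
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

handle = producer.produce(
  topic: "events",
  payload: "example payload",
  key: "user-1",
  # partition_key routes through the partitioner using the cached partition count
  partition_key: "user-1",
  # Array values produce duplicate headers per KIP-82
  headers: { "source" => ["web", "mobile"] },
  label: :example,
  # Per-topic config; the topic handle is created once and cached by the config hash
  topic_config: { "acks" => "all" }
)

# Block until the delivery report arrives (raises on delivery failure or timeout).
report = handle.wait(max_wait_timeout: 5)
puts "delivered to partition #{report.partition} at offset #{report.offset}"

producer.flush(2_000)
producer.close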
@@ -0,0 +1,7 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   VERSION = "0.22.0.beta1"
+   LIBRDKAFKA_VERSION = "2.8.0"
+   LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
+ end